diff --git a/CMakeLists.txt b/CMakeLists.txt
index b6e88667a2849484a2905d9272388f8e7ff77555..ab31a0f59b79ea02e395122e3b83363c1a6d0db3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -79,7 +79,7 @@ file(MAKE_DIRECTORY ${STAGE_DIR}
 setup_compiler_flags()
 
 # Python needed before Boost
-find_package(Python 2.7 REQUIRED)
+find_package(Python 3.6 REQUIRED)
 # Split version string
 string(REPLACE "." ";" _python_version_list ${PYTHON_VERSION})
 list(GET _python_version_list 0 PYTHON_VERSION_MAJOR)
@@ -88,7 +88,10 @@ list(GET _python_version_list 1 PYTHON_VERSION_MINOR)
 setup_boost()
 
 if(NOT DISABLE_DOCUMENTATION)
-  find_package(Sphinx ${PYTHON_VERSION} REQUIRED)
+  # Disabled the find_package call: we no longer invoke the sphinx-build
+  # executable but execute Sphinx directly with the Python interpreter.
+  # This should be replaced by a check that Sphinx is installed on the system.
+  #find_package(Sphinx)
   set(PYTHON_DOC_URL "https://docs.python.org/${PYTHON_VERSION}")
   # this URL should always point to the latest version of OST
   set(OST_DOC_URL "https://www.openstructure.org/docs")
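The comment above leaves a TODO: verify that Sphinx is importable by the interpreter that will build the docs. A minimal sketch of such a probe, which CMake could run via execute_process (the script name and wiring are hypothetical, not part of this patch):

    # check_sphinx.py -- hypothetical helper script
    import importlib.util
    import sys

    # Fail if Sphinx cannot be imported by this interpreter, i.e. the one
    # that "python -m sphinx" will use at documentation build time.
    if importlib.util.find_spec("sphinx") is None:
        sys.exit("Sphinx not found for interpreter " + sys.executable)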
diff --git a/cmake_support/FindPython.cmake b/cmake_support/FindPython.cmake
index 18ae9617f87d5b7d8b4f3fae2370271c10f0d677..7cdeeaae0797940c8192441f153f753205c516ed 100644
--- a/cmake_support/FindPython.cmake
+++ b/cmake_support/FindPython.cmake
@@ -18,8 +18,8 @@
 # Author: Marco Biasini/ Stefan Bienert
 #-------------------------------------------------------------------------------
 
-set(PYTHON_VERSIONS 2.7)
-set(PYTHON_MIN_VERSION 2.7)
+set(PYTHON_VERSIONS 3.8 3.7 3.6)
+set(PYTHON_MIN_VERSION 3.6.0)
 
 #-------------------------------------------------------------------------------
 # check for python framework
diff --git a/cmake_support/FindSphinx.cmake b/cmake_support/FindSphinx.cmake
deleted file mode 100644
index 749e9cbdd713e5ba7877f653488f1809b8402f36..0000000000000000000000000000000000000000
--- a/cmake_support/FindSphinx.cmake
+++ /dev/null
@@ -1,35 +0,0 @@
-#-------------------------------------------------------------------------------
-# Check for Sphinx binary
-#
-#    SPHINX_BINARY            is set to the path to the sphinx-build executable,
-#                             preferably sphinx-build-${Sphinx_FIND_VERSION} if
-#                             provided. Also admires PYTHON_ROOT if available.
-#
-# Author: Bienchen
-#-------------------------------------------------------------------------------
-
-if(Sphinx_FIND_VERSION)
-  set(ADD_SPHINX_NAMES "sphinx-build-${Sphinx_FIND_VERSION}")
-endif()
-
-if(PYTHON_ROOT)
-  find_program(SPHINX_BINARY NAMES sphinx-build ${ADD_SPHINX_NAMES}
-    HINTS ${PYTHON_ROOT}
-    PATH_SUFFIXES bin
-    DOC "Sphinx documentation generator"
-  )
-else()
-  find_program(SPHINX_BINARY NAMES sphinx-build ${ADD_SPHINX_NAMES}
-    PATH_SUFFIXES bin
-    DOC "Sphinx documentation generator"
-  )
-endif()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(Sphinx DEFAULT_MSG
-  SPHINX_BINARY
-)
-
-mark_as_advanced(
-  SPHINX_BINARY
-)
diff --git a/cmake_support/PROMOD3.cmake b/cmake_support/PROMOD3.cmake
index 7da10d674d174a7a899a0d17f6fcdd227535cc06..f42ac97d137dc170880b4add0e1dc3879c638b1a 100644
--- a/cmake_support/PROMOD3.cmake
+++ b/cmake_support/PROMOD3.cmake
@@ -1019,10 +1019,10 @@ macro(setup_boost)
   #              python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR} REQUIRED)
   # set(BOOST_PYTHON_LIBRARIES ${Boost_LIBRARIES})
   # see https://cmake.org/cmake/help/v3.11/module/FindBoost.html
-  foreach(_python_lib_name python
-                           python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}
+  foreach(_python_lib_name python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}
                            python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}
-                           python${PYTHON_VERSION_MAJOR})
+                           python${PYTHON_VERSION_MAJOR}
+                           python)
     find_package(Boost ${_BOOST_MIN_VERSION} COMPONENTS ${_python_lib_name} QUIET)
     if(Boost_FOUND)
       message(STATUS "Found Boost package: " ${_python_lib_name})
diff --git a/core/init/__init__.py.in b/core/init/__init__.py.in
index 3be41dac459e2ba3cbeed1f8ae70cecb281470c9..a4e38d44e1d6cbad893b13b0fb5252bdb2f7e78a 100644
--- a/core/init/__init__.py.in
+++ b/core/init/__init__.py.in
@@ -61,7 +61,7 @@ def SetProMod3SharedDataPath(path):
 # check if we already have an OST PrefixPath
 try:
     ost.GetSharedDataPath()
-except RuntimeError, rt_err:
+except RuntimeError:
     ost.SetPrefixPath("@OST_ROOT@")
 except:
     raise
diff --git a/core/pymod/CMakeLists.txt b/core/pymod/CMakeLists.txt
index 563feb76131b0461e24434761c9071ced9a47a56..bee3460b2c5c4c1625eb2f6124bd5a3140b4c258 100644
--- a/core/pymod/CMakeLists.txt
+++ b/core/pymod/CMakeLists.txt
@@ -10,7 +10,6 @@ set(CORE_PYMOD
   __init__.py
   helper.py
   pm3argparse.py
-  _filecache.py
 )
 
 pymod(NAME core CPP ${CORE_CPP} PY ${CORE_PYMOD})
diff --git a/core/pymod/__init__.py b/core/pymod/__init__.py
index 1eac336f15bb63c7f87c2b62c281dbcb444572be..0c0066f597836afb33879fff16e2dbd264b7c26d 100644
--- a/core/pymod/__init__.py
+++ b/core/pymod/__init__.py
@@ -14,5 +14,5 @@
 # limitations under the License.
 
 
-from _core import *
-from _filecache import FileCache
+from ._core import *
+
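Python 3 dropped implicit relative imports (PEP 328), so a bare "from _core import *" no longer resolves to the sibling extension module inside the package; the added leading dot makes the lookup explicitly relative. Sketched on a hypothetical package layout mirroring core/pymod:

    # pkg/__init__.py -- hypothetical layout
    # Python 2 resolved the sibling module implicitly:
    #     from _core import *      # ImportError under Python 3
    # Python 3 requires the explicit relative form:
    from ._core import *          # resolves to pkg._core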
diff --git a/core/pymod/_filecache.py b/core/pymod/_filecache.py
deleted file mode 100644
index 26b25b2484e3d0f5c428ad8452e0ae581b420c2d..0000000000000000000000000000000000000000
--- a/core/pymod/_filecache.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2013-2018, SIB - Swiss Institute of Bioinformatics and 
-#                          Biozentrum - University of Basel
-# 
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import shutil
-
-
-class FileCache:
-  
-    def __init__(self, cache_dir):
-        self.cache_dir = cache_dir
-    
-    def CopyFromCache(self, filename, target_dir):
-    
-        cache_file = os.path.join(self.cache_dir, filename)
-        lock = os.path.join(self.cache_dir, filename+".lock")
-        if not os.path.exists(lock):
-            if os.path.exists(cache_file):
-              #create a lock file
-              try:
-                  lock_fh = open(lock,'w')
-                  lock_fh.close()
-              except:
-                  # I can't even create a lock file, somethings super wrong... 
-                  # let's give up
-                  if os.path.exists(lock):
-                    os.remove(lock)
-                    return False
-              try:
-                  #try to copy what we want
-                  file_to_retreive = os.path.join(target_dir, filename)
-                  shutil.copy(cache_file, file_to_retreive)
-                  #remove the lock file
-                  if os.path.exists(lock):
-                      os.remove(lock)
-                  #Yeah, it worked!
-                  return True
-              except:
-                  # something failed with copying, we still have to remove 
-                  # the lock file
-                  if os.path.exists(lock):
-                      os.remove(lock)
-                  pass
-        return False
-    
-    def CopyToCache(self, filename, source_dir):
-
-        cache_file = os.path.join(self.cache_dir, filename)
-        lock = os.path.join(self.cache_dir, filename+".lock")
-
-        # this overwrites previously cached files with same name!
-        if not os.path.exists(lock):
-            #create a lock file
-            try:
-                lock_fh = open(lock,'w')
-                lock_fh.close()
-            except:
-                # I can't even create a lock file, somethings super wrong.... 
-                # let's give up
-                if os.path.exists(lock):
-                    os.remove(lock)
-                return
-            try:
-                #let's try to copy
-                file_to_cache = os.path.join(source_dir, filename)
-                shutil.copy(file_to_cache, cache_file)
-                if os.path.exists(lock):
-                    os.remove(lock)
-            except:
-                #something went wrong, there still might be the lock file!
-                if os.path.exists(lock):
-                    os.remove(lock)
diff --git a/core/pymod/pm3argparse.py b/core/pymod/pm3argparse.py
index c88da835d3df1c19192347c38481afcbea981f2a..94a6c458380989ca6ef6e17e2686747e03a30bbf 100644
--- a/core/pymod/pm3argparse.py
+++ b/core/pymod/pm3argparse.py
@@ -40,27 +40,27 @@ def _TmpForGZip(filename, suffix, msg_prefix):
     helper.FileExists(msg_prefix, 12, filename)
     try:
         zip_fh = gzip.open(filename)
-        unzip_str = zip_fh.read()
+        unzip_content = zip_fh.read()
         zip_fh.close()
-    except IOError, ioe:
+    except IOError as ioe:
         helper.MsgErrorAndExit(msg_prefix + " gzip file '" + filename +
                                "' cannot be opened: " + str(ioe), 14)
-    unzip_file = tempfile.NamedTemporaryFile(mode='w', suffix=suffix)
-    unzip_file.write(unzip_str)
+    unzip_file = tempfile.NamedTemporaryFile(mode='wb', suffix=suffix)
+    unzip_file.write(unzip_content)
     unzip_file.flush()
     return unzip_file
 
 def _CheckJSONAlnSeqKeyType(key_name, val_type, json_aln, seqtype, json_source):
     '''Check a key/value in a sequence exists and is of certain type.
     '''
-    if key_name not in json_aln[seqtype].keys():
+    if key_name not in json_aln[seqtype]:
         helper.MsgErrorAndExit("JSON 'alignmentlist' '%s' " % seqtype+
                                "from '%s' is " % json_source+
                                "missing the '%s' key" % key_name, 27)
     altype = type(json_aln[seqtype][key_name])
 
-    if val_type is str or val_type is unicode:
-        if not (altype is unicode or altype is str):
+    if val_type is str:
+        if altype is not str:
             helper.MsgErrorAndExit("JSON 'alignmentlist' '%s' " % seqtype+
                                    "'%s' from" % key_name+
                                    "'%s' is not a " % json_source+
@@ -78,7 +78,7 @@ def _GetAlnFromJSON(json_object, json_source):
     yield operator.
     """
     # alignments are stored via the 'alignmentlist' key
-    if 'alignmentlist' not in json_object.keys():
+    if 'alignmentlist' not in json_object:
         helper.MsgErrorAndExit("JSON object from '%s' does not " % json_source+
                                "provide an 'alignmentlist' key.", 21)
     # alignments come as lists, to enable hetero oligos
@@ -95,7 +95,7 @@ def _GetAlnFromJSON(json_object, json_source):
         # an alignment has a 'target' and a 'template' dictionary
         # each of them has a 'name' and a 'seqres' pair
         for flav in ['target', 'template']:
-            if flav not in json_aln.keys():
+            if flav not in json_aln:
                 helper.MsgErrorAndExit("JSON 'alignmentlist' from "+
                                        "'%s' does not " % json_source+
                                        "provide a '%s' key." % flav, 22)
@@ -144,7 +144,7 @@ def _GetJSONOBject(json_input):
             readfile = unzip_file.name
         try:
             jfh = open(readfile)
-        except IOError, ioe:
+        except IOError as ioe:
             helper.MsgErrorAndExit("'--json' file '%s' " % json_input+
                                    "can not be processed: %s" % ioe.strerror,
                                    19)
@@ -152,22 +152,19 @@ def _GetJSONOBject(json_input):
             raise
         try:
             json_object = json.load(jfh)
-        except ValueError, vae:
-            if vae.message == 'No JSON object could be decoded':
-                helper.MsgErrorAndExit("'--json' file '%s' could " % json_input+
-                                       "not be processed into a JSON object, "+
-                                       "probably it's empty.", 20)
-            else:
-                raise
+        except json.JSONDecodeError:
+            helper.MsgErrorAndExit("'--json' file '%s' could " % json_input+
+                                   "not be processed into a JSON object, "+
+                                   "probably it's empty.", 20)
         except:
             raise
         jfh.close()
     else:
         try:
             json_object = json.loads(json_input)
-        except ValueError, vae:
+        except ValueError as vae:
             helper.MsgErrorAndExit("'--json' string '%s' " % json_input+\
-                                   "could not be decoded: %s" % vae.message, 23)
+                                   "could not be decoded: %s" % str(vae), 23)
     return json_object
 
 def _FetchAlnFromFile(seqfile, allow_multitemplate, format):
@@ -183,9 +180,9 @@ def _FetchAlnFromFile(seqfile, allow_multitemplate, format):
         readfile = unzip_file.name
     try:
         aln = io.LoadAlignment(readfile, format=format)
-    except Exception, exc: #pylint: disable=broad-except
-        if exc.message in ['Bad FASTA file: File is empty',
-                           'Bad CLUSTAL file: File is empty']:
+    except Exception as exc: #pylint: disable=broad-except
+        if str(exc) in ['Bad FASTA file: File is empty',
+                        'Bad CLUSTAL file: File is empty']:
             helper.MsgErrorAndExit(argstr +  " refers to an empty file or " +
                                    "its in the wrong format.", 15)
         else:
@@ -229,7 +226,7 @@ def _LoadPDB(filename):
     helper.FileExists("PDB Structure", 32, filename)
     try:
         ent = io.LoadPDB(filename)
-    except Exception, exc: #pylint: disable=broad-except
+    except Exception as exc: #pylint: disable=broad-except
         helper.MsgErrorAndExit(argstr + ": failure to parse PDB file: " +
                                str(exc), 33)
     return ent
@@ -240,8 +237,8 @@ def _LoadEntity(filename):
     helper.FileExists("Structure", 32, filename)
     try:
         ent = io.LoadEntity(filename)
-    except Exception, exc: #pylint: disable=broad-except
-        if exc.message.startswith('no suitable entity io handler found'):
+    except Exception as exc: #pylint: disable=broad-except
+        if str(exc).startswith('no suitable entity io handler found'):
             helper.MsgErrorAndExit(argstr + ": not a supported format " +
                                    str(exc), 34)
         else:
@@ -255,7 +252,7 @@ def _FetchProfileFromFile(filename):
     helper.FileExists("Profile", 51, filename)
     try:
         prof = io.LoadSequenceProfile(filename)
-    except Exception, exc:
+    except Exception as exc:
         helper.MsgErrorAndExit(argstr + ": failure to parse profile file: " +
                                str(exc), 52)        
     return prof
@@ -266,7 +263,7 @@ def _FetchPsipredFromFile(filename):
     helper.FileExists("Profile", 51, filename)
     try:
         pred = loop.PsipredPrediction.FromHHM(filename)
-    except Exception, exc:
+    except Exception as exc:
         helper.MsgErrorAndExit(argstr + ": failure to parse psipred " +
                                "prediction: " + str(exc), 56)        
     return pred
@@ -334,7 +331,7 @@ def _AttachViewsToAln(aln, chain_entities):
         # identify chain and attach view
         if len(chain_entities) == 1:
             aln.AttachView(i, chain_entities['UNIQUE'].CreateFullView())
-        elif chain_entities.has_key(tpl_id):
+        elif tpl_id in chain_entities:
             aln.AttachView(i, chain_entities[tpl_id].CreateFullView())
         else:
             helper.MsgErrorAndExit("Could not find chain with ID " + tpl_id +
@@ -818,7 +815,7 @@ class PM3OptionsNamespace(object):
         for s in trg_sequences:
             try:
                 self.profiles.append(self.loaded_profiles[prof_sequences.index(s)])
-            except Exception, exc:
+            except Exception as exc:
                 helper.MsgErrorAndExit("Could not find profile with sequence " +
                                        "that exactly matches trg seq: " + s, 55)
 
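Two Python 3 changes drive most edits in this file: the "except E, e" comma syntax was removed in favour of "except E as e", and exceptions no longer carry a .message attribute, so the text is recovered with str(exc). A minimal sketch of the replacement pattern:

    try:
        raise IOError("Bad FASTA file: File is empty")
    except IOError as ioe:       # the 'as' binding is the only form in Python 3
        detail = str(ioe)        # str(exc) replaces the removed exc.message
        print(detail)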
diff --git a/core/tests/test_helper.py b/core/tests/test_helper.py
index eb82be1d97daed11716f60332cf080886099a1f1..f18cbbf50534e313d240f3954b62df46d566dbda 100644
--- a/core/tests/test_helper.py
+++ b/core/tests/test_helper.py
@@ -28,7 +28,7 @@ class _FetchLog(ost.LogSink):
         levels = ['ERROR', 'WARNING', 'SCRIPT', 'INFO', 'VERBOSE', 'DEBUG',
                   'TRACE']
         level = levels[severity]
-        if not level in self.messages.keys():
+        if level not in self.messages:
             self.messages[level] = list()
         self.messages[level].append(message.strip())
 
diff --git a/core/tests/test_pm3argparse.py b/core/tests/test_pm3argparse.py
index 7094be920f57acae2822d043eb09a37334ce0433..880f75c6467d52566cbd27394c40b91add275250 100644
--- a/core/tests/test_pm3argparse.py
+++ b/core/tests/test_pm3argparse.py
@@ -38,7 +38,7 @@ class _FetchLog(ost.LogSink):
         levels = ['ERROR', 'WARNING', 'SCRIPT', 'INFO', 'VERBOSE', 'DEBUG',
                   'TRACE']
         level = levels[severity]
-        if not level in self.messages.keys():
+        if level not in self.messages:
             self.messages[level] = list()
         self.messages[level].append(message.strip())
 
@@ -386,38 +386,35 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(ecd.exception.code, 24)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
-                         ['JSON object from \'{"alignmentlist": "I\'m not a '+
-                          'list!"}\' does notprovide a list behind '+
-                          '\'alignmentlist\'.'])
+                         ['JSON object from \'' + json_str + 
+                          '\' does notprovide a list behind \'alignmentlist\'.'])
 
     def testAddAlignmentJsonMalString(self):
         # fail on improper JSON string
         parser = pm3argparse.PM3ArgumentParser(__doc__, action=False)
         parser.AddAlignment()
         parser.AssembleParser()
-        json_obj = "{'Wrong': 'wrong'}"
+        json_str = "{'Wrong': 'wrong'}"
         with self.assertRaises(SystemExit) as ecd:
-            parser.Parse(['--json', json_obj])
+            parser.Parse(['--json', json_str])
         self.assertEqual(ecd.exception.code, 23)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
-        self.assertEqual(self.log.messages['ERROR'],
-                         ["'--json' string '{'Wrong': 'wrong'}' could not be "+
-                          "decoded: Expecting property name: line 1 column 2 "+
-                          "(char 1)"])
+        self.assertTrue(self.log.messages['ERROR'][0].startswith(
+          "'--json' string '{'Wrong': 'wrong'}' could not be decoded:"))
 
     def testAddAlignmentJsonNoAlnLstKey(self):
         # detect missing key 'alignmentlist
         parser = pm3argparse.PM3ArgumentParser(__doc__, action=False)
         parser.AddAlignment()
         parser.AssembleParser()
-        json_obj = json.dumps({"Sth different": "Foo"})
+        json_str = json.dumps({"Sth different": "Foo"})
         with self.assertRaises(SystemExit) as ecd:
-            parser.Parse(['--json', json_obj])
+            parser.Parse(['--json', json_str])
         self.assertEqual(ecd.exception.code, 21)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
-                         ['JSON object from \'{"Sth different": "Foo"}\' '+
-                          'does not provide an \'alignmentlist\' key.'])
+                         ['JSON object from \'' + json_str + 
+                          '\' does not provide an \'alignmentlist\' key.'])
 
     def testAddAlignmentJsonNoTargetKey(self):
         # check that 'alignmentlist'target' is required
@@ -430,25 +427,23 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(ecd.exception.code, 22)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
-                         ['JSON \'alignmentlist\' from \'{"alignmentlist": '+
-                          '[{"Foo": "BAR"}]}\' does not provide a \'target\' '+
-                          'key.'])
+                         ['JSON \'alignmentlist\' from \'' + json_str + 
+                          '\' does not provide a \'target\' key.'])
 
     def testAddAlignmentJsonNoTemplateKey(self):
         # check that 'alignmentlist'template' is required
         parser = pm3argparse.PM3ArgumentParser(__doc__, action=False)
         parser.AddAlignment()
         parser.AssembleParser()
-        json_str = json.dumps({'alignmentlist': [{'target': {'name' : 'AAA',
-                                                             'seqres': 'AA'}}]})
+        json_str = '{"alignmentlist": [{"target": {"seqres": "AA", '\
+                   '"name": "AAA"}}]}'
         with self.assertRaises(SystemExit) as ecd:
             parser.Parse(['--json', json_str])
         self.assertEqual(ecd.exception.code, 22)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
-                         ['JSON \'alignmentlist\' from \'{"alignmentlist": '+
-                          '[{"target": {"seqres": "AA", "name": "AAA"}}]}\' '+
-                          'does not provide a \'template\' key.'])
+                         ['JSON \'alignmentlist\' from \'' + json_str + 
+                          '\' does not provide a \'template\' key.'])
 
     def testAddAlignmentJsonAlnTrgNoDict(self):
         # entries of the alignmentlist need to be dict's
@@ -461,9 +456,8 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(ecd.exception.code, 25)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
-                         ['JSON \'alignmentlist\' member from '+
-                          '\'{"alignmentlist": ["Seq1", "Seq2"]}\' is not '+
-                          'a \'  dictionary: Seq1'])
+                         ['JSON \'alignmentlist\' member from \'' + json_str +
+                          '\' is not a \'  dictionary: Seq1'])
 
     def testAddAlignmentJsonAlnTrgNotDict(self):
         # entries of the alignmentlist need to be dict's of dict's
@@ -478,8 +472,7 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
                          ['JSON \'alignmentlist\' \'target\' '+
-                          'from\'{"alignmentlist": [{"target": "AAA", '+
-                          '"template": "BBB"}]}\' is not a dictionary: AAA'])
+                          'from\'' + json_str + '\' is not a dictionary: AAA'])
 
     def testAddAlignmentJsonAlnTrgNoNameNoSeqres(self):
         # entries of the alignmentlist need to be dict's
@@ -494,28 +487,22 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
                          ['JSON \'alignmentlist\' \'target\' from '+
-                          '\'{"alignmentlist": [{"target": {"AAA": 1}, '+
-                          '"template": {"BBB": 2}}]}\' is missing the '+
-                          '\'name\' key'])
+                          '\'' + json_str + '\' is missing the \'name\' key'])
 
     def testAddAlignmentJsonAlnTrgTplNoString(self):
         # entries of the sequence dict in an aln need to be str
         parser = pm3argparse.PM3ArgumentParser(__doc__, action=False)
         parser.AddAlignment()
         parser.AssembleParser()
-        json_str = json.dumps({'alignmentlist': [{'target': {'name': 1,
-                                                             'seqres': 2},
-                                                  'template': {'name': 2,
-                                                               'seqres': 2}}]})
+        json_str = '{"alignmentlist": [{"target": {"name": 1, "seqres": 2}, '+\
+                   '"template": {"name": 2, "seqres": 2}}]}'
         with self.assertRaises(SystemExit) as ecd:
             parser.Parse(['--json', json_str])
         self.assertEqual(ecd.exception.code, 28)
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
                          ['JSON \'alignmentlist\' \'target\' \'name\' '+
-                          'from\'{"alignmentlist": [{"target": {"seqres": 2, '+
-                          '"name": 1}, "template": {"seqres": 2, "name": '+
-                          '2}}]}\' is not a <type \'str\'>'])
+                          'from\'' + json_str + '\' is not a <class \'str\'>'])
 
     def testAddAlignmentJsonAlnTplNoOffset(self):
         # no offset for template sequence
@@ -533,9 +520,7 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
                          ['JSON \'alignmentlist\' \'template\' from '+
-                          '\'{"alignmentlist": [{"target": {"seqres": "AA", '+
-                          '"name": "A"}, "template": {"seqres": "AA", '+
-                          '"name": "A"}}]}\' is missing the \'offset\' key'])
+                          '\'' + json_str + '\' is missing the \'offset\' key'])
 
     def testAddAlignmentJsonAlnTplOffsetStr(self):
         # entries of the alignmentlist need to be dict's
@@ -554,10 +539,7 @@ class PM3ArgParseTests(unittest.TestCase):
         self.assertEqual(len(self.log.messages['ERROR']), 1)
         self.assertEqual(self.log.messages['ERROR'],
                          ['JSON \'alignmentlist\' \'template\' \'offset\' '+
-                          'from\'{"alignmentlist": [{"target": {"seqres": '+
-                          '"AA", "name": "A"}, "template": {"seqres": "AA", '+
-                          '"name": "A", "offset": "0"}}]}\' is not a <type '+
-                          '\'int\'>'])
+                          'from\'' + json_str + '\' is not a <class \'int\'>'])
 
     def testAddAlignmentJsonAlnString(self):
         # entries of the alignmentlist need to be dict's
@@ -609,18 +591,7 @@ class PM3ArgParseTests(unittest.TestCase):
                          'AKE.B  ELTTRKDDQEETVRKRLVEYHQMTAPLIGYYYYSKEAEAGNTKY'+
                          'AKVDGTKPV---AEVRADLEK\n')
         self.assertEqual(len(opts.aln_sources), 1)
-        self.assertEqual(opts.aln_sources[0],
-                         '{"alignmentlist": [{"target": {"seqres": "APGAGKGTQ'+
-                         'AQFIMEKYGIPQISTGGGLRAAVKS---LGKQAKDIMDAGKLVTDELVIAL'+
-                         'VKERIAQEDCRNGFLLDGFPRTIPQADAMKEAGINVDYVLEF----ELIVD'+
-                         'RIVGRRVHAPSGRVYHVKFNPPKVEGKDDVTGEELTTRKDDQEETVRKRLV'+
-                         'EYHQMTAPLL--YYYYKEAEAGNTKYAKVDGTKPVAEVRADLEKILG", "'+
-                         'name": "target"}, "template": {"seqres": "APGAGKGTQ'+
-                         'AQFIMEKYGIPQISTGDMLRAAVKSGSELGKQAKDIMDAGKLVTDELVIAL'+
-                         'VKERIAQEDCRNGFLLDGFPRTIPQADAMKEAGINVDYVLEFDVPDELIVD'+
-                         'RIVGRRVHAPSGRVYHVKFNPPKVEGKDDVTGEELTTRKDDQEETVRKRLV'+
-                         'EYHQMTAPLIGYYYYSKEAEAGNTKYAKVDGTKPV---AEVRADLEK", "'+
-                         'name": "1AKE.B", "offset": 7}}]}')
+        self.assertEqual(opts.aln_sources[0], json_str)
 
     def testAddAlignmentJsonAlnMultiString(self):
         # test multiple alignments
@@ -681,35 +652,7 @@ class PM3ArgParseTests(unittest.TestCase):
         opts = parser.Parse(['--json', json_str])
 
         self.assertEqual(len(opts.aln_sources), 1)
-        self.assertEqual(opts.aln_sources[0],
-                         '{"alignmentlist": [{"target": {"seqres": "VLSPADKTN'+
-                         'VKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHF-DL-S----HGSAQ'+
-                         'VKGHGKKVADALTNAVAHVDDMPNALSALSDLHAHK-LRVDPVNFKLLSHC'+
-                         'LLVTLAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR", "name": " ta'+
-                         'rget 1"}, "template": {"seqres": "VLSEGEWQLVLHVWAKV'+
-                         'EADVAGHGQDILIRLFKSHPETLEKFDRFKHLKTEAEMKASEDLKKHGVTV'+
-                         'LTALGAILKKKGHHEAELKPLAQSHA-TKHKIPIKYLEFISEAIIHVLHSR'+
-                         'HPGDFGADAQGAMNKALELFRKDIAAKYK", "name": "3e7b90809b'+
-                         'd446a5", "offset": 1}}, {"target": {"seqres": "VLSP'+
-                         'ADKTNVKAAWGKVGAHAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQV'+
-                         'KGHGKKVADALTNAVAHVDDMPNALSALSDLHAHKLRVDPVNFKLLSHCLL'+
-                         'VTLAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR", "name": "targ'+
-                         'et 2"}, "template": {"seqres": "VLSPADKTNVKAAWGKVGA'+
-                         'HAGEYGAEALERMFLSFPTTKTYFPHFDLSHGSAQVKGHGKKVADALTNAV'+
-                         'AHVDDMPNALSALSDLHAHKLRVDPVNFKLLSHCLLVTLAAHLPAEFTPAV'+
-                         'HASLDKFLASVSTVLTSKYR", "name": "af828e69a5f2d0fd", '+
-                         '"offset": 2}}, {"target": {"seqres": "VLSPADKTNVKAA'+
-                         'WGKVGAHAGEYGAEALERMFLSFPTTKTYFPHF-DLS-----HGSAQVKGH'+
-                         'GKKVADALTNAVAHVDDMPNALSALSDLHAHK-LRVDPVNFKLLSHCLLVT'+
-                         'LAAHLPAEFTPAVHASLDKFLASVSTVLTSKYR", "name": "target'+
-                         ' 3"}, "template": {"seqres": "HLTPEEKSAVTALWGKVN--V'+
-                         'DEVGGEALGRLLVVYPWTQRFFESFGDLSTPDAVMGNPKVKAHGKKVLGAF'+
-                         'SDGLAHLDNLKGTFATLSELHC-DKLHVDPENFRLLGNVLVCVLAHHFGKE'+
-                         'FTPPVQAAYQKVVAGVANALAHKYH", "name": "9287755aa6aa27'+
-                         '58", "offset": 3}}, {"target": {"seqres": "VDPVNFKL'+
-                         'LSHCLLVTLAAHL", "name": "target 4"}, "template": {"'+
-                         'seqres": "ATPEQAQLVHKEIRKIVKDTC", "name": "e69e1ac0'+
-                         'a4b2554d", "offset": 4}}]}')
+        self.assertEqual(opts.aln_sources[0], json_str)
         self.assertEqual(len(opts.alignments), 4)
         # aln 1
         self.assertEqual(opts.alignments[0].GetCount(), 2)
@@ -792,7 +735,7 @@ class PM3ArgParseTests(unittest.TestCase):
         parser.AssembleParser()
         json_obj = _GetJsonTestObj()
         tmp_json = tempfile.NamedTemporaryFile(suffix='.json.gz')
-        with gzip.open(tmp_json.name, 'wb') as gfh:
+        with gzip.open(tmp_json.name, 'wt') as gfh:
             json.dump(json_obj, gfh)
         tmp_json.flush()
         opts = parser.Parse(['--json', tmp_json.name])
@@ -815,7 +758,7 @@ class PM3ArgParseTests(unittest.TestCase):
         parser.AddAlignment()
         parser.AssembleParser()
         json_obj = _GetJsonTestObj()
-        tmp_json = tempfile.NamedTemporaryFile(suffix='.json')
+        tmp_json = tempfile.NamedTemporaryFile(suffix='.json', mode='w')
         json.dump(json_obj, tmp_json)
         tmp_json.flush()
         opts = parser.Parse(['--json', tmp_json.name])
@@ -839,7 +782,7 @@ class PM3ArgParseTests(unittest.TestCase):
         parser.AddAlignment()
         parser.AssembleParser()
         json_obj = _GetJsonTestObj()
-        tmp_json = tempfile.NamedTemporaryFile(suffix='.json')
+        tmp_json = tempfile.NamedTemporaryFile(suffix='.json', mode='w')
         json.dump(json_obj, tmp_json)
         tmp_json.flush()
         json_str = json.dumps(json_obj)
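The hard-coded JSON literals in the expected error messages were fragile: json.dumps() emits keys in dict iteration order, which differs between Python 2 and Python 3, so the tests now compare against the very json_str handed to the parser. A small sketch of the hazard:

    import json

    obj = {"target": {"name": "AAA", "seqres": "AA"}}
    json_str = json.dumps(obj)
    # The round trip is stable, but the exact string depends on key order,
    # so asserting against a hand-written literal can break across versions.
    assert json.loads(json_str) == obj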
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index 180320c7279d719508bbbfbd6eaf9b3056e69d59..acaf8d0f77ff49b0e4b296003b70eae9022cd292 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -144,7 +144,7 @@ set(_SPHINX_INDEX_HTML "${_SPHINX_HTML_DIR}/index.html")
 add_custom_command(OUTPUT ${_SPHINX_INDEX_HTML}
                    DEPENDS doc_wait_for_rst "${_SPHINX_CONF_PY}" ${_RST_DEPS}
                            ${_DOC_MODULE_DEPS} doc_wait_for_modules
-                   COMMAND ${SPHINX_BINARY} -b html -c "${_RST_SOURCE_DIR}"
+                   COMMAND "${PYTHON_BINARY}" -m sphinx -b html -c "${_RST_SOURCE_DIR}"
                                      "${_RST_SOURCE_DIR}" "${_SPHINX_HTML_DIR}")
 add_custom_target(html DEPENDS ${_SPHINX_INDEX_HTML})
 
@@ -156,7 +156,7 @@ set(_SPHINX_INDEX_JSON "${_SPHINX_JSON_DIR}/index.fjson")
 add_custom_command(OUTPUT ${_SPHINX_INDEX_JSON}
                    DEPENDS doc_wait_for_rst "${_SPHINX_CONF_PY}" ${_RST_DEPS}
                            ${_DOC_MODULE_DEPS} doc_wait_for_modules
-                   COMMAND ${SPHINX_BINARY} -b json -c "${_RST_SOURCE_DIR}"
+                   COMMAND "${PYTHON_BINARY}" -m sphinx -b json -c "${_RST_SOURCE_DIR}"
                                      "${_RST_SOURCE_DIR}" "${_SPHINX_JSON_DIR}")
 add_custom_target(json DEPENDS ${_SPHINX_INDEX_JSON})
 
@@ -167,7 +167,7 @@ set(_SPHINX_MAN "${_SPHINX_MAN_DIR}/promod3.1")
 add_custom_command(OUTPUT ${_SPHINX_MAN}
                    DEPENDS doc_wait_for_rst "${_SPHINX_CONF_PY}" ${_RST_DEPS}
                            ${_DOC_MODULE_DEPS} doc_wait_for_modules
-                   COMMAND ${SPHINX_BINARY} -b man -c "${_RST_SOURCE_DIR}"
+                   COMMAND "${PYTHON_BINARY}" -m sphinx -b man -c "${_RST_SOURCE_DIR}"
                                       "${_RST_SOURCE_DIR}" "${_SPHINX_MAN_DIR}")
 add_custom_target(man DEPENDS ${_SPHINX_MAN})
 
@@ -180,7 +180,7 @@ add_dependencies(doc man)
 set(_SPHINX_LINKCHECK_DIR "${CMAKE_CURRENT_BINARY_DIR}/linkcheck")
 file(MAKE_DIRECTORY ${_SPHINX_LINKCHECK_DIR})
 add_custom_target(linkcheck
-                  COMMAND ${SPHINX_BINARY} -b linkcheck -c "${_RST_SOURCE_DIR}"
+                  COMMAND "${PYTHON_BINARY}" -m sphinx -b linkcheck -c "${_RST_SOURCE_DIR}"
                                  "${_RST_SOURCE_DIR}" "${_SPHINX_LINKCHECK_DIR}"
                   DEPENDS "${_SPHINX_CONF_PY}" ${_RST_DEPS} ${_DOC_MODULE_DEPS})
 
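Invoking the builder as "${PYTHON_BINARY} -m sphinx" avoids locating a sphinx-build script altogether: Sphinx's module entry point behaves like sphinx-build run with that same interpreter. The equivalent call issued from Python, assuming Sphinx is installed and with placeholder paths:

    import subprocess
    import sys

    # Same effect as "sphinx-build -b html SRC OUT", pinned to sys.executable.
    subprocess.run([sys.executable, "-m", "sphinx", "-b", "html",
                    "doc/source", "doc/build/html"], check=True)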
diff --git a/doc/cmake.py b/doc/cmake.py
index 5c0406d05f73760f78e32ae5559290e20deddcd2..38bd014ba93cbaed0512fe927db99a21d9f974d6 100644
--- a/doc/cmake.py
+++ b/doc/cmake.py
@@ -131,7 +131,7 @@ class _cmake_index_entry:
         self.desc = desc
 
     def __call__(self, title, targetid, main = 'main'):
-        return ('pair', u'%s ; %s' % (self.desc, title), targetid, main)
+        return ('pair', '%s ; %s' % (self.desc, title), targetid, main)
 
 _cmake_index_objs = {
     'command':    _cmake_index_entry('command'),
@@ -365,7 +365,7 @@ class CMakeDomain(Domain):
 
     def clear_doc(self, docname):
         to_clear = set()
-        for fullname, (fn, _) in self.data['objects'].items():
+        for fullname, (fn, _) in list(self.data['objects'].items()):
             if fn == docname:
                 to_clear.add(fullname)
         for fullname in to_clear:
@@ -382,7 +382,7 @@ class CMakeDomain(Domain):
                             contnode, target)
 
     def get_objects(self):
-        for refname, (docname, type) in self.data['objects'].items():
+        for refname, (docname, type) in list(self.data['objects'].items()):
             yield (refname, refname, type, docname, refname, 1)
 
 def setup(app):
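In Python 3, dict.items() returns a live view, and mutating the dictionary while iterating over a view raises RuntimeError; the list(...) wrappers added above take a snapshot first (conservative in clear_doc, where the deletion happens after the loop, but harmless). A minimal demonstration:

    d = {"a": 1, "b": 2}
    for key, _ in list(d.items()):   # snapshot, so mutating d below is safe
        if key == "a":
            del d[key]
    print(d)                         # {'b': 2}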
diff --git a/doc/tests/scripts/loop_backbone.py b/doc/tests/scripts/loop_backbone.py
index 79901d51689acc3de66d12f05f0130a7554fe400..d6151898ae8f39c2f19d29302bb767dc87340df8 100644
--- a/doc/tests/scripts/loop_backbone.py
+++ b/doc/tests/scripts/loop_backbone.py
@@ -10,11 +10,11 @@ bb_list = loop.BackboneList(sequence)
 
 # let's have a look at the set dihedral angles
 for i in range(len(bb_list)):
-    print "Looking at position %d" % i
+    print("Looking at position %d" % i)
     if i > 0:
-        print "phi: %.4f" % bb_list.GetPhiTorsion(i)
+        print("phi: %.4f" % bb_list.GetPhiTorsion(i))
     if i < len(bb_list)-1:
-        print "psi: %.4f" % bb_list.GetPsiTorsion(i)
+        print("psi: %.4f" % bb_list.GetPsiTorsion(i))
 
 
 # we now use a TorsionSampler to set random dihedral angles
diff --git a/doc/tests/scripts/loop_fragger.py b/doc/tests/scripts/loop_fragger.py
index c61d634127143ba8b57a72eeee85bb05d8505db5..7bf495a41599db0caa4ff2563c3531cef4d261c1 100644
--- a/doc/tests/scripts/loop_fragger.py
+++ b/doc/tests/scripts/loop_fragger.py
@@ -30,12 +30,12 @@ fragger.Fill(db, 1.0, 100)
 below_three = 0
 for i in range(len(fragger)):
     ca_rmsd = fragger[i].CARMSD(ref_backbone,True)
-    print "Fragment %d has CA RMSD of %.3f" % (i, ca_rmsd)
+    print("Fragment %d has CA RMSD of %.3f" % (i, ca_rmsd))
     if ca_rmsd < 3.0:
         below_three += 1
 
 fraction = float(below_three)/len(fragger)
-print "Fraction of fragments below 3A: %.2f" % fraction
+print("Fraction of fragments below 3A: %.2f" % fraction)
 
 # add into a cached map with ID based on frag_pos
 fragger_map = loop.FraggerMap()
diff --git a/doc/tests/scripts/loop_main.py b/doc/tests/scripts/loop_main.py
index 6e4f4f45cb419e8607f5a8e2f965c934cf9e4e2a..b5aa19e9d202c48f492c75d53652985aacd42ada 100644
--- a/doc/tests/scripts/loop_main.py
+++ b/doc/tests/scripts/loop_main.py
@@ -17,7 +17,7 @@ c_stem = prot.residues[frag_pos+frag_length-1]
 # extract potential loops from fragment database based on geometry
 frag_db = loop.LoadFragDB()
 fragments = frag_db.SearchDB(n_stem, c_stem, frag_length)
-print "Num. fragments found in FragDB: %d" % len(fragments)
+print("Num. fragments found in FragDB: %d" % len(fragments))
 # compare with reference
 struct_db = loop.LoadStructureDB()
 for i in range(len(fragments)):
@@ -25,15 +25,15 @@ for i in range(len(fragments)):
     bb_list = struct_db.GetBackboneList(n_stem, c_stem,
                                         fragments[i], frag_seq)
     ca_rmsd = bb_list.CARMSD(ref_backbone, True)
-    print "-> fragment %d has CA RMSD of %.3f" % (i, ca_rmsd)
+    print("-> fragment %d has CA RMSD of %.3f" % (i, ca_rmsd))
 
 # extract potential loops from fragment database based on sequence
 fragger = loop.Fragger(frag_seq)
 # for simplicity we just use a sequence similarity score
 fragger.AddSeqSimParameters(1.0, seq.alg.BLOSUM62)
 fragger.Fill(struct_db, 1.0, 5)
-print "Num. fragments found in Fragger: %d" % len(fragger)
+print("Num. fragments found in Fragger: %d" % len(fragger))
 # compare fraggers with reference
 for i in range(len(fragger)):
     ca_rmsd = fragger[i].CARMSD(ref_backbone, True)
-    print "-> fragment %d has CA RMSD of %.3f" % (i, ca_rmsd)
+    print("-> fragment %d has CA RMSD of %.3f" % (i, ca_rmsd))
diff --git a/doc/tests/scripts/loop_mm_sys_creation.py b/doc/tests/scripts/loop_mm_sys_creation.py
index 642c4dac3b7403b5eb7a389608c6b28eeaa5336c..45f8d6a785b13b085a97ec4283b7930b86f45efa 100644
--- a/doc/tests/scripts/loop_mm_sys_creation.py
+++ b/doc/tests/scripts/loop_mm_sys_creation.py
@@ -14,7 +14,7 @@ num_residues = len(res_list)
 all_atoms = loop.AllAtomPositions(res_list)
 # here full structure in res_indices but in practice this could
 # be just a subset of residues relevant to the loop of interest
-res_indices = range(num_residues)
+res_indices = list(range(num_residues))
 # define two loops (indices into res_indices list)
 loop_start_indices = [10, 20]
 loop_lengths = [6, 4]
@@ -29,9 +29,9 @@ mm_sys.SetupSystem(all_atoms, res_indices, loop_start_indices,
 
 # run simulation
 sim = mm_sys.GetSimulation()
-print "Potential energy before: %g" % sim.GetPotentialEnergy()
+print("Potential energy before: %g" % sim.GetPotentialEnergy())
 sim.ApplySD(0.01, 100)
-print "Potential energy after: %g" % sim.GetPotentialEnergy()
+print("Potential energy after: %g" % sim.GetPotentialEnergy())
 
 # extract new loop positions and store it
 mm_sys.ExtractLoopPositions(all_atoms, res_indices)
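range() returns a lazy sequence in Python 3 rather than a list; the explicit list(...) conversion above presumably keeps the call compatible with the SetupSystem binding, which expects a concrete list of indices. In isolation:

    num_residues = 5                        # placeholder value
    res_indices = list(range(num_residues))
    print(res_indices)                      # [0, 1, 2, 3, 4]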
diff --git a/doc/tests/scripts/modelling_allatomrelaxer.py b/doc/tests/scripts/modelling_allatomrelaxer.py
index c99244bcdd68e64d1cfb81cf6d717b1c44562e18..85bb4639eed79f9261c34ff4a807f30968b639e9 100644
--- a/doc/tests/scripts/modelling_allatomrelaxer.py
+++ b/doc/tests/scripts/modelling_allatomrelaxer.py
@@ -20,7 +20,7 @@ mm_sys = loop.MmSystemCreator(ff_lookup)
 relaxer = modelling.AllAtomRelaxer(sc_result, mm_sys)
 # relax loop
 pot_e = relaxer.Run(sc_result, 300, 0.1)
-print "Potential energy after: %g" % pot_e
+print("Potential energy after: %g" % pot_e)
 # update environment with solution
 env.SetEnvironment(sc_result.env_pos)
 # store all positions of environment
diff --git a/doc/tests/scripts/modelling_close_small_deletions.py b/doc/tests/scripts/modelling_close_small_deletions.py
index 80e8c3e590b9bd3d4d80449ee34bb7eac4c68263..9a7f2daca16fd0b95e356ea74631e4c3350040f1 100644
--- a/doc/tests/scripts/modelling_close_small_deletions.py
+++ b/doc/tests/scripts/modelling_close_small_deletions.py
@@ -8,6 +8,6 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', 'GGG-GGG'),
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
 # close small deletion
-print 'Number of gaps before: %d' % len(mhandle.gaps)
+print('Number of gaps before: %d' % len(mhandle.gaps))
 modelling.CloseSmallDeletions(mhandle)
-print 'Number of gaps after: %d' % len(mhandle.gaps)
+print('Number of gaps after: %d' % len(mhandle.gaps))
diff --git a/doc/tests/scripts/modelling_fill_loops_by_database.py b/doc/tests/scripts/modelling_fill_loops_by_database.py
index d7126b079f38a1c6ae54f82394e770202d27acb4..76c2aef9947e6780bc6e057e92fc70b2d227e48d 100644
--- a/doc/tests/scripts/modelling_fill_loops_by_database.py
+++ b/doc/tests/scripts/modelling_fill_loops_by_database.py
@@ -10,8 +10,8 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', seq_trg),
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
 # close gaps
-print 'Number of gaps before: %d' % len(mhandle.gaps)
+print('Number of gaps before: %d' % len(mhandle.gaps))
 modelling.FillLoopsByDatabase(mhandle, loop.LoadFragDB(),
                               loop.LoadStructureDB(),
                               loop.LoadTorsionSamplerCoil())
-print 'Number of gaps after: %d' % len(mhandle.gaps)
+print('Number of gaps after: %d' % len(mhandle.gaps))
diff --git a/doc/tests/scripts/modelling_fill_loops_by_monte_carlo.py b/doc/tests/scripts/modelling_fill_loops_by_monte_carlo.py
index daa860909ec68c0bbd9c2129b3856f20381a58b1..8a52c11e0cb519b9e49ca66da1af67fdd3bda82b 100644
--- a/doc/tests/scripts/modelling_fill_loops_by_monte_carlo.py
+++ b/doc/tests/scripts/modelling_fill_loops_by_monte_carlo.py
@@ -10,7 +10,7 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', seq_trg),
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
 # close gaps
-print 'Number of gaps before: %d' % len(mhandle.gaps)
+print('Number of gaps before: %d' % len(mhandle.gaps))
 modelling.FillLoopsByMonteCarlo(mhandle,
                               	loop.LoadTorsionSamplerCoil())
-print 'Number of gaps after: %d' % len(mhandle.gaps)
+print('Number of gaps after: %d' % len(mhandle.gaps))
diff --git a/doc/tests/scripts/modelling_loop_scoring.py b/doc/tests/scripts/modelling_loop_scoring.py
index b6456842b3765897d54c6fd9bb5060a142604291..c90169b79dd57439fd70615eabeb4e5a346b2c6b 100644
--- a/doc/tests/scripts/modelling_loop_scoring.py
+++ b/doc/tests/scripts/modelling_loop_scoring.py
@@ -9,7 +9,7 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', seq_trg),
                           seq.CreateSequence('tpl', seq_tpl))
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
-print("Number of gaps in raw model: %d" % len(mhandle.gaps))
+print("Number of gaps in raw model: %d" % len(mhandle.gaps))
 
 # setup default scorers for modelling handle
 modelling.SetupDefaultBackboneScoring(mhandle)
@@ -22,7 +22,7 @@ torsion_sampler = loop.LoadTorsionSamplerCoil()
 
 # get data for gap to close
 gap = mhandle.gaps[0].Copy()
-print("Gap to close: %s" % str(gap))
+print("Gap to close: %s" % str(gap))
 n_stem = gap.before
 c_stem = gap.after
 start_resnum = n_stem.GetNumber().GetNum()
@@ -31,20 +31,20 @@ start_idx = start_resnum - 1   # res. num. starts at 1
 # get loop candidates from FragDB
 candidates = modelling.LoopCandidates.FillFromDatabase(\
                 n_stem, c_stem, gap.full_seq, frag_db, structure_db)
-print("Number of loop candidates: %d" % len(candidates))
+print("Number of loop candidates: %d" % len(candidates))
 
 # all scores will be kept in a score container which we update
 all_scores = modelling.ScoreContainer()
 # the keys used to identify scores are globally defined
-print("Stem RMSD key = '%s'" \
-      % modelling.ScoringWeights.GetStemRMSDsKey())
-print("Profile keys = ['%s', '%s']" \
+print("Stem RMSD key = '%s'" \
+      % modelling.ScoringWeights.GetStemRMSDsKey())
+print("Profile keys = ['%s', '%s']" \
       % (modelling.ScoringWeights.GetSequenceProfileScoresKey(),
-         modelling.ScoringWeights.GetStructureProfileScoresKey()))
-print("Backbone scoring keys = %s" \
-      % str(modelling.ScoringWeights.GetBackboneScoringKeys()))
-print("All atom scoring keys = %s" \
-      % str(modelling.ScoringWeights.GetAllAtomScoringKeys()))
+         modelling.ScoringWeights.GetStructureProfileScoresKey()))
+print("Backbone scoring keys = %s" \
+      % str(modelling.ScoringWeights.GetBackboneScoringKeys()))
+print("All atom scoring keys = %s" \
+      % str(modelling.ScoringWeights.GetAllAtomScoringKeys()))
 
 # get stem RMSDs for each candidate (i.e. how well does it fit?)
 # -> this must be done before CCD to be meaningful
@@ -52,7 +52,7 @@ candidates.CalculateStemRMSDs(all_scores, n_stem, c_stem)
 
 # close the candidates with CCD
 orig_indices = candidates.ApplyCCD(n_stem, c_stem, torsion_sampler)
-print("Number of closed loop candidates: %d" % len(candidates))
+print("Number of closed loop candidates: %d" % len(candidates))
 
 # get subset of previously computed scores
 all_scores = all_scores.Extract(orig_indices)
@@ -77,10 +77,10 @@ scores = all_scores.LinearCombine(weights)
 arg_sorted_scores = sorted([(v,i) for i,v in enumerate(scores)])
 print("Ranked candidates: score, index")
 for v,i in arg_sorted_scores:
-  print("%g, %d" % (v,i))
+  print("%g, %d" % (v,i))
 
 # insert best into model, update scorers and clear gaps
 best_candidate = candidates[arg_sorted_scores[0][1]]
 modelling.InsertLoopClearGaps(mhandle, best_candidate, gap)
-print("Number of gaps in closed model: %d" % len(mhandle.gaps))
+print("Number of gaps in closed model: %d" % len(mhandle.gaps))
 io.SavePDB(mhandle.model, "model.pdb")
diff --git a/doc/tests/scripts/modelling_merge_gaps_by_distance.py b/doc/tests/scripts/modelling_merge_gaps_by_distance.py
index 1606d30d3813c3f67c6e1ee3caf70e3a0ec84e7e..6380685542977ed995d513fb205e23ed18523eb8 100644
--- a/doc/tests/scripts/modelling_merge_gaps_by_distance.py
+++ b/doc/tests/scripts/modelling_merge_gaps_by_distance.py
@@ -10,6 +10,6 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', seq_trg),
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
 # merge gaps
-print 'Number of gaps before: %d' % len(mhandle.gaps)
+print('Number of gaps before: %d' % len(mhandle.gaps))
 modelling.MergeGapsByDistance(mhandle, 0)
-print 'Number of gaps after: %d' % len(mhandle.gaps)
+print('Number of gaps after: %d' % len(mhandle.gaps))
diff --git a/doc/tests/scripts/modelling_model_termini.py b/doc/tests/scripts/modelling_model_termini.py
index 3b312a7625b94f6813ef0572de95e1d47eb533a6..cf0c39947e506f56e9022b6e9b098ad6d1529ead 100644
--- a/doc/tests/scripts/modelling_model_termini.py
+++ b/doc/tests/scripts/modelling_model_termini.py
@@ -10,7 +10,7 @@ aln = seq.CreateAlignment(seq.CreateSequence('trg', seq_trg),
 aln.AttachView(1, tpl.CreateFullView())
 mhandle = modelling.BuildRawModel(aln)
 # close gaps
-print 'Number of gaps before: %d' % len(mhandle.gaps)
+print('Number of gaps before: %d' % len(mhandle.gaps))
 modelling.ModelTermini(mhandle,
                        loop.LoadTorsionSamplerCoil())
-print 'Number of gaps after: %d' % len(mhandle.gaps)
+print('Number of gaps after: %d' % len(mhandle.gaps))
diff --git a/doc/tests/scripts/scoring_main.py b/doc/tests/scripts/scoring_main.py
index a5bb94771e369e18b2f14720e4c312689386e566..23ac574d21222359e317de7bcb6f9a1782c1c097 100644
--- a/doc/tests/scripts/scoring_main.py
+++ b/doc/tests/scripts/scoring_main.py
@@ -18,5 +18,5 @@ cbeta_scorer.AttachEnvironment(score_env)
 # calculate scores for 10 residues starting at residue number 23.
 # all required structural information comes from the environment
 # that can evolve as the modelling proceeds.
-print "Clash-Score", clash_scorer.CalculateScore(23, 10)
-print "CBeta-Score", cbeta_scorer.CalculateScore(23, 10)
+print("Clash-Score", clash_scorer.CalculateScore(23, 10))
+print("CBeta-Score", cbeta_scorer.CalculateScore(23, 10))
diff --git a/doc/tests/scripts/unittest_sidechain_reconstruction.py b/doc/tests/scripts/unittest_sidechain_reconstruction.py
index 083416eb7b838380f457bf217d1d53a8dbea4494..f9a1447b8520fa164d94bf54cefd42561dbad48c 100644
--- a/doc/tests/scripts/unittest_sidechain_reconstruction.py
+++ b/doc/tests/scripts/unittest_sidechain_reconstruction.py
@@ -5,13 +5,13 @@ import os
 
 class ReconstructTests(unittest.TestCase):
     def testReconstruct(self):
-    	in_file = os.path.join('data', '1eye.pdb')
-    	ref_file = os.path.join('data', '1eye_rec.pdb')
-    	# get and reconstruct 1eye
-    	prot = io.LoadPDB(in_file)
-    	modelling.ReconstructSidechains(prot, keep_sidechains=False)
-    	# compare with reference solution
-    	prot_rec = io.LoadPDB(ref_file)
+        in_file = os.path.join('data', '1eye.pdb')
+        ref_file = os.path.join('data', '1eye_rec.pdb')
+        # get and reconstruct 1eye
+        prot = io.LoadPDB(in_file)
+        modelling.ReconstructSidechains(prot, keep_sidechains=False)
+        # compare with reference solution
+        prot_rec = io.LoadPDB(ref_file)
         self.assertEqual(prot.GetAtomCount(), prot_rec.GetAtomCount())
 
 if __name__ == "__main__":
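The re-indentation above is not cosmetic: Python 3 raises TabError for blocks that mix tabs and spaces, which Python 2 silently accepted by treating a tab as eight spaces. A compact demonstration:

    # One block indented first with a tab, then with spaces:
    src = "if True:\n\tx = 1\n        y = 2\n"
    try:
        compile(src, "<demo>", "exec")
    except TabError as err:
        print(err)   # inconsistent use of tabs and spaces in indentation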
diff --git a/doc/tests/test_doctests.py b/doc/tests/test_doctests.py
index d988f653f1558b27a727c26623a5cc05efbb99b3..ee66a9fc4d696213e57acaa185066702a98b5f18 100644
--- a/doc/tests/test_doctests.py
+++ b/doc/tests/test_doctests.py
@@ -62,7 +62,7 @@ class DocTests(unittest.TestCase):
         job = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
         sout, serr = job.communicate()
-        return job.returncode, sout, serr
+        return job.returncode, sout.decode(), serr.decode()
 
     def checkPMRun(self, script_name, arguments=[], expect_rcode=0,
                    expect_stdout=None, expect_stderr=None):
@@ -111,6 +111,7 @@ class DocTests(unittest.TestCase):
         # check that no pyc file was created
         self.assertFalse(os.path.exists(pyc_file))
 
+
     def testActionVerbose(self):
         # to emulate action tests, change to actions folder
         cur_dir = os.getcwd()
@@ -121,18 +122,19 @@ class DocTests(unittest.TestCase):
         # check return code and parts of output
         self.assertEqual(rcode, 0)
         out_lines = serr.splitlines()
-        self.assertRegexpMatches(out_lines[0].strip(), "stdout of '.*pm help'")
+        self.assertRegex(out_lines[0].strip(), "stdout of '.*pm help'")
         self.assertEqual(out_lines[1].strip(), "------")
         line_nr = 2
         while not out_lines[line_nr].strip() == "------":
             line_nr += 1
         self.assertGreater(line_nr, 2)
-        self.assertRegexpMatches(out_lines[line_nr+1], "stderr of '.*pm help'")
+        self.assertRegex(out_lines[line_nr+1], "stderr of '.*pm help'")
         self.assertEqual(out_lines[line_nr+2].strip(), "------")
         self.assertEqual(out_lines[line_nr+3].strip(), "------")
         # go back to proper folder (important!)
         os.chdir(cur_dir)
 
+
     ################################################################
 
     def testCorePm3argparse(self):
@@ -146,7 +148,7 @@ class DocTests(unittest.TestCase):
         rcode, sout, serr = self.runPM(script_path, ['-h'])
         # check return code and some of output
         out_lines = serr.splitlines()
-        self.assertRegexpMatches(out_lines[0], "usage: .*")
+        self.assertRegex(out_lines[0], "usage: .*")
         self.assertEqual(out_lines[2].strip(),
                          "Place the description of your script right in the file and import")
         self.assertEqual(out_lines[3].strip(),
@@ -261,7 +263,7 @@ class DocTests(unittest.TestCase):
             import matplotlib.pyplot as plt
             import numpy as np
         except ImportError:
-            print 'Missing python libraries, skipping testLoopTorsionSampler...'
+            print('Missing python libraries, skipping testLoopTorsionSampler...')
             return
         # run it
         self.checkPMRun('loop_torsion_sampler.py', [], 0)
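communicate() returns bytes streams under Python 3, so runPM() now decodes once at the boundary and the assertions keep working on str. The pattern in isolation:

    import subprocess
    import sys

    job = subprocess.Popen([sys.executable, "-c", "print('hi')"],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    sout, serr = job.communicate()     # bytes under Python 3
    print(sout.decode().strip())       # decoded once -> 'hi' as str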
diff --git a/extras/code_generation/amino_acid_atoms.py b/extras/code_generation/amino_acid_atoms.py
index be8d8cfce8ec1cd7fe33a4ebba96c5a0b09f4161..5907e7e1f659daf5a8e650985c1203f81b361813 100644
--- a/extras/code_generation/amino_acid_atoms.py
+++ b/extras/code_generation/amino_acid_atoms.py
@@ -26,7 +26,7 @@ bb_atom_names = ["N", "CA", "C", "O", "CB"]
 # HELPER
 def CheckLine(line, new_txt):
   if len(line) + len(new_txt) > 80:
-      print line[:-1]
+      print(line[:-1])
       return "  " + new_txt
   else:
     return my_txt + new_txt
@@ -64,26 +64,26 @@ for i in range(conop.XXX):
     old_names = cur_names
     cur_names = ["CB", "CG", "CD1", "CD2", "CE2", "NE1", "CE3", "CZ3", "CH2", "CZ2"]
     assert(sorted(old_names) == sorted(cur_names))
-  print "DONE", aa_name, cur_names, aa_names[-1]
+  print("DONE", aa_name, cur_names, aa_names[-1])
   atom_names.append(cur_names)
 
-print "XXX_NUM_ATOMS =", sum(len(atom_name) for atom_name in atom_names)
+print("XXX_NUM_ATOMS =", sum(len(atom_name) for atom_name in atom_names))
 
 # print out backbone
-print "\n" + "-" * 79
-print "\nenum BackboneAtomIndex {"
+print("\n" + "-" * 79)
+print("\nenum BackboneAtomIndex {")
 my_txt = "  "
 for name in bb_atom_names:
   my_txt += "BB_%s_INDEX" % name
   if name != bb_atom_names[-1]:
     my_txt += ", "
-print my_txt
-print "};"
+print(my_txt)
+print("};")
 
 # print out aa-enums
 for i in range(conop.XXX):
   aa_name = conop.AminoAcidToResidueName(conop.AminoAcid(i))
-  print "\nenum %sAtomIndex {" % aa_names[i]
+  print("\nenum %sAtomIndex {" % aa_names[i])
   my_txt = "  "
   for name in atom_names[i]:
     new_txt = "%s_%s_INDEX" % (aa_name, name)
@@ -95,13 +95,13 @@ for i in range(conop.XXX):
   if len(atom_names[i]) == 0:
     new_txt += " = 4"
   my_txt = CheckLine(my_txt, new_txt)
-  print my_txt
-  print "};"
+  print(my_txt)
+  print("};")
 
 # print out all atoms
 aaa_list = list()
-print "\n" + "-" * 79
-print "\nenum AminoAcidAtom {"
+print("\n" + "-" * 79)
+print("\nenum AminoAcidAtom {")
 my_txt = "  "
 for i in range(conop.XXX):
   aa_name = conop.AminoAcidToResidueName(conop.AminoAcid(i))
@@ -110,35 +110,35 @@ for i in range(conop.XXX):
     aaa_list.append(cur_aaa)
     my_txt = CheckLine(my_txt, cur_aaa + ", ")
 my_txt = CheckLine(my_txt, "XXX_NUM_ATOMS")
-print my_txt
-print "};"
+print(my_txt)
+print("};")
 
 # print out element types
-print "\n" + "-" * 79
-print "\nELEMENTS: ", element_types
+print("\n" + "-" * 79)
+print("\nELEMENTS: ", element_types)
 
 # code snippets (assumes "using namespace ost::conop")
-print "\n" + "-" * 79
-print "// AA-LUT\n" + "-" * 79
+print("\n" + "-" * 79)
+print("// AA-LUT\n" + "-" * 79)
 for i in range(conop.XXX):
   aa_name = conop.AminoAcidToResidueName(conop.AminoAcid(i))
-  print "olc_[%s] = '%s';" \
-        % (aa_name, conop.ResidueNameToOneLetterCode(aa_name))
-  print "first_aaa_[%s] = %s_N;" \
-        % (aa_name, aa_name)
-  print "num_atoms_[%s] = %s_NUM_ATOMS;" \
-        % (aa_name, aa_name)
+  print("olc_[%s] = '%s';" \
+        % (aa_name, conop.ResidueNameToOneLetterCode(aa_name)))
+  print("first_aaa_[%s] = %s_N;" \
+        % (aa_name, aa_name))
+  print("num_atoms_[%s] = %s_NUM_ATOMS;" \
+        % (aa_name, aa_name))
 # last one
-print "olc_[XXX] = 'X';"
-print "first_aaa_[XXX] = XXX_NUM_ATOMS;"
-print "num_atoms_[XXX] = 0;"
+print("olc_[XXX] = 'X';")
+print("first_aaa_[XXX] = XXX_NUM_ATOMS;")
+print("num_atoms_[XXX] = 0;")
 
-print "\n" + "-" * 79
-print "// AAA-LUT\n" + "-" * 79
+print("\n" + "-" * 79)
+print("// AAA-LUT\n" + "-" * 79)
 for aaa in aaa_list:
-  print 'atom_name_[%s] = "%s";' % (aaa, aaa[4:])
+  print('atom_name_[%s] = "%s";' % (aaa, aaa[4:]))
 # last one
-print 'atom_name_[XXX_NUM_ATOMS] = "UNK";'
+print('atom_name_[XXX_NUM_ATOMS] = "UNK";')
 
 # print "\n" + "-" * 79
 # print "GetIndex\n" + "-" * 79
@@ -156,8 +156,8 @@ print 'atom_name_[XXX_NUM_ATOMS] = "UNK";'
 #   print '}'
 
 # bonds
-print "\n" + "-" * 79
-print "// heavy-atom bonding\n" + "-" * 79
+print("\n" + "-" * 79)
+print("// heavy-atom bonding\n" + "-" * 79)
 for i in range(conop.XXX):
   aa_name = conop.AminoAcidToResidueName(conop.AminoAcid(i))
   c = comp_lib.FindCompound(aa_name)
@@ -166,8 +166,8 @@ for i in range(conop.XXX):
     idx_str_one = NameIndex(aa_name, anames[b.atom_one], atom_names[i])
     idx_str_two = NameIndex(aa_name, anames[b.atom_two], atom_names[i])
     if not (idx_str_one is None or idx_str_two is None):
-      print "bonds_[%s].push_back(BondInfo(%s, %s, %d));" \
-            % (aa_name, idx_str_one, idx_str_two, b.order)
+      print("bonds_[%s].push_back(BondInfo(%s, %s, %d));" \
+            % (aa_name, idx_str_one, idx_str_two, b.order))
 
 # check elements for all heavy sidechain atoms for all amino acids
 comp_lib = conop.GetDefaultLib()
@@ -177,5 +177,5 @@ for i in range(conop.XXX):
   for a in c.atom_specs:
     # check name consistency
     if a.name[:1] != a.element:
-      print "MISMATCH", aa_name, a.name, a.element
+      print("MISMATCH", aa_name, a.name, a.element)
       
\ No newline at end of file
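
The hunks above apply the central Python 3 change in this patch: print turns
from a statement into a built-in function. A minimal sketch of the pattern,
using made-up values rather than code from this script:

    # Python 2: print is a statement; a bare "print" emits a newline
    #     print "DONE", aa_name, cur_names
    #     print
    # Python 3: print() is a function; arguments are space-separated by
    # default, and print() without arguments emits a newline
    print("DONE", "ALA", ["N", "CA", "C", "O", "CB"])
    print()
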
diff --git a/extras/code_generation/amino_acid_atoms_hydrogens.py b/extras/code_generation/amino_acid_atoms_hydrogens.py
index 52c8c6cc44179b6adb4c9863d18f5b723d7918ad..8b4375e86b95280d6e0908cde1af30f67a03c8c0 100644
--- a/extras/code_generation/amino_acid_atoms_hydrogens.py
+++ b/extras/code_generation/amino_acid_atoms_hydrogens.py
@@ -24,7 +24,7 @@ from ost.mol import mm
 # HELPER
 def CheckLine(line, new_txt):
   if len(line) + len(new_txt) > 80:
-      print line[:-1]
+      print(line[:-1])
       return "  " + new_txt
   else:
     return my_txt + new_txt
@@ -128,17 +128,17 @@ for i in range(conop.XXX):
   atom_names_charmm.append(new_names_charmm)
   atom_names_amber.append(new_names_amber)
   atom_names[i] = new_names + atom_names[i]
-  print "DONE", aa_tlc, aa_names[i]
+  print("DONE", aa_tlc, aa_names[i])
   if aa_tlc == "PRO":
-    print "-> IDX:   ", atom_names[i]
-    print "-> PDB:    ", atom_names_pdb[i]
-    print "-> CHARMM:", atom_names_charmm[i]
-    print "-> AMBER: ", atom_names_amber[i]
+    print("-> IDX:   ", atom_names[i])
+    print("-> PDB:    ", atom_names_pdb[i])
+    print("-> CHARMM:", atom_names_charmm[i])
+    print("-> AMBER: ", atom_names_amber[i])
   else:
-    print "-> IDX:   ", atom_names[i]
-    print "-> PDB:     ", atom_names_pdb[i]
-    print "-> CHARMM:", atom_names_charmm[i]
-    print "-> AMBER:  ", atom_names_amber[i]
+    print("-> IDX:   ", atom_names[i])
+    print("-> PDB:     ", atom_names_pdb[i])
+    print("-> CHARMM:", atom_names_charmm[i])
+    print("-> AMBER:  ", atom_names_amber[i])
   assert(len(atom_names[i]) == len(atom_names_pdb[i]))
   assert(len(atom_names[i]) == len(atom_names_charmm[i]))
   assert(len(atom_names[i]) == len(atom_names_amber[i]))
@@ -162,7 +162,7 @@ for i in range(conop.XXX):
       # get pdb name
       idx = atom_names_charmm[i].index(h_name)
       aah = "%s_%s" % (aa_tlc, atom_names_pdb[i][idx])
-      assert(not anchors.has_key(aah))
+      assert(aah not in anchors)
       anchors[aah] = NameIndex(aa_tlc, anchor)
   # add terminal N
   anchors["%s_H1" % (aa_tlc)] = NameIndex(aa_tlc, "N")
@@ -170,17 +170,17 @@ for i in range(conop.XXX):
   if aa_tlc != "PRO":
     anchors["%s_H3" % (aa_tlc)] = NameIndex(aa_tlc, "N")
 
-print "XXX_NUM_HYDROGENS =", sum(len(atom_name) for atom_name in atom_names)
+print("XXX_NUM_HYDROGENS =", sum(len(atom_name) for atom_name in atom_names))
 ########################################################################
 
-print "\n" + "="*77
-print "I META PROGRAM and HENCE I AM"
-print "="*77
+print("\n" + "="*77)
+print("I META PROGRAM and HENCE I AM")
+print("="*77)
 
 # print out aa-enums
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
-  print "\nenum %sHydrogenIndex {" % aa_names[i]
+  print("\nenum %sHydrogenIndex {" % aa_names[i])
   my_txt = "  "
   for name in atom_names[i]:
     new_txt = "%s_%s_INDEX" % (aa_tlc, name)
@@ -188,11 +188,11 @@ for i in range(conop.XXX):
     my_txt = CheckLine(my_txt, new_txt)
   new_txt = "%s_NUM_HYDROGENS" % aa_tlc
   my_txt = CheckLine(my_txt, new_txt)
-  print my_txt
-  print "};"
+  print(my_txt)
+  print("};")
 
 # print out all atoms
-print "\nenum AminoAcidHydrogen {"
+print("\nenum AminoAcidHydrogen {")
 my_txt = "  "
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
@@ -200,47 +200,47 @@ for i in range(conop.XXX):
     aah = "%s_%s" % (aa_tlc, name)
     my_txt = CheckLine(my_txt, aah + ", ")
 my_txt = CheckLine(my_txt, "XXX_NUM_HYDROGENS")
-print my_txt
-print "};"
+print(my_txt)
+print("};")
 
 # code snippets (assumes "using namespace ost::conop")
-print "\n" + "-"*77
-print "\n// HYDROGEN AA-LUT"
+print("\n" + "-"*77)
+print("\n// HYDROGEN AA-LUT")
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
-  print "first_aah_[%s] = %s_%s;" \
-        % (aa_tlc, aa_tlc, atom_names[i][0])
-  print "num_hydrogens_[%s] = %s_NUM_HYDROGENS;" \
-        % (aa_tlc, aa_tlc)
+  print("first_aah_[%s] = %s_%s;" \
+        % (aa_tlc, aa_tlc, atom_names[i][0]))
+  print("num_hydrogens_[%s] = %s_NUM_HYDROGENS;" \
+        % (aa_tlc, aa_tlc))
 # last one
-print 'first_aah_[XXX] = XXX_NUM_HYDROGENS;'
-print 'num_hydrogens_[XXX] = 0;'
+print('first_aah_[XXX] = XXX_NUM_HYDROGENS;')
+print('num_hydrogens_[XXX] = 0;')
 
 # generate code
-print "\n" + "-"*77
-print "\n// HYDROGEN AAA-LUT"
+print("\n" + "-"*77)
+print("\n// HYDROGEN AAA-LUT")
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
   for idx_name, pdb_name in zip(atom_names[i], atom_names_pdb[i]):
     aah = "%s_%s" % (aa_tlc, idx_name)
-    print 'hydrogen_name_[%s] = "%s";' % (aah, pdb_name)
-    print 'anchor_atom_idx_[%s] = %s;' % (aah, anchors[aah])
+    print('hydrogen_name_[%s] = "%s";' % (aah, pdb_name))
+    print('anchor_atom_idx_[%s] = %s;' % (aah, anchors[aah]))
 # last one
-print 'hydrogen_name_[XXX_NUM_HYDROGENS] = "UNK";'
-print 'anchor_atom_idx_[XXX_NUM_HYDROGENS] = XXX_NUM_ATOMS;'
+print('hydrogen_name_[XXX_NUM_HYDROGENS] = "UNK";')
+print('anchor_atom_idx_[XXX_NUM_HYDROGENS] = XXX_NUM_ATOMS;')
 
 # amber has less renamings so we do that first
-print "\n// TODO: add loop to copy hydrogen_name_ to hydrogen_name_amber_"
+print("\n// TODO: add loop to copy hydrogen_name_ to hydrogen_name_amber_")
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
   for j in range(len(atom_names[i])):
     if atom_names_amber[i][j] != atom_names_pdb[i][j]:
-      print 'hydrogen_name_amber_[%s_%s] = "%s";' \
-            % (aa_tlc, atom_names[i][j], atom_names_amber[i][j])
-print "\n// TODO: add loop to copy hydrogen_name_amber_ to hydrogen_name_charmm_"
+      print('hydrogen_name_amber_[%s_%s] = "%s";' \
+            % (aa_tlc, atom_names[i][j], atom_names_amber[i][j]))
+print("\n// TODO: add loop to copy hydrogen_name_amber_ to hydrogen_name_charmm_")
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
   for j in range(len(atom_names[i])):
     if atom_names_amber[i][j] != atom_names_charmm[i][j]:
-      print 'hydrogen_name_charmm_[%s_%s] = "%s";' \
-            % (aa_tlc, atom_names[i][j], atom_names_charmm[i][j])
+      print('hydrogen_name_charmm_[%s_%s] = "%s";' \
+            % (aa_tlc, atom_names[i][j], atom_names_charmm[i][j]))
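
Besides the print conversions, this file replaces dict.has_key(), which was
removed in Python 3, with the "in" operator, the spelling that works on both
interpreter lines. A minimal sketch with a hypothetical anchors dict:

    anchors = {"ALA_H1": "ALA_N_INDEX"}
    # Python 2 only:      anchors.has_key("ALA_H1")
    # Python 2 and 3:
    assert "ALA_H1" in anchors
    assert "GLY_HA2" not in anchors
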
diff --git a/extras/code_generation/backbone_score_unittest.py b/extras/code_generation/backbone_score_unittest.py
index a01583ae95e8ddaf14c85d9475ee9b22e5b7c08a..f8c7737b1c1b40884cc86389b56ffc33a9235775 100644
--- a/extras/code_generation/backbone_score_unittest.py
+++ b/extras/code_generation/backbone_score_unittest.py
@@ -88,56 +88,56 @@ def WriteValueVectors(selection, environment, start_rnum,
 
   # header
   gaps = " " * (15 + len(vector_base_name))
-  print "void Get%sScores(std::vector<Real>& %s_cb_packing," \
-        % (vector_base_name.title(), vector_base_name)
-  print "%sstd::vector<Real>& %s_cb," % (gaps, vector_base_name)
-  print "%sstd::vector<Real>& %s_reduced," % (gaps, vector_base_name)
-  print "%sstd::vector<Real>& %s_torsion," % (gaps, vector_base_name)
-  print "%sstd::vector<Real>& %s_hbond," % (gaps, vector_base_name)
-  print "%sstd::vector<Real>& %s_ss_agreement) {" % (gaps, vector_base_name)
+  print("void Get%sScores(std::vector<Real>& %s_cb_packing," \
+        % (vector_base_name.title(), vector_base_name))
+  print("%sstd::vector<Real>& %s_cb," % (gaps, vector_base_name))
+  print("%sstd::vector<Real>& %s_reduced," % (gaps, vector_base_name))
+  print("%sstd::vector<Real>& %s_torsion," % (gaps, vector_base_name))
+  print("%sstd::vector<Real>& %s_hbond," % (gaps, vector_base_name))
+  print("%sstd::vector<Real>& %s_ss_agreement) {" % (gaps, vector_base_name))
 
   # values
   for s in cb_packing_scores:
-    print "  " + vector_base_name + "_cb_packing.push_back(%.4f);" % (s)
-  print
+    print("  " + vector_base_name + "_cb_packing.push_back(%.4f);" % (s))
+  print()
 
   for s in cb_scores:
-    print "  " + vector_base_name + "_cb.push_back(%.4f);"%(s)
-  print
+    print("  " + vector_base_name + "_cb.push_back(%.4f);"%(s))
+  print()
 
   for s in reduced_scores:
-    print "  " + vector_base_name + "_reduced.push_back(%.4f);"%(s)
-  print
+    print("  " + vector_base_name + "_reduced.push_back(%.4f);"%(s))
+  print()
 
   for s in torsion_scores:
-    print "  " + vector_base_name + "_torsion.push_back(%.4f);"%(s)
-  print
+    print("  " + vector_base_name + "_torsion.push_back(%.4f);"%(s))
+  print()
 
   for s in hbond_scores:
-    print "  " + vector_base_name + "_hbond.push_back(%.4f);"%(s)
-  print
+    print("  " + vector_base_name + "_hbond.push_back(%.4f);"%(s))
+  print()
 
   for s in ss_agreement_scores:
-    print "  " + vector_base_name + "_ss_agreement.push_back(%.4f);"%(s)
-  print "}"
-  print
+    print("  " + vector_base_name + "_ss_agreement.push_back(%.4f);"%(s))
+  print("}")
+  print()
 
   # use
-  print "  std::vector<Real> %s_cb_packing;" % (vector_base_name)
-  print "  std::vector<Real> %s_cb;" % (vector_base_name)
-  print "  std::vector<Real> %s_reduced;" % (vector_base_name)
-  print "  std::vector<Real> %s_torsion;" % (vector_base_name)
-  print "  std::vector<Real> %s_hbond;" % (vector_base_name)
-  print "  std::vector<Real> %s_ss_agreement;" % (vector_base_name)
+  print("  std::vector<Real> %s_cb_packing;" % (vector_base_name))
+  print("  std::vector<Real> %s_cb;" % (vector_base_name))
+  print("  std::vector<Real> %s_reduced;" % (vector_base_name))
+  print("  std::vector<Real> %s_torsion;" % (vector_base_name))
+  print("  std::vector<Real> %s_hbond;" % (vector_base_name))
+  print("  std::vector<Real> %s_ss_agreement;" % (vector_base_name))
   gaps = " " * (12 + len(vector_base_name))
-  print "  Get%sScores(%s_cb_packing," \
-        % (vector_base_name.title(), vector_base_name)
-  print "%s%s_cb," % (gaps, vector_base_name)
-  print "%s%s_reduced," % (gaps, vector_base_name)
-  print "%s%s_torsion," % (gaps, vector_base_name)
-  print "%s%s_hbond," % (gaps, vector_base_name)
-  print "%s%s_ss_agreement);" % (gaps, vector_base_name)
-  print
+  print("  Get%sScores(%s_cb_packing," \
+        % (vector_base_name.title(), vector_base_name))
+  print("%s%s_cb," % (gaps, vector_base_name))
+  print("%s%s_reduced," % (gaps, vector_base_name))
+  print("%s%s_torsion," % (gaps, vector_base_name))
+  print("%s%s_hbond," % (gaps, vector_base_name))
+  print("%s%s_ss_agreement);" % (gaps, vector_base_name))
+  print()
 
 
 selection = prot.Select("rnum=%s:%s"%(str(start_rnum), str(end_rnum)))
diff --git a/extras/code_generation/hydrogen_rules.py b/extras/code_generation/hydrogen_rules.py
index cdcac7e2f869b0b598c3d97d9c56b3916173349e..a5d954773b40292ed95ac842e4ef861dd08b49e5 100644
--- a/extras/code_generation/hydrogen_rules.py
+++ b/extras/code_generation/hydrogen_rules.py
@@ -79,7 +79,7 @@ for i in range(conop.XXX):
 # get hydrogen construction rules and generate code
 for i in range(conop.XXX):
   aa_tlc = conop.AminoAcidToResidueName(conop.AminoAcid(i))
-  print "\n  // " + aa_tlc
+  print("\n  // " + aa_tlc)
   for rule in GetHydrogenRules(ff, aa_tlc):
     # get data
     rule_number = rule[0]
@@ -98,7 +98,7 @@ for i in range(conop.XXX):
     anchors = [GetHeavyAtomName(aa_tlc, a_name) for a_name in rule_anchors]
     # check polarity
     is_polar = False
-    if polar_hydrogens.has_key(aa_tlc):
+    if aa_tlc in polar_hydrogens:
       p_check = [hn in polar_hydrogens[aa_tlc] for hn in hydrogens]
       is_polar = all(p_check)
       # check if our assumption is safe (all polar or none)
@@ -117,9 +117,9 @@ for i in range(conop.XXX):
     args = [aa_tlc, str(rule_rule), str(is_polar).lower()]
     if prot_state is not None:
       args += [prot_state]
-    print "  AddRule_(" + ", ".join(args) + ");"
+    print("  AddRule_(" + ", ".join(args) + ");")
     args = [aa_tlc] + [NameIndex(aa_tlc, an) for an in anchors]
-    print "  SetAnchors_(" + ", ".join(args) + ");"
+    print("  SetAnchors_(" + ", ".join(args) + ");")
     args = [aa_tlc] + ["%s_%s_INDEX" % (aa_tlc, hn) for hn in hydrogens]
-    print "  SetHydrogens_(" + ", ".join(args) + ");"
+    print("  SetHydrogens_(" + ", ".join(args) + ");")
 
diff --git a/extras/code_generation/ideal_bb_param.py b/extras/code_generation/ideal_bb_param.py
index b9ceef405a54ca9f8169e0506a4606170c8d8fd2..86edc2f3d7075ef4f6e763b1d9a05ef56b0ce98e 100644
--- a/extras/code_generation/ideal_bb_param.py
+++ b/extras/code_generation/ideal_bb_param.py
@@ -128,10 +128,10 @@ for aa_name in aa_names:
 
 
 # start code generation
-print "void BBTraceParam(char olc, Real& n_ca_bond, Real& ca_c_bond," 
-print "                  Real& c_n_bond, Real& c_n_ca_angle,"
-print "                  Real& n_ca_c_angle, Real& ca_c_n_angle){"
-print "  switch(olc){"
+print("void BBTraceParam(char olc, Real& n_ca_bond, Real& ca_c_bond,") 
+print("                  Real& c_n_bond, Real& c_n_ca_angle,")
+print("                  Real& n_ca_c_angle, Real& ca_c_n_angle){")
+print("  switch(olc){")
 
 for i in range(20):
 
@@ -142,71 +142,71 @@ for i in range(20):
   else:
     olc = conop.ResidueNameToOneLetterCode(aa_name) 
 
-  print "    case \'%s\':{"%olc
-  print "      n_ca_bond = %f;"%n_ca_bonds[aa_name]
-  print "      ca_c_bond = %f;"%ca_c_bonds[aa_name]
-  print "      c_n_bond = %f;"%c_n_bonds[aa_name]
-  print "      c_n_ca_angle = %f;"%c_n_ca_angles[aa_name]
-  print "      n_ca_c_angle = %f;"%n_ca_c_angles[aa_name]
-  print "      ca_c_n_angle = %f;"%ca_c_n_angles[aa_name]
-  print "      break;"
-  print "    }"
+  print("    case \'%s\':{"%olc)
+  print("      n_ca_bond = %f;"%n_ca_bonds[aa_name])
+  print("      ca_c_bond = %f;"%ca_c_bonds[aa_name])
+  print("      c_n_bond = %f;"%c_n_bonds[aa_name])
+  print("      c_n_ca_angle = %f;"%c_n_ca_angles[aa_name])
+  print("      n_ca_c_angle = %f;"%n_ca_c_angles[aa_name])
+  print("      ca_c_n_angle = %f;"%ca_c_n_angles[aa_name])
+  print("      break;")
+  print("    }")
 
-print "    default:{"
-print "      throw promod3::Error(\"Invalid OneLetterCode observed!\");"
-print "    }"
-print "  }"
-print "}"
+print("    default:{")
+print("      throw promod3::Error(\"Invalid OneLetterCode observed!\");")
+print("    }")
+print("  }")
+print("}")
 
 
 # this is for the unit tests...
-print "bond avg values:"
+print("bond avg values:")
 
 avg = 0.0
-for k,v in n_ca_bonds.iteritems():
+for k,v in n_ca_bonds.items():
   avg += v
 avg /= 20
 
-print "n_ca_bonds", avg
+print("n_ca_bonds", avg)
 
 avg = 0.0
-for k,v in ca_c_bonds.iteritems():
+for k,v in ca_c_bonds.items():
   avg += v
 avg /= 20
 
-print "ca_c_bonds", avg
+print("ca_c_bonds", avg)
 
 avg = 0.0
-for k,v in c_n_bonds.iteritems():
+for k,v in c_n_bonds.items():
   avg += v
 avg /= 20
 
-print "c_n_bonds", avg
+print("c_n_bonds", avg)
 
 
 
-print "angle avg values:"
+print("angle avg values:")
 
 avg = 0.0
-for k,v in c_n_ca_angles.iteritems():
+for k,v in c_n_ca_angles.items():
   avg += v
 avg /= 20
 
-print "c_n_ca_angles", avg
+print("c_n_ca_angles", avg)
 
 avg = 0.0
-for k,v in n_ca_c_angles.iteritems():
+for k,v in n_ca_c_angles.items():
   avg += v
 avg /= 20
 
-print "n_ca_c_angles", avg
+print("n_ca_c_angles", avg)
 
 avg = 0.0
-for k,v in ca_c_n_angles.iteritems():
+for k,v in ca_c_n_angles.items():
   avg += v
 avg /= 20
 
-print "ca_c_n_angles", avg
+print("ca_c_n_angles", avg)
 
 
 
diff --git a/extras/code_generation/sidechain_atom_rule_lookup.py b/extras/code_generation/sidechain_atom_rule_lookup.py
index 684d9682f2368be99359bd3ea5f6aec78c392dd4..56e77653e8b7d3232443336ea0a4359712858a08 100644
--- a/extras/code_generation/sidechain_atom_rule_lookup.py
+++ b/extras/code_generation/sidechain_atom_rule_lookup.py
@@ -264,7 +264,7 @@ for aa in dihedral_info:
 # Let's generate the code to add the rules to the HeavyAtomRuleLookup
 
 for aa in dihedral_info:
-    print("  // " + conop.AminoAcidToResidueName(aa))
+    print(("  // " + conop.AminoAcidToResidueName(aa)))
     for i in range(len(dihedral_info[aa])):
         heavy_atom = dihedral_info[aa][i][0]
         anchor_one = dihedral_info[aa][i][1][0]
@@ -291,8 +291,8 @@ for aa in dihedral_info:
         param.append(str(dihedral_idx))
         param.append("Real("+str(base_dihedral)+')')
 
-        print("  AddRule(" + ", ".join(param[:5]) + ',')
-        print("          " + ", ".join(param[5:]) + ");")
+        print(("  AddRule(" + ", ".join(param[:5]) + ','))
+        print(("          " + ", ".join(param[5:]) + ");"))
 
 
     print("")
@@ -402,7 +402,7 @@ dihedral_definitions[conop.PHE].append(("CA","CB","CG","CD1"))
 
 
 for aa in dihedral_definitions:
-    print("  // " + conop.AminoAcidToResidueName(aa))
+    print(("  // " + conop.AminoAcidToResidueName(aa)))
     for i in range(len(dihedral_definitions[aa])):
         atom_one = dihedral_definitions[aa][i][0]
         atom_two = dihedral_definitions[aa][i][1]
@@ -421,7 +421,7 @@ for aa in dihedral_definitions:
         param.append(str(idx_three))
         param.append(str(idx_four))
 
-        print("  AddChiDefinition(" + ", ".join(param) + ');')
+        print(("  AddChiDefinition(" + ", ".join(param) + ');'))
 
 
     print("")
diff --git a/extras/data_generation/all_atom_scorer/create_default_aa_scorer.py b/extras/data_generation/all_atom_scorer/create_default_aa_scorer.py
index 92c786e6c5bfe5bab02bea88d33938afb2c7ecd4..75addb337a9e401860c8a6fff3705d1fb6f8e41d 100644
--- a/extras/data_generation/all_atom_scorer/create_default_aa_scorer.py
+++ b/extras/data_generation/all_atom_scorer/create_default_aa_scorer.py
@@ -37,8 +37,8 @@ int_opts = int_pot.GetOptions()
 packing_pot = PackingPotential.Load("packing_pot.dat")
 packing_opts = packing_pot.GetOptions()
 
-print "InteractionPotential", int_opts.upper_cutoff, int_opts.number_of_bins, int_opts.sequence_sep
-print "PackingPotential", packing_opts.cutoff, packing_opts.max_counts
+print("InteractionPotential", int_opts.upper_cutoff, int_opts.number_of_bins, int_opts.sequence_sep)
+print("PackingPotential", packing_opts.cutoff, packing_opts.max_counts)
 
 # check whether the provided potentials are parametrized in a way PM can use
 
diff --git a/extras/data_generation/frag_db/build_frag_db.py b/extras/data_generation/frag_db/build_frag_db.py
index b58530ad80e3569d08dec153693d96e84110ece9..d1db4a59eb907ef1b32dc215e681bc065ae68dce 100644
--- a/extras/data_generation/frag_db/build_frag_db.py
+++ b/extras/data_generation/frag_db/build_frag_db.py
@@ -28,7 +28,7 @@ fragdb = loop.FragDB(dist_bin_size,angle_bin_size)
 structure_db = loop.LoadStructureDB()
 
 for i in range(3,15):
-  print "start to add fragments of length ",i
+  print("start to add fragments of length ",i)
   fragdb.AddFragments(i,max_pairwise_rmsd,structure_db)
 
 fragdb.PrintStatistics()
diff --git a/extras/data_generation/rotamer_library/do_bbdep_lib.py b/extras/data_generation/rotamer_library/do_bbdep_lib.py
index 9dab196e971b33a3677160cf42fbe11cd894a4c0..260fd63e6e61a5f356fb399ad0420b3a6c60994d 100644
--- a/extras/data_generation/rotamer_library/do_bbdep_lib.py
+++ b/extras/data_generation/rotamer_library/do_bbdep_lib.py
@@ -20,7 +20,7 @@ from scipy.ndimage import gaussian_filter
 from promod3 import sidechain
 
 if len(sys.argv) != 3:
-  print "USAGE: ost do_lib.py <path_to_data> <aa_name>" 
+  print("USAGE: ost do_lib.py <path_to_data> <aa_name>") 
   sys.exit(0)
 
 # the csv file you created with fetch_data.py
@@ -274,7 +274,7 @@ def DoIt(aa_name, aa_data, n_bins, p_n_67, chi_n_67, out_path,
     for j in range(n_bins):
       current_psi = -np.pi + j*(2*np.pi/n_bins)
 
-      print "processing", aa_name, "phi:",current_phi,"psi:",current_psi
+      print("processing", aa_name, "phi:",current_phi,"psi:",current_psi)
 
       ###############################################################
       # GET THE PROBABILITIES OF ALL CONFIGURATIONS GIVEN PHI / PSI #
diff --git a/extras/data_generation/rotamer_library/do_lib.py b/extras/data_generation/rotamer_library/do_lib.py
index abb4ce3aa993f99454f851c26366eccf72076431..05f2b69325c3c4d4e063a3b3af7422b50967adbb 100644
--- a/extras/data_generation/rotamer_library/do_lib.py
+++ b/extras/data_generation/rotamer_library/do_lib.py
@@ -20,7 +20,7 @@ from scipy.ndimage import gaussian_filter
 from promod3 import sidechain
 
 if len(sys.argv) != 3:
-  print "USAGE: ost do_lib.py <path_to_data> <library_out_path>" 
+  print("USAGE: ost do_lib.py <path_to_data> <library_out_path>") 
   sys.exit(0)
 
 # the csv file you created with fetch_data.py
@@ -144,7 +144,7 @@ def GetChiStats(chi):
 def DoIt(aa_name, data_file_path, library, non_rot_sampling = 30, 
          non_rot_smooth_std = 10):
 
-  print "processing", aa_name
+  print("processing", aa_name)
   
   # Parse CSV file
   data = open(data_file_path, 'r').readlines()
diff --git a/extras/data_generation/rotamer_library/fetch_data.py b/extras/data_generation/rotamer_library/fetch_data.py
index 97ad79363270fe540dacdaab3b1f5f7ed69647ab..5186ba088e8f761efd09fee8dc4f4d6a3f58b21a 100644
--- a/extras/data_generation/rotamer_library/fetch_data.py
+++ b/extras/data_generation/rotamer_library/fetch_data.py
@@ -57,7 +57,7 @@ sidechain_data["HIS"] = list()
 sidechain_data["PHE"] = list()
 
 dihedral_data = dict()
-for k in sidechain_data.keys():
+for k in sidechain_data:
   dihedral_data[k] = list()
 
 infile = open(pisces_filename, 'r')
@@ -69,7 +69,7 @@ for line in data[1:]:
   pdb_id = line[:4]
   chain_id = line[4]
 
-  print "processing", pdb_id
+  print("processing", pdb_id)
 
   prot = None
 
@@ -80,7 +80,7 @@ for line in data[1:]:
       prot = io.LoadPDB(os.path.join(structure_dir, pdb_id.lower() + ".pdb"))
     prot = prot.Select("peptide=true and cname="+chain_id)
   except:
-    print "Failed to get the structure! skip..."
+    print("Failed to get the structure! skip...")
     continue
 
   for r in prot.residues:
@@ -184,7 +184,7 @@ for line in data[1:]:
 
 # data output
 file_content = ["rname, phi, psi, chi1, chi2, chi3, chi4, conf1, conf2, conf3, conf4"]
-for rname, entry_list in sidechain_data.iteritems():
+for rname, entry_list in sidechain_data.items():
   rot_id = sidechain.TLCToRotID(rname)
   for e, dihedral_angles in zip(entry_list, dihedral_data[rname]):
     temp = [rname, "%.4f"%(dihedral_angles[0]), "%.4f"%(dihedral_angles[1]), 
diff --git a/extras/data_generation/structure_db/assign_structure_profiles.py b/extras/data_generation/structure_db/assign_structure_profiles.py
index ed1b57098a16902876dc0085fc08d1e8f3f05f3b..71d892cf972497c2c512f231222ef0abcb623549 100644
--- a/extras/data_generation/structure_db/assign_structure_profiles.py
+++ b/extras/data_generation/structure_db/assign_structure_profiles.py
@@ -56,7 +56,7 @@ for coord_idx in range(structure_db.GetNumCoords()):
   try:
     structure_profile = profile_db.GetProfile(full_id)
   except:
-    print "Could not find structure profile for ", full_id, " skip..."
+    print("Could not find structure profile for ", full_id, " skip...")
     continue
     
   structure_db.SetStructureProfile(coord_idx, structure_profile)
diff --git a/extras/data_generation/structure_db/build_structure_db.py b/extras/data_generation/structure_db/build_structure_db.py
index a4b6252a1f68498713bee4d127b227a9d0051f1a..43a252872fb5ca4b7aea004def98459bdc58a21f 100644
--- a/extras/data_generation/structure_db/build_structure_db.py
+++ b/extras/data_generation/structure_db/build_structure_db.py
@@ -75,16 +75,16 @@ for i,line in enumerate(data):
 
   try:
 
-    print 'processing: ',prot_id,' on line: ',i
+    print('processing: ',prot_id,' on line: ',i)
 
     prot_path = os.path.join(structure_dir, prot_id+'.pdb')
     hhm_path = os.path.join(profile_dir,prot_id+chain_id+".hhm")
 
     if not os.path.exists(prot_path):
-      print "Could not find structure file... skip..."
+      print("Could not find structure file... skip...")
       continue
     if not os.path.exists(hhm_path):
-      print "Could not find hhm file... skip..."
+      print("Could not find hhm file... skip...")
       continue
 
     # load and clean full structure
diff --git a/extras/data_generation/structure_db/create_structure_profiles.py b/extras/data_generation/structure_db/create_structure_profiles.py
index eb00492e24de84e80509522a402a7f57aaf20997..d4a558bd8851419d386be73053dd7fb45ab03632 100644
--- a/extras/data_generation/structure_db/create_structure_profiles.py
+++ b/extras/data_generation/structure_db/create_structure_profiles.py
@@ -25,7 +25,7 @@ if len(sys.argv) != 6:
 
   params = ["structure_db_path", "structure_db_source_path",\
             "start", "end", "profile_db_out_path"]
-  print "USAGE: ost create_structure_profiles.py " + ' '.join(params)
+  print("USAGE: ost create_structure_profiles.py " + ' '.join(params))
   sys.exit()
 
 # loads a StructureDB that can be arbitrarily large and estimates the
@@ -62,7 +62,7 @@ structure_db_source = loop.StructureDB.Load(structure_db_source_path)
 profile_db = seq.ProfileDB()
 
 for i in range(start,end):
-  print "processing chain with idx", i
+  print("processing chain with idx", i)
 
   try:
     # generate fragment info object
@@ -72,22 +72,22 @@ for i in range(start,end):
     bb_list = structure_db.GetBackboneList(fragment_info, sequence)
     residue_depths = structure_db.GetResidueDepths(fragment_info)  
 
-    print "id: ", coord_info.id
-    print "chain_name: ", coord_info.chain_name
-    print "sequence length: ", len(sequence)
+    print("id: ", coord_info.id)
+    print("chain_name: ", coord_info.chain_name)
+    print("sequence length: ", len(sequence))
 
     # generate the structure profile given the source StructureDB
     start = time.time()
     profile = structure_db_source.GenerateStructureProfile(bb_list, residue_depths)
     end = time.time()
 
-    print "it took: ", end-start
+    print("it took: ", end-start)
 
     profile_db.AddProfile('_'.join([coord_info.id, coord_info.chain_name]), profile)
 
   except:
     traceback.print_exc()
-    print "failed to create structure profile... skip..."
+    print("failed to create structure profile... skip...")
 
 profile_db.Save(profile_db_out_path)
 
diff --git a/extras/data_generation/structure_db/get_data_from_smtl.py b/extras/data_generation/structure_db/get_data_from_smtl.py
index 48806e713c9677b8ff8c4c35761f141793cdece3..986c10a7ab8cded12258c6a35d5150cd1682326e 100644
--- a/extras/data_generation/structure_db/get_data_from_smtl.py
+++ b/extras/data_generation/structure_db/get_data_from_smtl.py
@@ -54,17 +54,17 @@ for i,line in enumerate(pisces_data[1:]):
 
   prot_id = line[:4]
   chain_id = line[4]
-  print "processing ", prot_id, chain_id
+  print("processing ", prot_id, chain_id)
 
   try:
     bus = tpl_lib.GetAll(prot_id.upper())
   except:
-    print "could not find entry for ", prot_id, chain_id
+    print("could not find entry for ", prot_id, chain_id)
     continue
 
   bu = None
   if len(bus) == 0:
-    print "didnt find any valid biounit!"
+    print("didnt find any valid biounit!")
     continue
   if len(bus) == 1:
     bu = bus[0]
@@ -104,7 +104,7 @@ for i,line in enumerate(pisces_data[1:]):
           # add the actually extracted pdb id and chain name
           fetched_structures.append((prot_id, ch.name))
 
-          print "found the according data..."
+          print("found the according data...")
           break
 
       if found_chain:
diff --git a/extras/data_generation/torsion_sampler/train_torsion_sampler.py b/extras/data_generation/torsion_sampler/train_torsion_sampler.py
index 5e1181b51a80ab88bfee2b020a7ceffd52bfd1cc..9c0ea6fc1609bc135817054f7951448cec83614a 100644
--- a/extras/data_generation/torsion_sampler/train_torsion_sampler.py
+++ b/extras/data_generation/torsion_sampler/train_torsion_sampler.py
@@ -78,7 +78,7 @@ for i,line in enumerate(prot_data[1:]):
     prot_id = line[:4]
     chain_id = line[4]
     try:
-        print 'processing: ',prot_id,' on line: ',i
+        print('processing: ',prot_id,' on line: ',i)
         prot = io.LoadPDB(os.path.join(structure_dir,prot_id+chain_id+'.pdb'), 
                           profile=prof).Select('peptide=true and cname='+chain_id)
         mol.alg.AssignSecStruct(prot)
diff --git a/extras/pre_commit/pm3_csc/base.py b/extras/pre_commit/pm3_csc/base.py
index 17a763716534776a87b9463189a966a0ec0875e7..d2fbf65b13834574e2ce06fc68847eff41de89d3 100644
--- a/extras/pre_commit/pm3_csc/base.py
+++ b/extras/pre_commit/pm3_csc/base.py
@@ -78,7 +78,7 @@ def CheckInstalled(tool):
         retval = subprocess.call("command -v %s" % tool,
                                  stdout=open(os.devnull, 'wb'),
                                  shell=True)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         FailMsg("Failed to run 'command -v %s': '%s'" % (tool,
                                                          str(err)), 16)
     if retval > 0:
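
The comma form of the except clause is a hard syntax error under Python 3; the
only accepted spelling is "except SomeError as err", which Python 2.6+ also
understands. A minimal, self-contained sketch:

    import subprocess
    import sys

    try:
        # deliberately exit with a non-zero code to trigger the exception
        subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(3)"])
    except subprocess.CalledProcessError as err:   # Python 2 only: "..., err"
        print("command failed with exit code", err.returncode)
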
diff --git a/extras/pre_commit/pm3_csc/filecheck/python.py b/extras/pre_commit/pm3_csc/filecheck/python.py
index 692c42e03cd1621966938b9df5788caab116f49c..a932330e95eb3cb9c9ea5797b565c309f5913dcb 100644
--- a/extras/pre_commit/pm3_csc/filecheck/python.py
+++ b/extras/pre_commit/pm3_csc/filecheck/python.py
@@ -94,7 +94,7 @@ class Python(base.FileCheck):
             "instance. Therefore it should be a '@staticmethod' or not a "+
             "member of this class at all."
         }
-        if msg[1] not in msg_dict.keys():
+        if msg[1] not in msg_dict:
             pm3_csc.FailMsg("Found a pylint message for the first time: "+
                             "'%s'. You can get more information on " % outline +
                             "the kind of issue by running 'pylint "+
diff --git a/extras/pre_commit/pm3_csc/git.py b/extras/pre_commit/pm3_csc/git.py
index cebe8327860f2dbffff7541d141ce816db0fd25d..ed9011ca7adce39c45f74e9055737a24ce10e560 100644
--- a/extras/pre_commit/pm3_csc/git.py
+++ b/extras/pre_commit/pm3_csc/git.py
@@ -30,7 +30,7 @@ def CheckVersion(minversion):
     """
     try:
         gvo = subprocess.check_output(['git', '--version'], shell=False)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         base.FailMsg("Failed to run 'git --version': '%s'" % str(err), 1)
     gvs = re.match(r"git version ((:?\d+\.?)+)", gvo)
     if not gvs:
@@ -61,7 +61,7 @@ def CheckWhitespaces(exclude):
         retval = subprocess.call(['git', 'rev-parse', '--verify', 'HEAD'],
                                  stdout=open(os.devnull, 'wb'),
                                  shell=False)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         base.FailMsg("Failed to run 'git rev-parse --verify HEAD'': "+
                      "'%s'" % str(err), 6)
     if not retval: # shell returns 0 as True!
@@ -69,7 +69,7 @@ def CheckWhitespaces(exclude):
             di_o = subprocess.check_output(['git', 'diff-index', '-M',
                                             '--cached', '--check', 'HEAD'],
                                            shell=False)
-        except subprocess.CalledProcessError, err:
+        except subprocess.CalledProcessError as err:
             if err.returncode == 2:
                 di_o = err.output
             else:
@@ -117,12 +117,12 @@ def _GetFileType(filepath):
                         '.pdb.gz': 'ukn',
                         '.fasta' : 'ukn',
                         '.fas' : 'ukn'}
-    for ext in known_extensions.keys():
+    for ext in known_extensions:
         if filepath.endswith(ext):
             return known_extensions[ext]
     try:
         fout = subprocess.check_output(['file', filepath], shell=False)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         base.FailMsg("Failed to run 'file %s': '%s'" % (filepath, str(err)), 11)
     fout = fout.strip()
     known_file_types = {"%s: a python script text executable" :
@@ -145,7 +145,7 @@ def _GetFileType(filepath):
                         "ukn",
                         "%s: ASCII English text" :
                         "text"}
-    for file_resp in known_file_types.keys():
+    for file_resp in known_file_types:
         if fout == file_resp % filepath:
             return known_file_types[file_resp]
     base.FailMsg("Could not determine file type of '%s'. " % filepath +
@@ -183,7 +183,7 @@ def GetModifiedFiles(exclude, committed_only=True):
     try:
         cdup = subprocess.check_output(['git', 'rev-parse', '--show-cdup'],
                                        shell=False)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         base.FailMsg("Failed to run 'git rev-parse --show-cdup': "+
                      "'%s'" % str(err), 10)
     cdup = cdup.strip()
@@ -191,7 +191,7 @@ def GetModifiedFiles(exclude, committed_only=True):
     try:
         gso = subprocess.check_output(['git', 'status', '--porcelain'],
                                       shell=False)
-    except subprocess.CalledProcessError, err:
+    except subprocess.CalledProcessError as err:
         base.FailMsg("Failed to run 'git status --porcelain': '%s'" % str(err),
                      9)
     # prepare exclude list
@@ -220,7 +220,7 @@ def GetModifiedFiles(exclude, committed_only=True):
         if skip:
             continue
         ftype = _GetFileType(tfp)
-        if ftype in file_lists.keys():
+        if ftype in file_lists:
             file_lists[ftype].append(tfp)
         else:
             base.FailMsg("Not supposed to be ever seen. There must be a file "+
diff --git a/extras/sanity_checks/run_pipeline.py b/extras/sanity_checks/run_pipeline.py
index 556ea8f82304f97e6172d9302ece99e87cd4a893..4a39ea5c64384662c532d0964c3a79bee5bdb988 100644
--- a/extras/sanity_checks/run_pipeline.py
+++ b/extras/sanity_checks/run_pipeline.py
@@ -34,5 +34,5 @@ mhandle = modelling.BuildRawModel(aln)
 # build final model
 start = time.time()
 model = modelling.BuildFromRawModel(mhandle)
-print "\n----\nBuildFromRawModel took: ",time.time()-start
+print("\n----\nBuildFromRawModel took: ",time.time()-start)
 io.SavePDB(model, "model.pdb")
diff --git a/extras/sanity_checks/test_all.py b/extras/sanity_checks/test_all.py
index d528c7a0acd0e7c826688bcd73c1b86cc6969b62..0de61ad7f2ca5187539073cf570842eab5f624ba 100644
--- a/extras/sanity_checks/test_all.py
+++ b/extras/sanity_checks/test_all.py
@@ -32,11 +32,11 @@ def run_me(cmd):
     """Run cmd (list of tokens). Returns tuple (return-code, stdout, stderr)."""
     job = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     sout, serr = job.communicate()
-    print 'RUN', cmd, 'return code', job.returncode
+    print('RUN', cmd, 'return code', job.returncode)
     if sout:
-        print 'STDOUT\n', sout.strip()
+        print('STDOUT\n', sout.strip())
     if serr:
-        print 'STDERR\n', serr.strip()
+        print('STDERR\n', serr.strip())
     return job.returncode, sout, serr
 
 # get own path
@@ -44,14 +44,14 @@ my_path = os.path.dirname(os.path.abspath(__file__))
 os.chdir(my_path)
 
 # quick sanity check
-print '-------------- quick sanity check -------------- '
+print('-------------- quick sanity check -------------- ')
 assert(len(promod3.__version__) > 0)
 assert(os.path.exists(promod3.GetProMod3SharedDataPath()))
 rcode, sout, serr = run_me([pm_bin, 'test_module.py'])
 assert(rcode == 0)
 
 # run a pipeline
-print '-------------- run a pipeline -------------- '
+print('-------------- run a pipeline -------------- ')
 rcode, sout, serr = run_me([pm_bin, 'run_pipeline.py'])
 assert(rcode == 0)
 # check that result exists and is readable
@@ -60,7 +60,7 @@ io.LoadPDB('model.pdb')
 os.remove('model.pdb')
 
 # run doctests
-print '-------------- run doctests -------------- '
+print('-------------- run doctests -------------- ')
 sys.path.insert(0, my_path)
 from test_doctests import DocTests
 DocTests.setPmBinary(pm_bin)
@@ -74,7 +74,7 @@ res = unittest.TextTestRunner(verbosity=2).run(suite)
 assert(res.wasSuccessful())
 
 # check C++ interface
-print '-------------- check C++ interface -------------- '
+print('-------------- check C++ interface -------------- ')
 rcode, sout, serr = run_me(['make', 'clean'])
 assert(rcode == 0)
 rcode, sout, serr = run_me(['make', 'run'])
diff --git a/extras/sanity_checks/test_module.py b/extras/sanity_checks/test_module.py
index db97a6d249431ce36feb6f82e0fa32d7a7c4a661..7c0f351460ae86b9e6d6ea2c7208b01e587b690a 100644
--- a/extras/sanity_checks/test_module.py
+++ b/extras/sanity_checks/test_module.py
@@ -16,5 +16,5 @@
 
 import promod3
 
-print "ProMod3-version =", promod3.__version__
-print "ProMod3SharedDataPath =", promod3.GetProMod3SharedDataPath()
\ No newline at end of file
+print("ProMod3-version =", promod3.__version__)
+print("ProMod3SharedDataPath =", promod3.GetProMod3SharedDataPath())
\ No newline at end of file
diff --git a/extras/scoring_weight_training/analyze_bft_candidates.py b/extras/scoring_weight_training/analyze_bft_candidates.py
index 1c1d28690117b22c9320356ef0d9b66fbbd48a4f..1cbc4dbb0d952d1184d568d45e697879269785ea 100644
--- a/extras/scoring_weight_training/analyze_bft_candidates.py
+++ b/extras/scoring_weight_training/analyze_bft_candidates.py
@@ -84,16 +84,16 @@ def PrintStats(label, lc_count, lc_count_wb):
   num_lc = lc_count.sum()
   num_lc_wb = lc_count_wb.sum()
   num_non_zero_wb = np.count_nonzero(lc_count_wb)
-  print label, "- LOOPS", num_loops, num_non_zero_wb, \
+  print(label, "- LOOPS", num_loops, num_non_zero_wb, \
         "LC %d, %.3g" % (num_lc, float(num_lc) / num_loops), \
-        "LC-WB %d, %.3g" % (num_lc_wb, float(num_lc_wb) / num_loops)
+        "LC-WB %d, %.3g" % (num_lc_wb, float(num_lc_wb) / num_loops))
 
 ###############################################################################
 # MAIN
 ###############################################################################
 # check output path
 if not os.path.exists(out_path):
-  print "Creating output folder", out_path
+  print("Creating output folder", out_path)
   os.mkdir(out_path)
 
 # load input data
@@ -103,12 +103,12 @@ loop_data_keys = json_obj["loop_data_keys"]
 first_indices = json_obj["first_indices"]
 loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # setup
 wb_idx = loop_data_keys.index(wb_key)
 unique_ll = sorted(set(loop_lengths))
-unique_ll_idx = range(0, len(unique_ll), length_steps)
+unique_ll_idx = list(range(0, len(unique_ll), length_steps))
 length_set = [unique_ll[i:i+length_steps] for i in unique_ll_idx]
 Nloops = len(first_indices)-1
 Nlengths = len(length_set)
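
The list(range(...)) wrap added above restores Python 2 semantics, where
range() returned a real list. The wrap is only strictly needed when an actual
list is required later on (concatenation, append, serialization); iteration,
len(), indexing and slicing all work on the lazy range object itself. A
minimal sketch:

    r = range(0, 10, 3)
    print(len(r), r[1], r[1:])   # 4 3 range(3, 10, 3)
    print(list(r))               # [0, 3, 6, 9] -- only now is a list built
    chunks = [list(range(12))[i:i + 3] for i in r]   # iteration: no list()
    print(chunks[0])             # [0, 1, 2]
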
diff --git a/extras/scoring_weight_training/analyze_bft_score_correlations.py b/extras/scoring_weight_training/analyze_bft_score_correlations.py
index a399df014ca329be042963d464bed0afa88833af..b5956fa8a906243bbb475de3b1db5a868f966b19 100644
--- a/extras/scoring_weight_training/analyze_bft_score_correlations.py
+++ b/extras/scoring_weight_training/analyze_bft_score_correlations.py
@@ -59,7 +59,7 @@ old_weights = GetOldWeights()
 ###############################################################################
 # check output path
 if not os.path.exists(out_path):
-  print "Creating output folder", out_path
+  print("Creating output folder", out_path)
   os.mkdir(out_path)
 
 # load input data
@@ -69,7 +69,7 @@ loop_data_keys = json_obj["loop_data_keys"]
 first_indices = json_obj["first_indices"]
 loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # setup (extra columns = weighted scores)
 Ncols = bft.shape[1] + 1
@@ -85,7 +85,7 @@ for key in old_weights:
 
 # length check
 unique_ll = sorted(set(loop_lengths))
-unique_ll_idx = range(0, len(unique_ll), length_steps)
+unique_ll_idx = list(range(0, len(unique_ll), length_steps))
 length_set = [unique_ll[i:i+length_steps] for i in unique_ll_idx]
 Nlengths = len(length_set)
 cur_idx = 0
@@ -137,7 +137,7 @@ for key in keys_to_check:
     sr_value = stats.spearmanr(to_test_x, to_test_y)[0]
     my_stats[key].append([pr_value, sr_value, np.median(to_test_x),
                           np.mean(to_test_x), np.std(to_test_x)])
-    print "DOING", key + file_suffix
+    print("DOING", key + file_suffix)
 
     # what to show?
     if "prof" in key:
@@ -160,10 +160,10 @@ max_key_len = max(len(key) for key in keys_to_check)
 my_headers = ["%-*s" % (max_key_len, "CRITERIUM"), "length"]
 my_headers += [" Pearson-R", "Spearman-R", "    median", "      mean",
                "       std"]
-print "="*79
-print "STATS TABLE"
-print "-"*79
-print ", ".join(my_headers)
+print("="*79)
+print("STATS TABLE")
+print("-"*79)
+print(", ".join(my_headers))
 for key in keys_to_check:
   for i_l in range(Nlengths+1):
     val_list = ["%-*s" % (max_key_len, key)]
@@ -173,4 +173,4 @@ for key in keys_to_check:
     else:
       val_list += ["all   "]
     val_list += ["%10.5g" % val for val in my_stats[key][i_l]]
-    print ", ".join(val_list)
+    print(", ".join(val_list))
diff --git a/extras/scoring_weight_training/analyze_bft_scores.py b/extras/scoring_weight_training/analyze_bft_scores.py
index a8abdf597f7ea63cbbc0db5fddb494f3d67f77c6..98276d0cf4c0604c8f00c160e41e7f0fb419c95a 100644
--- a/extras/scoring_weight_training/analyze_bft_scores.py
+++ b/extras/scoring_weight_training/analyze_bft_scores.py
@@ -139,7 +139,7 @@ def GenPlot(x, cdf, keys, loop_data_keys, file_name, i_l=None):
 ###############################################################################
 # check output path
 if not os.path.exists(out_path):
-  print "Creating output folder", out_path
+  print("Creating output folder", out_path)
   os.mkdir(out_path)
 
 # load input data
@@ -149,13 +149,13 @@ loop_data_keys = json_obj["loop_data_keys"]
 first_indices = json_obj["first_indices"]
 loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # extract all possible data
 # -> ca_rmsds[C][L] = list of ca_rmsd of chosen loop for loops of
 #    length in length_set[L] and using column C as selection criterium
 unique_ll = sorted(set(loop_lengths))
-unique_ll_idx = range(0, len(unique_ll), length_steps)
+unique_ll_idx = list(range(0, len(unique_ll), length_steps))
 length_set = [unique_ll[i:i+length_steps] for i in unique_ll_idx]
 ca_rmsds = []
 # setup (extra columns = random choice, weighted scores)
@@ -316,21 +316,21 @@ sorted_keys = [key for _,key in sorted(to_sort, reverse=True)]
 # show AUC table (row = key, cols = lengths)
 max_key_len = max(len(key) for key in auc_keys)
 my_headers = ["  ALL"] + ["LL%3d" % llr[0] for llr in length_set]
-print "="*79
-print "AUC TABLE"
-print "-"*79
-print ("%-*s " % (max_key_len, "CRITERIUM")) + ", ".join(my_headers)
+print("="*79)
+print("AUC TABLE")
+print("-"*79)
+print(("%-*s " % (max_key_len, "CRITERIUM")) + ", ".join(my_headers))
 for key in sorted_keys:
   val_list = ["%5.2f" % auc[key]]
   val_list += ["%5.2f" % auc_per_ll[key][i_l] for i_l in range(Nlengths)]
-  print ("%-*s:" % (max_key_len, key)) + ", ".join(val_list)
+  print(("%-*s:" % (max_key_len, key)) + ", ".join(val_list))
 if wb_mode != 2:
-  print "-"*79
-  print ("%-*s " % (max_key_len, "CRITERIUM (LOSS)")) + ", ".join(my_headers)
+  print("-"*79)
+  print(("%-*s " % (max_key_len, "CRITERIUM (LOSS)")) + ", ".join(my_headers))
   for key in sorted_keys:
     val_list = ["%5.2f" % auc_loss[key]]
     val_list += ["%5.2f" % auc_loss_per_ll[key][i_l] for i_l in range(Nlengths)]
-    print ("%-*s:" % (max_key_len, key)) + ", ".join(val_list)
+    print(("%-*s:" % (max_key_len, key)) + ", ".join(val_list))
 
 # dump to CSV
 my_headers = ["Criterium", "all lengths"]
diff --git a/extras/scoring_weight_training/analyze_weights.py b/extras/scoring_weight_training/analyze_weights.py
index d04d40a776bdcec8ef8bd921cff45cf7b24452f9..da4f9675ba5a91fa4b46ca674658c227e4aa78a0 100644
--- a/extras/scoring_weight_training/analyze_weights.py
+++ b/extras/scoring_weight_training/analyze_weights.py
@@ -66,7 +66,7 @@ do_not_show = "_AAR" # remove score ids incl. this from plot
 ###############################################################################
 def GetWeightVector(weights, loop_data_keys):
   weight_vector = np.zeros((len(loop_data_keys),), dtype=np.float32)
-  for key, weight in weights.iteritems():
+  for key, weight in weights.items():
     i_c = loop_data_keys.index(key)
     weight_vector[i_c] = weight
   return weight_vector
@@ -85,7 +85,7 @@ def GetStdWeights(keys, bft_std, ll_idx):
 ###############################################################################
 # get input path from command line
 if len(sys.argv) < 2:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 in_path = sys.argv[1]
 scorer_weights = json.load(open(in_path, "r"))
@@ -102,15 +102,15 @@ fragment_indices = json_obj["fragment_indices"]
 first_indices_ll = json_obj["first_indices_ll"]
 length_set = json_obj["length_set"]
 Nlengths = len(length_set)
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # check signs of scores (put to 0 if wrong -> ok for redundant scores)
-for score_id, weights in scorer_weights.iteritems():
-  for key, weight in weights.iteritems():
+for score_id, weights in scorer_weights.items():
+  for key, weight in weights.items():
     if    ("prof" in key and weight >= 0) \
        or ("prof" not in key and weight <= 0):
-      print "REDUNDANT (presumably) score %s for set %s (weight was %g)" \
-            % (key, score_id, weight)
+      print("REDUNDANT (presumably) score %s for set %s (weight was %g)" \
+            % (key, score_id, weight))
       weights[key] = 0
 
 # extract unique SCORES
@@ -128,7 +128,7 @@ for score_id in score_ids:
 # translate to weight vectors (full and per ll range)
 weight_vectors_full = dict()
 weight_vectors_per_ll = {key: [None] * Nlengths for key in score_ids_per_ll}
-for key, weights in scorer_weights.iteritems():
+for key, weights in scorer_weights.items():
   weight_vector = GetWeightVector(weights, loop_data_keys)
   if "_LL" in key:
     ll_pos = key.index("_LL")
@@ -142,7 +142,7 @@ for key, weights in scorer_weights.iteritems():
 ca_rmsd_idx = loop_data_keys.index(ca_key)
 ca_rmsd_col = bft[:, ca_rmsd_idx]
 Nloops = len(first_indices)-1
-auc_calculator = AucCalculator(bft, [], [], ca_rmsd_col, range(Nloops),
+auc_calculator = AucCalculator(bft, [], [], ca_rmsd_col, list(range(Nloops)),
                                first_indices, drmsd, max_rmsd)
 
 # get all CDFs ("_LL" suffix for ones with different loop lengths)
@@ -169,15 +169,15 @@ aucs = {key: cdfs[key].sum() * drmsd for key in cdfs}
 
 # report AUCs
 x = np.arange(0, max_rmsd, drmsd) + drmsd
-print "AUC OLD WEIGHTS = %6.4g, RANDOM = %6.4g" \
-      % (aucs["old_weights"], aucs["random"])
-print "NEW       , PER_LL,    ALL, STD_LL,    STD"
+print("AUC OLD WEIGHTS = %6.4g, RANDOM = %6.4g" \
+      % (aucs["old_weights"], aucs["random"]))
+print("NEW       , PER_LL,    ALL, STD_LL,    STD")
 for key in score_ids:
-  print "%-10s, %6.4f, %6.4f, %6.4f, %6.4f" \
+  print("%-10s, %6.4f, %6.4f, %6.4f, %6.4f" \
         % (key, aucs[key + "_LL"], aucs[key], aucs[key + "_STD_LL"],
-           aucs[key + "_STD"])
+           aucs[key + "_STD"]))
 for combo_key in combo_keys:
-  print "AUC COMBO %s = %6.4g" % (combo_key, aucs[combo_key])
+  print("AUC COMBO %s = %6.4g" % (combo_key, aucs[combo_key]))
 
 # plot cool ones
 rgb_colors = GetRgbColors()
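
The sign-check loop above assigns weights[key] = 0 while iterating
weights.items(). That is safe in Python 3: overwriting the value of an
existing key leaves the dict's key set, and thus the items() view, intact;
only inserting or deleting keys mid-iteration raises RuntimeError. A minimal
sketch with hypothetical weights:

    weights = {"cb_packing": -0.4, "prof": 0.7, "torsion": -0.1}

    for key, weight in weights.items():
        if "prof" in key and weight >= 0:
            weights[key] = 0          # allowed: the key set is unchanged

    for key in list(weights.keys()):  # snapshot required before deleting
        if weights[key] == 0:
            del weights[key]
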
diff --git a/extras/scoring_weight_training/cma.py b/extras/scoring_weight_training/cma.py
index 463a0cc7505caa3e4ca5d05c71724549f958f9f5..bb874c4c9dddb3e56d7df34904d8960a3604bd5f 100644
--- a/extras/scoring_weight_training/cma.py
+++ b/extras/scoring_weight_training/cma.py
@@ -304,14 +304,14 @@ From a python shell::
 # 08/10/24: more refactorizing
 # 10/03/09: upper bound exp(min(1,...)) for step-size control
 
-from __future__ import division
+
 # future is >= 3.0, this code has mainly been used with 2.6 & 2.7
-from __future__ import with_statement
+
 # only necessary for python 2.5 and not in heavy use
-from __future__ import print_function
+
 # available from python 2.6, code should also work without
-from __future__ import absolute_import
-from __future__ import unicode_literals
+
+
 # from __future__ import collections.MutableMapping
 # does not exist in future, otherwise Python 2.5 would work, since 0.91.01
 
@@ -319,7 +319,7 @@ import sys
 if not sys.version.startswith('2'):  # in python 3
     xrange = range
     raw_input = input
-    basestring = str
+    # basestring alias is obsolete; occurrences below were converted to str
 else:
     input = raw_input  # in py2, input(x) == eval(raw_input(x))
 
@@ -524,7 +524,7 @@ meta_parameters = MetaParameters()
 #
 def rglen(ar):
     """shortcut for the iterator ``xrange(len(ar))``"""
-    return xrange(len(ar))
+    return range(len(ar))
 
 def is_feasible(x, f):
     """default to check feasibility, see also ``cma_default_options``"""
@@ -945,7 +945,7 @@ class BoundaryHandlerBase(object):
         if self.bounds is None or self.bounds[ib] is None:
             return array(dimension * [sign_ * np.Inf])
         res = []
-        for i in xrange(dimension):
+        for i in range(dimension):
             res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
             if res[-1] is None:
                 res[-1] = sign_ * np.Inf
@@ -995,7 +995,7 @@ class BoundaryHandlerBase(object):
                     l[i] = 1
             b = []  # bounds in different format
             try:
-                for i in xrange(max(l)):
+                for i in range(max(l)):
                     b.append([bounds[0][i] if i < l[0] else None,
                               bounds[1][i] if i < l[1] else None])
             except (TypeError, IndexError):
@@ -1243,7 +1243,7 @@ class BoundPenalty(BoundaryHandlerBase):
         # compute varis = sigma**2 * C_ii
         varis = es.sigma**2 * array(N * [es.C] if isscalar(es.C) else (# scalar case
                                 es.C if isscalar(es.C[0]) else  # diagonal matrix case
-                                [es.C[i][i] for i in xrange(N)]))  # full matrix case
+                                [es.C[i][i] for i in range(N)]))  # full matrix case
 
         # relative violation in geno-space
         dmean = (es.mean - es.gp.geno(self.repair(es.gp.pheno(es.mean)))) / varis**0.5
@@ -1433,11 +1433,11 @@ class BoxConstraintsLinQuadTransformation(BoxConstraintsTransformationBase):
         self._lb = array([self.bounds[min((i, max_i))][0]
                           if self.bounds[min((i, max_i))][0] is not None
                           else -np.Inf
-                          for i in xrange(length)], copy=False)
+                          for i in range(length)], copy=False)
         self._ub = array([self.bounds[min((i, max_i))][1]
                           if self.bounds[min((i, max_i))][1] is not None
                           else np.Inf
-                          for i in xrange(length)], copy=False)
+                          for i in range(length)], copy=False)
         lb = self._lb
         ub = self._ub
         # define added values for lower and upper bound
@@ -1879,7 +1879,7 @@ class GenoPheno(object):
             # therefore it is omitted
             if 1 < 3:
                 keys = sorted(self.fixed_values.keys())
-                x = array([x[i] for i in xrange(len(x)) if i not in keys],
+                x = array([x[i] for i in range(len(x)) if i not in keys],
                           copy=False)
         # repair injected solutions
         if repair is not None:
@@ -2236,7 +2236,7 @@ class CMAAdaptSigmaCSA(CMAAdaptSigmaBase):
             self.damps = es.opts['CSA_dampfac'] * 1  # * (1.1 - 1/(es.N+1)**0.5)
         if es.opts['verbose'] > 1 or self.disregard_length_setting or 11 < 3:
             print('SigmaCSA Parameters')
-            for k, v in self.__dict__.items():
+            for k, v in self.__dict__.items():
                 print('  ', k, ':', v)
         self.ps = np.zeros(es.N)
         self._ps_updated_iteration = -1
@@ -2779,7 +2779,7 @@ class CMAEvolutionStrategy(OOOptimizer):
         self.N_pheno = len(self.x0)
 
         self.sigma0 = sigma0
-        if isinstance(sigma0, basestring):
+        if isinstance(sigma0, str):
         # TODO: no real need here (do rather in fmin)
             self.sigma0 = eval(sigma0)  # like '1./N' or 'np.random.rand(1)[0]+1e-2'
         if np.size(self.sigma0) != 1 or np.shape(self.sigma0):
@@ -2788,7 +2788,7 @@ class CMAEvolutionStrategy(OOOptimizer):
 
         # extract/expand options
         N = self.N_pheno
-        assert isinstance(opts['fixed_variables'], (basestring, dict)) \
+        assert isinstance(opts['fixed_variables'], (str, dict)) \
             or opts['fixed_variables'] is None
         # TODO: in case of a string we need to eval the fixed_variables
         if isinstance(opts['fixed_variables'], dict):
@@ -3016,7 +3016,7 @@ class CMAEvolutionStrategy(OOOptimizer):
                 the estimated diagonal of the Jacobian.
                 """
                 eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
-                return (map(x + eps) - map(x - eps)) / (2 * eps)
+                return (map(x + eps) - map(x - eps)) / (2 * eps)  # 'map' is the callable argument, not the builtin
             def grad_numerical_sym(x, func, epsilon=None):
                 """return symmetric numerical gradient of func : R^n -> R.
                 """
@@ -3208,7 +3208,7 @@ class CMAEvolutionStrategy(OOOptimizer):
         if len(arz):  # should always be true
             # apply unconditional mirroring, is pretty obsolete
             if new_injections and self.sp.lam_mirr and self.opts['CMA_mirrormethod'] == 0:
-                for i in xrange(self.sp.lam_mirr):
+                for i in range(self.sp.lam_mirr):
                     if 2 * (i + 1) > len(arz):
                         if self.countiter < 4:
                             _print_warning("fewer mirrors generated than given in parameter setting (%d<%d)"
@@ -3460,7 +3460,7 @@ class CMAEvolutionStrategy(OOOptimizer):
         if xmean is None:
             xmean = self.mean  # might have changed in self.ask
         X = []
-        for k in xrange(int(popsize)):
+        for k in range(int(popsize)):
             x, f = X_first.pop(0), None
             rejected = -1
             while rejected < 0 or not is_feasible(x, f):  # rejection sampling
@@ -3483,7 +3483,7 @@ class CMAEvolutionStrategy(OOOptimizer):
                 if is_feasible(x, f) and evaluations > 1:
                     f = aggregation([f] + [(func(x, *args) if kappa == 1 else
                                             func(xmean + kappa * length_normalizer * (x - xmean), *args))
-                                           for _i in xrange(int(evaluations - 1))])
+                                           for _i in range(int(evaluations - 1))])
                 if rejected + 1 % 1000 == 0:
                     print('  %d solutions rejected (f-value NaN or None) at iteration %d' %
                           (rejected, self.countiter))
@@ -3547,7 +3547,7 @@ class CMAEvolutionStrategy(OOOptimizer):
         if number is None:
             number = self.sp.lam_mirr
         res = []
-        for i in xrange(1, number + 1):
+        for i in range(1, number + 1):
             res.append(self.mean_old - pop_sorted[-i])
         return res
 
@@ -3730,7 +3730,7 @@ class CMAEvolutionStrategy(OOOptimizer):
                 if len(check_points):
                     idx = check_points
             except:
-                idx = xrange(sp.popsize)
+                idx = range(sp.popsize)
 
             for k in idx:
                 self.repair_genotype(pop[k])
@@ -3804,7 +3804,7 @@ class CMAEvolutionStrategy(OOOptimizer):
                 if self.sp.neg.cmuexp:
                     tmp = (pop[-sp.neg.mu:] - mold) / (self.sigma * self.sigma_vec)
                     # normalize to constant length (seems preferable in several aspects)
-                    for i in xrange(tmp.shape[0]):
+                    for i in range(tmp.shape[0]):
                         tmp[i, :] *= N**0.5 / self.mahalanobis_norm(
                                  self.sigma_vec * tmp[i, :]) / self.sigma
                     self._Yneg *= 1 - self.sp.neg.cmuexp  # for some reason necessary?
@@ -3818,7 +3818,7 @@ class CMAEvolutionStrategy(OOOptimizer):
             else:  # separable/diagonal linear case
                 assert(c1 + cmu <= 1)
                 Z = np.zeros(N)
-                for k in xrange(sp.mu):
+                for k in range(sp.mu):
                     z = (pop[k] - mold) / (self.sigma * self.sigma_vec)  # TODO see above
                     Z += sp.weights[k] * z * z  # is 1-D
                 self.C = (1 - c1a - cmu) * self.C + c1 * self.pc * self.pc + cmu * Z
@@ -3909,7 +3909,7 @@ class CMAEvolutionStrategy(OOOptimizer):
             fbestever = self.best.f
         s = (' after %i restart' + ('s' if number_of_runs > 1 else '')) \
             % number_of_runs if number_of_runs else ''
-        for k, v in self.stop().items():
+        for k, v in list(self.stop().items()):
             print('termination on %s=%s%s' % (k, str(v), s +
                   (' (%s)' % time_str if time_str else '')))
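
The list(...) around .items() in these hunks is the standard defensive
conversion: dict.items() returns a live view in Python 3, so a snapshot is
required if the dict may change during the loop; for read-only loops like the
one above it is merely harmless. A standalone sketch (not part of the patch):

    opts = {"a": 1, "b": 2}
    for k, v in opts.items():        # fine: nothing mutates while iterating
        print(k, v)
    for k in list(opts.keys()):      # snapshot first when deleting entries
        if k == "a":
            del opts[k]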
 
@@ -4241,7 +4241,7 @@ class CMAEvolutionStrategy(OOOptimizer):
         if len(self.C.shape) <= 1:
             return None
         c = self.C.copy()
-        for i in xrange(c.shape[0]):
+        for i in range(c.shape[0]):
             fac = c[i, i]**0.5
             c[:, i] /= fac
             c[i, :] /= fac
@@ -4447,7 +4447,7 @@ class CMAOptions(dict):
     @staticmethod
     def defaults():
         """return a dictionary with default option values and description"""
-        return dict((str(k), str(v)) for k, v in cma_default_options.items())
+        return dict((str(k), str(v)) for k, v in list(cma_default_options.items()))
         # getting rid of the u of u"name" by str(u"name")
         # return dict(cma_default_options)
 
@@ -4547,7 +4547,7 @@ class CMAOptions(dict):
         if s is None:
             super(CMAOptions, self).__init__(CMAOptions.defaults())  # dict.__init__(self, CMAOptions.defaults()) should be the same
             # self = CMAOptions.defaults()
-        elif isinstance(s, basestring):
+        elif isinstance(s, str):
             super(CMAOptions, self).__init__(CMAOptions().match(s))
             # we could return here
         else:
@@ -4590,7 +4590,7 @@ class CMAOptions(dict):
         if val is not None:
             dic = {dict_or_str:val}
 
-        for key, val in dic.items():
+        for key, val in list(dic.items()):
             key = self.corrected_key(key)
             if key not in CMAOptions.defaults():
                 # TODO: find a better solution?
@@ -4677,9 +4677,9 @@ class CMAOptions(dict):
         if loc is None:
             loc = self  # TODO: this hack is not so useful: popsize could be there, but N is missing
         try:
-            if isinstance(val, basestring):
+            if isinstance(val, str):
                 val = val.split('#')[0].strip()  # remove comments
-                if isinstance(val, basestring) and \
+                if isinstance(val, str) and \
                         key.find('filename') < 0:
                         # and key.find('mindx') < 0:
                     val = eval(val, globals(), loc)
@@ -6035,7 +6035,7 @@ class CMADataLogger(BaseDataLogger):
         the new value
 
         """
-        if not nameprefix or not isinstance(nameprefix, basestring):
+        if not nameprefix or not isinstance(nameprefix, str):
             raise _Error('filename prefix must be a nonempty string')
 
         if nameprefix == self.default_prefix:
@@ -6441,7 +6441,7 @@ class CMADataLogger(BaseDataLogger):
         for istart in start_idx:
             istop = stop_idx[stop_idx > istart]
             istop = istop[0] if len(istop) else 0
-            idx = xrange(istart, istop if istop else dat.f.shape[0])
+            idx = range(istart, istop if istop else dat.f.shape[0])
             if len(idx) > 1:
                 semilogy(dat.f[idx, iabscissa], abs(dat.f[idx, 5]) + foffset,
                         'm')  # , markersize=5
@@ -7060,9 +7060,9 @@ class NoiseHandler(object):
                     self.fitre[i] = fagg(func(ask(evals, X_i, self.epsilon), *args))
                 else:
                     self.fitre[i] = fagg([func(ask(1, X_i, self.epsilon)[0], *args)
-                                            for _k in xrange(evals)])
+                                            for _k in range(evals)])
             else:
-                self.fitre[i] = fagg([func(X_i, *args) for _k in xrange(evals)])
+                self.fitre[i] = fagg([func(X_i, *args) for _k in range(evals)])
         self.evaluations_just_done = evals * len(self.idx)
         return self.fit, self.fitre, self.idx
 
@@ -7231,7 +7231,7 @@ class Sections(object):
             return
 
         res = self.res
-        for i in xrange(len(self.basis)):  # i-th coordinate
+        for i in range(len(self.basis)):  # i-th coordinate
             if i not in res:
                 res[i] = {}
             # xx = np.array(self.x)
@@ -7477,12 +7477,12 @@ class Misc(object):
                 l = 0
 
             if l == 0:
-                return array([Mh.cauchy_with_variance_one() for _i in xrange(size)])
+                return array([Mh.cauchy_with_variance_one() for _i in range(size)])
             elif l == 1:
-                return array([Mh.cauchy_with_variance_one() for _i in xrange(size[0])])
+                return array([Mh.cauchy_with_variance_one() for _i in range(size[0])])
             elif l == 2:
-                return array([[Mh.cauchy_with_variance_one() for _i in xrange(size[1])]
-                             for _j in xrange(size[0])])
+                return array([[Mh.cauchy_with_variance_one() for _i in range(size[1])]
+                             for _j in range(size[0])])
             else:
                 raise _Error('len(size) cannot be large than two')
 
@@ -7597,24 +7597,24 @@ class Misc(object):
 
             num_opt = False  # factor 1.5 in 30-D
 
-            for j in xrange(n):
+            for j in range(n):
                 d[j] = V[n - 1][j]  # d is output argument
 
             # Householder reduction to tridiagonal form.
 
-            for i in xrange(n - 1, 0, -1):
+            for i in range(n - 1, 0, -1):
                 # Scale to avoid under/overflow.
                 h = 0.0
                 if not num_opt:
                     scale = 0.0
-                    for k in xrange(i):
+                    for k in range(i):
                         scale = scale + abs(d[k])
                 else:
                     scale = sum(abs(d[0:i]))
 
                 if scale == 0.0:
                     e[i] = d[i - 1]
-                    for j in xrange(i):
+                    for j in range(i):
                         d[j] = V[i - 1][j]
                         V[i][j] = 0.0
                         V[j][i] = 0.0
@@ -7622,7 +7622,7 @@ class Misc(object):
 
                     # Generate Householder vector.
                     if not num_opt:
-                        for k in xrange(i):
+                        for k in range(i):
                             d[k] /= scale
                             h += d[k] * d[k]
                     else:
@@ -7639,19 +7639,19 @@ class Misc(object):
                     h = h - f * g
                     d[i - 1] = f - g
                     if not num_opt:
-                        for j in xrange(i):
+                        for j in range(i):
                             e[j] = 0.0
                     else:
                         e[:i] = 0.0
 
                     # Apply similarity transformation to remaining columns.
 
-                    for j in xrange(i):
+                    for j in range(i):
                         f = d[j]
                         V[j][i] = f
                         g = e[j] + V[j][j] * f
                         if not num_opt:
-                            for k in xrange(j + 1, i):
+                            for k in range(j + 1, i):
                                 g += V[k][j] * d[k]
                                 e[k] += V[k][j] * f
                             e[j] = g
@@ -7661,7 +7661,7 @@ class Misc(object):
 
                     f = 0.0
                     if not num_opt:
-                        for j in xrange(i):
+                        for j in range(i):
                             e[j] /= h
                             f += e[j] * d[j]
                     else:
@@ -7670,16 +7670,16 @@ class Misc(object):
 
                     hh = f / (h + h)
                     if not num_opt:
-                        for j in xrange(i):
+                        for j in range(i):
                             e[j] -= hh * d[j]
                     else:
                         e[:i] -= hh * d[:i]
 
-                    for j in xrange(i):
+                    for j in range(i):
                         f = d[j]
                         g = e[j]
                         if not num_opt:
-                            for k in xrange(j, i):
+                            for k in range(j, i):
                                 V[k][j] -= (f * e[k] + g * d[k])
                         else:
                             V.T[j][j:i] -= (f * e[j:i] + g * d[j:i])
@@ -7692,37 +7692,37 @@ class Misc(object):
 
             # Accumulate transformations.
 
-            for i in xrange(n - 1):
+            for i in range(n - 1):
                 V[n - 1][i] = V[i][i]
                 V[i][i] = 1.0
                 h = d[i + 1]
                 if h != 0.0:
                     if not num_opt:
-                        for k in xrange(i + 1):
+                        for k in range(i + 1):
                             d[k] = V[k][i + 1] / h
                     else:
                         d[:i + 1] = V.T[i + 1][:i + 1] / h
 
-                    for j in xrange(i + 1):
+                    for j in range(i + 1):
                         if not num_opt:
                             g = 0.0
-                            for k in xrange(i + 1):
+                            for k in range(i + 1):
                                 g += V[k][i + 1] * V[k][j]
-                            for k in xrange(i + 1):
+                            for k in range(i + 1):
                                 V[k][j] -= g * d[k]
                         else:
                             g = np.dot(V.T[i + 1][0:i + 1], V.T[j][0:i + 1])
                             V.T[j][:i + 1] -= g * d[:i + 1]
 
                 if not num_opt:
-                    for k in xrange(i + 1):
+                    for k in range(i + 1):
                         V[k][i + 1] = 0.0
                 else:
                     V.T[i + 1][:i + 1] = 0.0
 
 
             if not num_opt:
-                for j in xrange(n):
+                for j in range(n):
                     d[j] = V[n - 1][j]
                     V[n - 1][j] = 0.0
             else:
@@ -7746,7 +7746,7 @@ class Misc(object):
             num_opt = False  # using vectors from numpy makes it faster
 
             if not num_opt:
-                for i in xrange(1, n):  # (int i = 1; i < n; i++):
+                for i in range(1, n):  # (int i = 1; i < n; i++):
                     e[i - 1] = e[i]
             else:
                 e[0:n - 1] = e[1:n]
@@ -7755,7 +7755,7 @@ class Misc(object):
             f = 0.0
             tst1 = 0.0
             eps = 2.0**-52.0
-            for l in xrange(n):  # (int l = 0; l < n; l++) {
+            for l in range(n):  # (int l = 0; l < n; l++) {
 
                 # Find small subdiagonal element
 
@@ -7787,7 +7787,7 @@ class Misc(object):
                         dl1 = d[l + 1]
                         h = g - d[l]
                         if not num_opt:
-                            for i in xrange(l + 2, n):
+                            for i in range(l + 2, n):
                                 d[i] -= h
                         else:
                             d[l + 2:n] -= h
@@ -7805,7 +7805,7 @@ class Misc(object):
                         s2 = 0.0
 
                         # hh = V.T[0].copy()  # only with num_opt
-                        for i in xrange(m - 1, l - 1, -1):  # (int i = m-1; i >= l; i--) {
+                        for i in range(m - 1, l - 1, -1):  # (int i = m-1; i >= l; i--) {
                             c3 = c2
                             c2 = c
                             s2 = s
@@ -7821,7 +7821,7 @@ class Misc(object):
                             # Accumulate transformation.
 
                             if not num_opt:  # overall factor 3 in 30-D
-                                for k in xrange(n):  # (int k = 0; k < n; k++) {
+                                for k in range(n):  # (int k = 0; k < n; k++) {
                                     h = V[k][i + 1]
                                     V[k][i + 1] = s * V[k][i] + c * h
                                     V[k][i] = c * V[k][i] - s * h
@@ -7851,7 +7851,7 @@ class Misc(object):
 
         N = len(C[0])
         if 1 < 3:
-            V = [[x[i] for i in xrange(N)] for x in C]  # copy each "row"
+            V = [[x[i] for i in range(N)] for x in C]  # copy each "row"
             d = N * [0.]
             e = N * [0.]
 
@@ -7873,10 +7873,10 @@ def pprint(to_be_printed):
     except ImportError:
         if isinstance(to_be_printed, dict):
             print('{')
-            for k, v in to_be_printed.items():
-                print("'" + k + "'" if isinstance(k, basestring) else k,
+            for k, v in list(to_be_printed.items()):
+                print("'" + k + "'" if isinstance(k, str) else k,
                       ': ',
-                      "'" + v + "'" if isinstance(k, basestring) else v,
+                      "'" + v + "'" if isinstance(k, str) else v,
                       sep="")
             print('}')
         else:
@@ -7963,8 +7963,8 @@ class Rotation(object):
             rstate = np.random.get_state()
             np.random.seed(self.seed) if self.seed else np.random.seed()
             B = np.random.randn(N, N)
-            for i in xrange(N):
-                for j in xrange(0, i):
+            for i in range(N):
+                for j in range(0, i):
                     B[i] -= np.dot(B[i], B[j]) * B[j]
                 B[i] /= sum(B[i]**2)**0.5
             self.dicMatrices[str(N)] = B
@@ -8135,7 +8135,7 @@ class FFWrapper(object):
             y = np.zeros(len(x) + len(self.index_value_pairs))
             assert len(y) > max(self.index_value_pairs)
             j = 0
-            for i in xrange(len(y)):
+            for i in range(len(y)):
                 if i in self.index_value_pairs:
                     y[i] = self.index_value_pairs[i]
                 else:
@@ -8290,7 +8290,7 @@ class FitnessFunctions(object):
         self.counter += 1
         # return np.random.rand(1)[0]**0 * sum(x**2) + 1 * np.random.rand(1)[0]
         dim = len(x)
-        x = array([x[i % dim] for i in xrange(2 * dim)])
+        x = array([x[i % dim] for i in range(2 * dim)])
         N = 8
         i = self.counter % dim
         # f = sum(x[i:i + N]**2)
@@ -8547,7 +8547,7 @@ class FitnessFunctions(object):
         # ((8 * np.sin(7 * (x[i] - 0.9)**2)**2 ) + (6 * np.sin()))
         res = np.sum((x[i - 1] + 10 * x[i])**2 + 5 * (x[i + 1] - x[i + 2])**2 +
                      (x[i] - 2 * x[i + 1])**4 + 10 * (x[i - 1] - x[i + 2])**4
-                     for i in xrange(1, len(x) - 2))
+                     for i in range(1, len(x) - 2))
         return 1 + res
     def styblinski_tang(self, x):
         """in [-5, 5]
@@ -8565,7 +8565,7 @@ class FitnessFunctions(object):
 
         http://en.wikipedia.org/wiki/Test_functions_for_optimization"""
         s = 0
-        for k in xrange((1+len(x)) // 2):
+        for k in range((1+len(x)) // 2):
             z = x[2 * k]
             y = x[min((2*k + 1, len(x)-1))]
             s += 100 * np.abs(y - 0.01 * z**2)**0.5 + 0.01 * np.abs(z + 10)
@@ -8760,7 +8760,7 @@ def main(argv=None):
             fun = None
         elif argv[1] in ('plot',):
             plot(name=argv[2] if len(argv) > 2 else None)
-            raw_input('press return')
+            input('press return')
             fun = None
         elif len(argv) > 3:
             fun = eval('fcts.' + argv[1])
@@ -8776,7 +8776,7 @@ def main(argv=None):
             sig0 = eval(argv[3])
 
         opts = {}
-        for i in xrange(5, len(argv), 2):
+        for i in range(5, len(argv), 2):
             opts[argv[i - 1]] = eval(argv[i])
 
         # run fmin
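
raw_input() was renamed to input() in Python 3, and the old eval-ing input()
is gone, so the pause above ports as a one-word rename. A standalone sketch:

    answer = input('press return')   # Python 3: returns the typed line as a str
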
diff --git a/extras/scoring_weight_training/collect_optimized_weights.py b/extras/scoring_weight_training/collect_optimized_weights.py
index 02544620abec130adbf7f33310a271a3f81c25bc..c3d52b917369192bf45430be2a9c33c81a69080e 100644
--- a/extras/scoring_weight_training/collect_optimized_weights.py
+++ b/extras/scoring_weight_training/collect_optimized_weights.py
@@ -38,7 +38,7 @@ USAGE: python collect_optimized_weights.py IN_PREFIX OUT_FILE
 ###############################################################################
 # get input path from command line
 if len(sys.argv) < 3:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 in_prefix = sys.argv[1]
 out_path = sys.argv[2]
@@ -61,7 +61,7 @@ for f in sorted(file_list):
     file_path = os.path.join(in_prefix_dir, f)
     weights = json.load(open(file_path, "r"))
     scorer_weights[my_id] = weights
-    print my_id, {str(k): v for k,v in weights.iteritems()}
+    print(my_id, {str(k): v for k,v in weights.items()})
 
 # dump it
 with open(out_path, "w") as json_file:
diff --git a/extras/scoring_weight_training/generate_bft.py b/extras/scoring_weight_training/generate_bft.py
index 1687e0e24f9653fee838b68017c088ccb3393004..70b80750d0edb3be1f59690ffc6f032d095f1b53 100644
--- a/extras/scoring_weight_training/generate_bft.py
+++ b/extras/scoring_weight_training/generate_bft.py
@@ -66,7 +66,7 @@ for file_name in sorted(json_files):
   # we expect dict
   if type(json_obj) == dict:
     # check for keys (take care for unicode mess)
-    cur_keys = sorted([str(key) for key in json_obj.keys()])
+    cur_keys = sorted([str(key) for key in json_obj.keys()])
     if cur_keys == exp_keys:
       # extract data
       frag_start = json_obj["frag_start"]
@@ -97,19 +97,19 @@ for file_name in sorted(json_files):
         loop_lengths.append(loop_data["loop_length"])
         num_lc_range += num_lc
       # report
-      print "ADDED %d LC for LOOPS in [%d,%d[ in %g s" \
-            % (num_lc_range, frag_start, frag_end, time.time() - start_time)
+      print("ADDED %d LC for LOOPS in [%d,%d[ in %g s" \
+            % (num_lc_range, frag_start, frag_end, time.time() - start_time))
 
 # check covered ranges
 max_frag_idx = max(fragment_indices)
 unique_frag_idx = set(fragment_indices)
 if len(unique_frag_idx) != max_frag_idx+1:
   missing_indices = sorted(set(range(max_frag_idx+1)) - unique_frag_idx)
-  print "MISSING LOOPS FOR", missing_indices
+  print("MISSING LOOPS FOR", missing_indices)
 if len(unique_frag_idx) != len(fragment_indices):
   for frag_idx in unique_frag_idx:
     if fragment_indices.count(frag_idx) > 1:
-      print "DUPLICATE LOOPS FOR", frag_idx
+      print("DUPLICATE LOOPS FOR", frag_idx)
 
 # consistency check
 num_loops = len(bft_list)
@@ -129,7 +129,7 @@ for i in range(num_loops):
     first_indices.append(total_num_lc)
     total_num_lc += num_lc
 # last one must be full size
-print "REMOVED", num_loops - len(first_indices), "LOOPS"
+print("REMOVED", num_loops - len(first_indices), "LOOPS")
 first_indices.append(total_num_lc)
 # clean up other lists
 for i in reversed(to_remove):
@@ -144,7 +144,7 @@ assert(len(first_indices) == num_loops + 1)
 
 # BUILD BFT
 num_keys = len(loop_data_keys)
-print "BUILDING BFT with %d rows and %d cols" % (total_num_lc, num_keys)
+print("BUILDING BFT with %d rows and %d cols" % (total_num_lc, num_keys))
 bft = numpy.concatenate(bft_list)
 assert(bft.shape[0] == total_num_lc)
 assert(bft.shape[1] == num_keys)
@@ -167,5 +167,5 @@ for loop_length in set(loop_lengths):
       num_lc = first_indices[i+1] - first_indices[i]
       num_lc_ll += num_lc
       num_loops_ll += 1
-  print "LL", loop_length, "LC", num_lc_ll, "LOOPS", num_loops_ll, \
-        "AVG-LC", float(num_lc_ll) / num_loops_ll
+  print("LL", loop_length, "LC", num_lc_ll, "LOOPS", num_loops_ll, \
+        "AVG-LC", float(num_lc_ll) / num_loops_ll)
diff --git a/extras/scoring_weight_training/generate_bft_chunks.py b/extras/scoring_weight_training/generate_bft_chunks.py
index ef0bcdf35e0e7878feae0af1a52f67a46a06e000..4587bda835806a64b93096ad8df10d037ff48fc4 100644
--- a/extras/scoring_weight_training/generate_bft_chunks.py
+++ b/extras/scoring_weight_training/generate_bft_chunks.py
@@ -133,7 +133,7 @@ def GetLoopData(lc):
 
   # only keep ones that converge in CCD
   orig_indices = lc.ApplyCCD(n_stem, c_stem, torsion_sampler)
-  print "-", len(lc), "LC for LOOP", chain_idx, start_idx, loop_length, len(prof)
+  print("-", len(lc), "LC for LOOP", chain_idx, start_idx, loop_length, len(prof))
 
   # fill lc_data columns -> dict with all keys in lc_data_keys
   lc_data = dict()
@@ -225,7 +225,7 @@ sys.stdout = unbuffered
 
 # check command line args
 if len(sys.argv) < 2 or not sys.argv[1].lower() in ["chunk", "range"]:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 
 # timing
@@ -285,8 +285,8 @@ lc_data_keys = ["ca_rmsd", "within_bounds", "seq_prof_score"] \
              + sorted(cluster_max_dist.keys())
 
 # choose fragments to analyze
-print "LOOPS in [%d, %d[" % (frag_start, frag_end)
-print '-> mem', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
+print("LOOPS in [%d, %d[" % (frag_start, frag_end))
+print('-> mem', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000)
 loop_datas = list()
 max_frag_time = 0
 for fragment in fragments[frag_start:frag_end]:
@@ -350,11 +350,11 @@ for fragment in fragments[frag_start:frag_end]:
   el_time = time.time() - start_time
   cur_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
   est_next_time = (el_time + 2*max_frag_time)/3600
-  print '-> N %d / est_next_time %g / frag_time %g / mem %d' \
-        % (len(loop_datas), est_next_time, frag_time, cur_mem)
+  print('-> N %d / est_next_time %g / frag_time %g / mem %d'
+        % (len(loop_datas), est_next_time, frag_time, cur_mem))
   # check if we should abort and dump
   if est_next_time > max_time or cur_mem > max_mem:
-    print "ABORTING", len(loop_datas)
+    print("ABORTING", len(loop_datas))
     break
 
 # timing
diff --git a/extras/scoring_weight_training/generate_bft_dbs.py b/extras/scoring_weight_training/generate_bft_dbs.py
index 863f922125b1a2b80d1aef301462b3ca83697be4..1eaa2105e2a9d578be56461710638187d0f8a69f 100644
--- a/extras/scoring_weight_training/generate_bft_dbs.py
+++ b/extras/scoring_weight_training/generate_bft_dbs.py
@@ -56,7 +56,7 @@ dist_bin_size = 1.0
 angle_bin_size = 20
 sub_frag_db = loop.FragDB(dist_bin_size, angle_bin_size)
 for i in range(3,15):
-  print "start to add fragments of length %d" % i
+  print("start to add fragments of length %d" % i)
   sub_frag_db.AddFragments(i, max_pairwise_rmsd, sub_structure_db)
 
 # dump them
diff --git a/extras/scoring_weight_training/generate_bft_testset.py b/extras/scoring_weight_training/generate_bft_testset.py
index ac46ce477ced492e7964d76e6844631dbbc6ee7c..0f03a89a00ce5324d69a34d466f5c192a611b6c5 100644
--- a/extras/scoring_weight_training/generate_bft_testset.py
+++ b/extras/scoring_weight_training/generate_bft_testset.py
@@ -32,7 +32,7 @@ min_coord_size = 50        # min. size for StructureDB entry to be considered
 terminal_res_to_skip = 2   # first and last x residues are never chosen for loop
 num_chains = 4000          # get x random chains from structure DB
 num_loops_per_len = 5000   # number of loops to pick for each loop length
-loop_range = range(3,15)   # range of loop lengths (uniformly distributed)
+loop_range = list(range(3,15))   # range of loop lengths (uniformly distributed)
 random.seed(42)            # fixed seed for reproducibilty
 out_fragments = "fragments.json"  # full path!
 
@@ -69,7 +69,7 @@ while len(coord_indices) < num_chains:
     coord_indices.add(coord_idx)
 
 # extract big lists appending all chains
-print "NUM CHAINS = %d" % num_chains
+print("NUM CHAINS = %d" % num_chains)
 is_coil = []      # 1 = coil, 0 = not, len = all residues
 first_idx = {}    # first_idx[coord_idx] = pos. of 1st res. in is_coil
 back_mapping = [] # back mapping to coord index, len = all residues
@@ -94,7 +94,7 @@ num_residues = len(is_coil)
 assert(len(first_idx) == num_chains)
 assert(len(back_mapping) == num_residues)
 assert(len(res_to_skip) == num_residues)
-print "NUM RESIDUES = %d with %d coils" % (num_residues, sum(is_coil))
+print("NUM RESIDUES = %d with %d coils" % (num_residues, sum(is_coil)))
 
 # extract loops for each loop length (list of [chain_idx, offset, length])
 fragments = []
@@ -109,7 +109,7 @@ for loop_length in loop_range:
     if num_coil >= num_coil_thresh and not to_skip:
       possible_start_indices.append(start_idx)
   # show it
-  print "LOOP LEN %d: %d choices" % (loop_length, len(possible_start_indices))
+  print("LOOP LEN %d: %d choices" % (loop_length, len(possible_start_indices)))
   # random choice
   start_indices = random.sample(possible_start_indices, num_loops_per_len)
   for start_idx in start_indices:
@@ -139,7 +139,7 @@ with open(out_fragments, "w") as json_file:
 
 # just checking...
 chosen_coord_indices = set([f.chain_index for f in fragments])
-print "NEVER CHOSE %d chains" % (num_chains - len(chosen_coord_indices))
+print("NEVER CHOSE %d chains" % (num_chains - len(chosen_coord_indices)))
 # for coord_idx in (coord_indices - chosen_coord_indices):
 #   coord_info = structure_db.GetCoordInfo(coord_idx)
 #   frag_info = loop.FragmentInfo(coord_idx, 0, coord_info.size)
@@ -155,6 +155,6 @@ for fragment in fragments:
     covered[i] += 1
 
 # report overlaps
-print "COVERAGE NEEDED", sum(i*num_loops_per_len for i in loop_range)
+print("COVERAGE NEEDED", sum(i*num_loops_per_len for i in loop_range))
 for i in range(max(covered)+1):
-  print "COVERAGE: %d loops on %d residues" % (i, covered.count(i))
+  print("COVERAGE: %d loops on %d residues" % (i, covered.count(i)))
diff --git a/extras/scoring_weight_training/generate_training_bft.py b/extras/scoring_weight_training/generate_training_bft.py
index 3d4b5984321db8a0bb54c73ee7167a34227f0e71..da3c499b92b3ce76f8c26e9973c5302e8ec8030c 100644
--- a/extras/scoring_weight_training/generate_training_bft.py
+++ b/extras/scoring_weight_training/generate_training_bft.py
@@ -78,7 +78,7 @@ loop_data_keys = json_obj["loop_data_keys"]
 first_indices = json_obj["first_indices"]
 loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # setup
 Nrows = bft.shape[0]
@@ -87,7 +87,7 @@ Nloops = len(first_indices)-1
 
 # length extraction
 unique_ll = sorted(set(loop_lengths))
-unique_ll_idx = range(0, len(unique_ll), length_steps)
+unique_ll_idx = list(range(0, len(unique_ll), length_steps))
 length_set = [unique_ll[i:i+length_steps] for i in unique_ll_idx]
 Nlengths = len(length_set)
 
@@ -113,8 +113,8 @@ for i_l in range(Nlengths):
   Nloops_l = len(loop_indices_per_ll[i_l])
   Nloops_train = int(round(train_fraction * Nloops_l))
   Nloops_test = Nloops_l - Nloops_train
-  print "LOOP LENGTH RANGE", length_set[i_l], \
-        "TRAIN", Nloops_train, "TEST", Nloops_test
+  print("LOOP LENGTH RANGE", length_set[i_l], \
+        "TRAIN", Nloops_train, "TEST", Nloops_test)
   train_indices = sorted(random.sample(loop_indices_per_ll[i_l], Nloops_train))
   test_indices = sorted(set(loop_indices_per_ll[i_l]) - set(train_indices))
   assert(len(train_indices) == Nloops_train)
diff --git a/extras/scoring_weight_training/get_weights_code.py b/extras/scoring_weight_training/get_weights_code.py
index 1716a2c340b8a848ba0e3b58acc5a66be53d8134..b48b2067ad0a4df7e877778b4a1921205360e2c4 100644
--- a/extras/scoring_weight_training/get_weights_code.py
+++ b/extras/scoring_weight_training/get_weights_code.py
@@ -36,7 +36,7 @@ USAGE: python get_weights_code.py VAR_NAME SCORE_ID WEIGHTS_FILE
 ###############################################################################
 # get input path from command line
 if len(sys.argv) < 4:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 var_name = sys.argv[1]
 score_id = sys.argv[2]
@@ -48,11 +48,11 @@ weights = scorer_weights[score_id]
 
 # check redundant scores
 keys = []
-for key, weight in weights.iteritems():
+for key, weight in weights.items():
   if    ("prof" in key and weight >= 0) \
      or ("prof" not in key and weight <= 0):
-    print "REDUNDANT (presumably) score %s for set %s (weight was %g)" \
-          % (key, score_id, weight)
+    print("REDUNDANT (presumably) score %s for set %s (weight was %g)" \
+          % (key, score_id, weight))
   else:
     keys.append(key)
 
@@ -70,4 +70,4 @@ for i in range(Nkeys):
     my_txt += ","
   else:
     my_txt += "}"
-  print my_txt
+  print(my_txt)
diff --git a/extras/scoring_weight_training/optimize_weights.py b/extras/scoring_weight_training/optimize_weights.py
index e6c339c8f31aad2514edf2b251770bcf5902de4d..257f9291c7dc8132a3bca8672354a104c4921d96 100644
--- a/extras/scoring_weight_training/optimize_weights.py
+++ b/extras/scoring_weight_training/optimize_weights.py
@@ -82,21 +82,21 @@ loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
 first_indices_ll = json_obj["first_indices_ll"]
 length_set = json_obj["length_set"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # check command line
 if len(sys.argv) < 2:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 score_ids = sys.argv[1]
 do_all = len(sys.argv) == 2
 if do_all:
   ll_idx = len(length_set)
-  print "DOING ALL LOOPS"
+  print("DOING ALL LOOPS")
 else:
   ll_idx = int(sys.argv[2])
   assert(ll_idx < len(length_set))
-  print "RESTRICTED TO LOOP LENGTHS", length_set[ll_idx]
+  print("RESTRICTED TO LOOP LENGTHS", length_set[ll_idx])
 
 # get scoring keys
 scoring_keys = []
@@ -115,8 +115,8 @@ if base_weight_key not in scoring_keys:
 ca_rmsd_idx = loop_data_keys.index(ca_key)
 base_weight_vector = np.zeros((bft.shape[1],), dtype=np.float32)
 weight_indices = []
-print "=" * 79
-print "INIT weights:"
+print("=" * 79)
+print("INIT weights:")
 for key in scoring_keys:
   # get base weights
   i_c = loop_data_keys.index(key)
@@ -124,7 +124,7 @@ for key in scoring_keys:
   if "prof" in key:
     my_weight = -my_weight
   base_weight_vector[i_c] = my_weight
-  print "- %s: %g" % (key, my_weight)
+  print("- %s: %g" % (key, my_weight))
   # only add indices apart from base key
   if key != base_weight_key:
     weight_indices.append(i_c)
@@ -142,10 +142,10 @@ if not do_all:
 Nloops = len(first_indices)-1
 ca_rmsd_col = bft[:, ca_rmsd_idx]
 auc_calculator = AucCalculator(bft, weight_indices, base_weight_vector,
-                               ca_rmsd_col, range(Nloops), first_indices,
+                               ca_rmsd_col, list(range(Nloops)), first_indices,
                                drmsd, max_rmsd)
 initial_weights = base_weight_vector[weight_indices]
-print "INIT score:", auc_calculator.GetAUCFromWeightI(initial_weights)
+print("INIT score:", auc_calculator.GetAUCFromWeightI(initial_weights))
 
 # estimate runtime
 start_time = time.time()
@@ -155,17 +155,17 @@ el_time = time.time() - start_time
 maxfev = int(10 * max_time * 3600 / el_time)
 
 # let's do it...
-print "=" * 79
-print "OPTIMIZE with at most %d function evaluations" % maxfev
+print("=" * 79)
+print("OPTIMIZE with at most %d function evaluations" % maxfev)
 start_time = time.time()
 res = optimize.minimize(auc_calculator.ToMinimize, initial_weights,
                         method="nelder-mead", options={"maxfev": maxfev})
-print "FITTED in %gs" % (time.time() - start_time)
-print res
-print "=" * 79
+print("FITTED in %gs" % (time.time() - start_time))
+print(res)
+print("=" * 79)
 final_weights = res.x
-print "BEST score:", auc_calculator.GetAUCFromWeightI(final_weights)
-print "BEST weights:"
+print("BEST score:", auc_calculator.GetAUCFromWeightI(final_weights))
+print("BEST weights:")
 cur_idx = 0
 weights_dict = dict()
 for key in scoring_keys:
@@ -174,7 +174,7 @@ for key in scoring_keys:
   else:
     my_weight = final_weights[cur_idx]
     cur_idx += 1
-  print "- %s: %g" % (key, my_weight)
+  print("- %s: %g" % (key, my_weight))
   weights_dict[key] = my_weight
 
 # dump it
diff --git a/extras/scoring_weight_training/optimize_weights_cma.py b/extras/scoring_weight_training/optimize_weights_cma.py
index 88870e37cf908fb163582d1540718256e0a1326d..590a6202e9e5cb5e3f293addccfb2417133f2572 100644
--- a/extras/scoring_weight_training/optimize_weights_cma.py
+++ b/extras/scoring_weight_training/optimize_weights_cma.py
@@ -82,21 +82,21 @@ loop_lengths = json_obj["loop_lengths"]
 fragment_indices = json_obj["fragment_indices"]
 first_indices_ll = json_obj["first_indices_ll"]
 length_set = json_obj["length_set"]
-print "LOADED DATA", bft.nbytes, bft.shape
+print("LOADED DATA", bft.nbytes, bft.shape)
 
 # check command line
 if len(sys.argv) < 2:
-  print usage_string
+  print(usage_string)
   sys.exit(1)
 score_ids = sys.argv[1]
 do_all = len(sys.argv) == 2
 if do_all:
   ll_idx = len(length_set)
-  print "DOING ALL LOOPS"
+  print("DOING ALL LOOPS")
 else:
   ll_idx = int(sys.argv[2])
   assert(ll_idx < len(length_set))
-  print "RESTRICTED TO LOOP LENGTHS", length_set[ll_idx]
+  print("RESTRICTED TO LOOP LENGTHS", length_set[ll_idx])
 
 # get scoring keys
 scoring_keys = []
@@ -115,8 +115,8 @@ if base_weight_key not in scoring_keys:
 ca_rmsd_idx = loop_data_keys.index(ca_key)
 base_weight_vector = np.zeros((bft.shape[1],), dtype=np.float32)
 weight_indices = []
-print "=" * 79
-print "INIT weights:"
+print("=" * 79)
+print("INIT weights:")
 for key in scoring_keys:
   # get base weights
   i_c = loop_data_keys.index(key)
@@ -124,7 +124,7 @@ for key in scoring_keys:
   if "prof" in key:
     my_weight = -my_weight
   base_weight_vector[i_c] = my_weight
-  print "- %s: %g" % (key, my_weight)
+  print("- %s: %g" % (key, my_weight))
   # only add indices apart from base key
   if key != base_weight_key:
     weight_indices.append(i_c)
@@ -142,11 +142,11 @@ if not do_all:
 Nloops = len(first_indices)-1
 ca_rmsd_col = bft[:, ca_rmsd_idx]
 auc_calculator = AucCalculator(bft, weight_indices, base_weight_vector,
-                               ca_rmsd_col, range(Nloops), first_indices,
+                               ca_rmsd_col, list(range(Nloops)), first_indices,
                                drmsd, max_rmsd)
 # init. weights for opt. are all ones (we mult. by base vector)
 initial_weights = np.ones((len(weight_indices),), dtype=np.float32)
-print "INIT score:", auc_calculator.GetAUCFromWeightIN(initial_weights)
+print("INIT score:", auc_calculator.GetAUCFromWeightIN(initial_weights))
 
 # estimate runtime
 start_time = time.time()
@@ -156,24 +156,24 @@ el_time = time.time() - start_time
 maxfev = int(10 * max_time * 3600 / el_time)
 
 # let's do it...
-print "=" * 79
-print "OPTIMIZE with at most %d function evaluations" % maxfev
+print("=" * 79)
+print("OPTIMIZE with at most %d function evaluations" % maxfev)
 start_time = time.time()
-print "-" * 79
+print("-" * 79)
 cma_opts = {'maxfevals': maxfev, 'verb_log': 0, 'bounds': [0, None]}
 # NOTE: do the following to get commented list of options
 # for k in sorted(cma.CMAOptions()): print k,'-',cma.CMAOptions()[k]
 res = cma.fmin(auc_calculator.ToMinimizeN, initial_weights, 0.2,
                options=cma_opts)
 final_weights = res[0]
-print "-" * 79
-print "FITTED in %gs" % (time.time() - start_time)
-print "BEST:", res[0]
-print "-> score:", auc_calculator.GetAUCFromWeightIN(res[0])
-print "MEAN:", res[5]
-print "-> score:", auc_calculator.GetAUCFromWeightIN(res[5])
+print("-" * 79)
+print("FITTED in %gs" % (time.time() - start_time))
+print("BEST:", res[0])
+print("-> score:", auc_calculator.GetAUCFromWeightIN(res[0]))
+print("MEAN:", res[5])
+print("-> score:", auc_calculator.GetAUCFromWeightIN(res[5]))
 
-print "BEST weights:"
+print("BEST weights:")
 cur_idx = 0
 weights_dict = dict()
 for key in scoring_keys:
@@ -182,7 +182,7 @@ for key in scoring_keys:
   else:
     my_weight = final_weights[cur_idx] * base_weight_vector[weight_indices[cur_idx]]
     cur_idx += 1
-  print "- %s: %g" % (key, my_weight)
+  print("- %s: %g" % (key, my_weight))
   weights_dict[key] = my_weight
 
 # dump it
diff --git a/extras/scoring_weight_training/run_all_optimizations.py b/extras/scoring_weight_training/run_all_optimizations.py
index e979fc6d4e01a214a6f163da03817e6817adbbe2..9753c0ace064d03bd5bb8b330bc7de62bd1e6e5f 100644
--- a/extras/scoring_weight_training/run_all_optimizations.py
+++ b/extras/scoring_weight_training/run_all_optimizations.py
@@ -35,10 +35,10 @@ score_ids = ["BB", "BB_DB", "BB_DB_AANR", "BB_AANR", "BB_DB_AAR", "BB_AAR"]
 ###############################################################################
 # check paths
 if not os.path.exists(out_path_sub_scripts):
-  print "Creating output folder", out_path_sub_scripts
+  print("Creating output folder", out_path_sub_scripts)
   os.mkdir(out_path_sub_scripts)
 if not os.path.exists(out_path_out_stuff):
-  print "Creating output folder", out_path_out_stuff
+  print("Creating output folder", out_path_out_stuff)
   os.mkdir(out_path_out_stuff)
 
 # get loop lengths
@@ -53,11 +53,11 @@ for score_id in score_ids:
     if ll_idx < len(length_set):
       cmd_args += " %d" % ll_idx
       file_suffix = "%s_LL%d" % (score_id, ll_idx)
-      print "Executing with args %s for lengths in %s" % \
-            (cmd_args, str(length_set[ll_idx]))
+      print("Executing with args %s for lengths in %s" % \
+            (cmd_args, str(length_set[ll_idx])))
     else:
       file_suffix = score_id
-      print "Executing with args %s for all lengths" % cmd_args
+      print("Executing with args %s for all lengths" % cmd_args)
 
     # setup script to run
     mystdout = os.path.join(out_path_out_stuff, "out_"+file_suffix+".stdout")
diff --git a/loop/data/convert_binaries.py b/loop/data/convert_binaries.py
index 57171075c54e59cc990517f200765c43ee95a347..1dced49d84b11e73da340dd094e9cd8b50ce3c6e 100644
--- a/loop/data/convert_binaries.py
+++ b/loop/data/convert_binaries.py
@@ -20,7 +20,7 @@ import os
 from promod3 import loop
 
 if len(sys.argv) != 4:
-	print "usage: python convert_binaries.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS"
+	print("usage: python convert_binaries.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS")
 	sys.exit(1)
 
 portable_file_in = sys.argv[1]
diff --git a/loop/pymod/__init__.py b/loop/pymod/__init__.py
index 3d3a8d1f0794350f978d5a05ca341d721d226018..a10e1ac4df52cdba101644a4b56a6cf65de6bc03 100644
--- a/loop/pymod/__init__.py
+++ b/loop/pymod/__init__.py
@@ -17,5 +17,5 @@
 """Initialise the loop module."""
 # geom needed for export_backbone
 from ost import geom
-from _loop import *
+from ._loop import *
 
diff --git a/modelling/pymod/__init__.py b/modelling/pymod/__init__.py
index e271378977183e94d53c492d7d465bdad762b378..6e69df18b0a0d049696ca4ff20672630b8366ff3 100644
--- a/modelling/pymod/__init__.py
+++ b/modelling/pymod/__init__.py
@@ -15,13 +15,13 @@
 
 
 """Initialise the modelling module."""
-from _modelling import *
-from _closegaps import *
-from _molprobity import *
-from _pipeline import *
-from _reconstruct_sidechains import *
-from _ring_punches import *
-from _denovo import *
-from _fragger_handle import *
-from _monte_carlo import *
-from _mhandle_helper import *
+from ._modelling import *
+from ._closegaps import *
+from ._molprobity import *
+from ._pipeline import *
+from ._reconstruct_sidechains import *
+from ._ring_punches import *
+from ._denovo import *
+from ._fragger_handle import *
+from ._monte_carlo import *
+from ._mhandle_helper import *
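
Python 3 makes absolute imports the default (PEP 328), so the implicit
intra-package imports in these __init__.py files need an explicit leading dot,
as rewritten above. A standalone sketch for a hypothetical package mypkg:

    # mypkg/__init__.py
    from ._impl import *    # Python 3: explicit relative import
    # 'from _impl import *' would now look for a top-level module named _impl
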
diff --git a/modelling/pymod/_closegaps.py b/modelling/pymod/_closegaps.py
index 4fd329c70738a374ef25c890dfe81ec408745bb3..fef838bf7d36892e8fc34b3973a0c3b6ba81e260 100644
--- a/modelling/pymod/_closegaps.py
+++ b/modelling/pymod/_closegaps.py
@@ -21,9 +21,9 @@ as argument.
 
 # internal
 from promod3 import loop, core, scoring
-from _modelling import *
-from _ring_punches import *
-from _reconstruct_sidechains import *
+from ._modelling import *
+from ._ring_punches import *
+from ._reconstruct_sidechains import *
 # external
 import ost
 import sys
@@ -132,8 +132,8 @@ def _CloseLoopFrame(mhandle, gap_orig, actual_candidates, actual_extended_gaps,
     actual_chain_idx = actual_extended_gaps[0].GetChainIndex()
     actual_chain = mhandle.model.chains[actual_chain_idx]
     # get min_res_num, max_after_resnum
-    min_before_resnum = sys.maxint
-    max_after_resnum = -sys.maxint-1
+    min_before_resnum = sys.maxsize
+    max_after_resnum = -sys.maxsize-1
     for g in actual_extended_gaps:
         min_before_resnum = min(min_before_resnum,
                                 g.before.GetNumber().GetNum())
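
sys.maxint is gone in Python 3 because int is unbounded; sys.maxsize is the
conventional replacement for a large sentinel, matching the running min/max
initialization above. A standalone sketch:

    import sys
    lo, hi = sys.maxsize, -sys.maxsize - 1   # sentinels for running min/max
    for num in (7, 3, 9):
        lo, hi = min(lo, num), max(hi, num)
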
diff --git a/modelling/pymod/_denovo.py b/modelling/pymod/_denovo.py
index 2ccc6650f435b55a84a73d64d90c0a7c89c82940..82ff21dea3f91b4518327f9e985b9cbeb4c24868 100644
--- a/modelling/pymod/_denovo.py
+++ b/modelling/pymod/_denovo.py
@@ -15,8 +15,8 @@
 
 
 from promod3 import scoring, loop
-from _fragger_handle import FraggerHandle
-from _modelling import *
+from ._fragger_handle import FraggerHandle
+from ._modelling import *
 
 def GenerateDeNovoTrajectories(sequence, 
                                num_trajectories = 200,
diff --git a/modelling/pymod/_pipeline.py b/modelling/pymod/_pipeline.py
index 63117e0f62367bed22ce035066eea9c36268441b..52a8136a487c60e4dcb4204eb037152771f855a3 100644
--- a/modelling/pymod/_pipeline.py
+++ b/modelling/pymod/_pipeline.py
@@ -21,11 +21,11 @@ as argument.
 
 # internal
 from promod3 import loop, sidechain, core
-from _modelling import *
-from _reconstruct_sidechains import *
-from _closegaps import *
-from _ring_punches import *
-from _mhandle_helper import *
+from ._modelling import *
+from ._reconstruct_sidechains import *
+from ._closegaps import *
+from ._ring_punches import *
+from ._mhandle_helper import *
 # external
 import ost
 from ost import mol, conop
@@ -73,7 +73,7 @@ def _GetTopology(ent, settings, force_fields, add_heuristic_hydrogens=False):
         except Exception as ex:
             # report only for debugging
             ost.LogVerbose("Could not create mm topology for ff %d. %s" \
-                           % (i_ff, type(ex).__name__ + ": " + ex.message))
+                           % (i_ff, type(ex).__name__ + ": " + str(ex)))
             continue
         else:
             # all good
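
Exception objects lost the .message attribute (deprecated since Python 2.6,
removed in Python 3); str(ex) is the portable way to get the text, as used in
the log line above. A standalone sketch:

    try:
        raise ValueError("no matching force field")
    except Exception as ex:
        detail = type(ex).__name__ + ": " + str(ex)
        # detail == 'ValueError: no matching force field'
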
diff --git a/modelling/pymod/_reconstruct_sidechains.py b/modelling/pymod/_reconstruct_sidechains.py
index 9ae6bb43fcb930ca594efb9507cac79eeca67a45..1342e9484a765a32490d00d28f7aa97e3ba78667 100644
--- a/modelling/pymod/_reconstruct_sidechains.py
+++ b/modelling/pymod/_reconstruct_sidechains.py
@@ -566,8 +566,8 @@ def ReconstructSidechains(ent, keep_sidechains=False, build_disulfids=True,
             rot_group[sol].ApplyOnResidue(res_handle, consider_hydrogens=False)
             sidechain.ConnectSidechain(res_handle, rotamer_ids[i])
         except:
-            print "there is a backbone atom missing... ", \
-                  res_handle.GetQualifiedName()
+            print("there is a backbone atom missing... ", \
+                  res_handle.GetQualifiedName())
 
 # these methods will be exported into module
 __all__ = ('ReconstructSidechains',)
diff --git a/modelling/pymod/_ring_punches.py b/modelling/pymod/_ring_punches.py
index e1603eb5efffb84b4cd4a001c8e73c97fd6ff369..bb10682af498dc358c4b90b9ba34013221d610a7 100644
--- a/modelling/pymod/_ring_punches.py
+++ b/modelling/pymod/_ring_punches.py
@@ -18,7 +18,7 @@
 import ost
 from ost import geom
 from promod3 import core
-from _reconstruct_sidechains import ReconstructSidechains
+from ._reconstruct_sidechains import ReconstructSidechains
 from collections import namedtuple
 
 def _AddRing(rings, res, atom_names):
diff --git a/modelling/tests/test_close_gaps.py b/modelling/tests/test_close_gaps.py
index 43a2e2d599f62613f9f8379f996112b2f9242627..ff2e2328cfe82bb433e3d1ec24ce567c6d55cb7e 100644
--- a/modelling/tests/test_close_gaps.py
+++ b/modelling/tests/test_close_gaps.py
@@ -32,7 +32,7 @@ class _FetchLog(ost.LogSink):
         levels = ['ERROR', 'WARNING', 'SCRIPT', 'INFO', 'VERBOSE', 'DEBUG',
                   'TRACE']
         level = levels[severity]
-        if not level in self.messages.keys():
+        if level not in self.messages:
             self.messages[level] = list()
         self.messages[level].append(message.strip())
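
For membership tests, keys() never needs materializing: 'key in d' queries the
dict directly in both Python 2 and 3, which is why the identical log-sink
checks in these test files reduce to 'level not in self.messages'. A
standalone sketch:

    messages = {}
    level = "ERROR"
    if level not in messages:     # direct dict membership, no list(keys())
        messages[level] = []
    messages[level].append("boom")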
 
diff --git a/modelling/tests/test_pipeline.py b/modelling/tests/test_pipeline.py
index 3a08f74b40498939972ec30162cfa64eebb9dcc3..0ab9462adf75b8ec7a0dd9fcad0237d0ffeb0fef 100644
--- a/modelling/tests/test_pipeline.py
+++ b/modelling/tests/test_pipeline.py
@@ -130,7 +130,7 @@ class _FetchLog(ost.LogSink):
         levels = ['ERROR', 'WARNING', 'SCRIPT', 'INFO', 'VERBOSE', 'DEBUG',
                   'TRACE']
         level = levels[severity]
-        if not level in self.messages.keys():
+        if level not in self.messages:
             self.messages[level] = list()
         self.messages[level].append(message.strip())
 
@@ -357,8 +357,8 @@ class PipelineTests(unittest.TestCase):
             self.assertTrue(issue.is_major())
             self.assertEqual(len(issue.residue_list), 0)
         else:
-            print "OpenMM CPU platform not available. " \
-                  "Skipping almost_on_top check."
+            print("OpenMM CPU platform not available. " \
+                  "Skipping almost_on_top check.")
 
     def testBuildFromRawModel(self):
         '''Check that BuildFromRawModel works.'''
@@ -478,8 +478,8 @@ class PipelineTests(unittest.TestCase):
             settings.Locate("phenix.molprobity",
                             env_name='MOLPROBITY_EXECUTABLE')
         except settings.FileNotFound:
-            print 'phenix.molprobity is missing. Please install it and make '+\
-                  'it available in your PATH to execute test testMolProbity.'
+            print('phenix.molprobity is missing. Please install it and make ' +
+                  'it available in your PATH to execute test testMolProbity.')
             return
         # load 1crn and evaluate it
         prot = io.LoadPDB('1crn', remote=True)
diff --git a/scoring/data/convert_binaries.py b/scoring/data/convert_binaries.py
index 4a6210bf1a76efad00ab6768f686166e1515ec87..bf80e53edc805f69a7b8d841c985e46a0face848 100644
--- a/scoring/data/convert_binaries.py
+++ b/scoring/data/convert_binaries.py
@@ -20,7 +20,7 @@ import os
 from promod3 import scoring
 
 if len(sys.argv) != 4:
-  print "usage: python convert_binaries.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS"
+  print("usage: python convert_binaries.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS")
   sys.exit(1)
 
 portable_file_in = sys.argv[1]
diff --git a/scoring/pymod/__init__.py b/scoring/pymod/__init__.py
index e00f68205fd1fecacb0edb1d4e8582ca3323e9c5..8c2d45f1df47ca1f85e3a80ec82fdcd116f2b25d 100644
--- a/scoring/pymod/__init__.py
+++ b/scoring/pymod/__init__.py
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 
-from _scoring import *
+from ._scoring import *
diff --git a/scripts/bump-version.py b/scripts/bump-version.py
index 112216a30217e41fbdd6d364988b08660b56bcb6..c61bd1c945f49a86063c0b60caa4a636d6a0e101 100644
--- a/scripts/bump-version.py
+++ b/scripts/bump-version.py
@@ -2,9 +2,9 @@
 import sys
 
 if len(sys.argv) < 3:
-  print "USAGE: python scripts/bump-version.py PM3_VERSION OST_VERSION"
-  print "-> *_VERSION format is MAJOR.MINOR.PATCH (e.g. 1.9.1)"
-  print "-> assumption is that git tags will exist for those *_VERSION"
+  print("USAGE: python scripts/bump-version.py PM3_VERSION OST_VERSION")
+  print("-> *_VERSION format is MAJOR.MINOR.PATCH (e.g. 1.9.1)")
+  print("-> assumption is that git tags will exist for those *_VERSION")
   sys.exit(1)
 
 # split up version number
diff --git a/sidechain/data/convert_lib.py b/sidechain/data/convert_lib.py
index 515dc9280f5d3894bdc89f5233633f779cdab8d3..d3be57f1ae7b40965449b9cc58965663a3d14528 100644
--- a/sidechain/data/convert_lib.py
+++ b/sidechain/data/convert_lib.py
@@ -20,7 +20,7 @@ import os
 from promod3 import sidechain
 
 if len(sys.argv) != 4:
-	print "usage: python convert_lib.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS"
+	print("usage: python convert_lib.py PORTABLE_FILE_IN RAW_FILE_OUT CLASS")
 	sys.exit(1)
 
 portable_file_in = sys.argv[1]
diff --git a/sidechain/pymod/__init__.py b/sidechain/pymod/__init__.py
index 286993f2ec39f36072634dab870cf384e0c4e14e..518c77053656c30c61b39061c65e0a2e285b7801 100644
--- a/sidechain/pymod/__init__.py
+++ b/sidechain/pymod/__init__.py
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 from ost import geom
-from _sidechain import *
+from ._sidechain import *