diff --git a/modelling/pymod/_closegaps.py b/modelling/pymod/_closegaps.py
index cadcf021a31ba6a128204d359cc89d2f8e7eb95c..eea79e5e04a68e5bc2ce0591cc33cc1d4476af36 100644
--- a/modelling/pymod/_closegaps.py
+++ b/modelling/pymod/_closegaps.py
@@ -1150,13 +1150,39 @@ def FillLoopsByMonteCarlo(mhandle, torsion_sampler, max_loops_to_search=6,
             gap_idx = new_idx
 
 
-def ModelTermini(mhandle, structure_db = None, fragment_cache_dir = None,
-                 mc_num_loops = 20, avg_sampling_per_position = 600,
-                 scratch_dir = os.getcwd()):
+def ModelTermini(mhandle, structure_db = None, mc_num_loops = 20,
+                 mc_steps = 10000, fragment_cache_dir = None, 
+                 scratch_dir = None):
     '''Try to model termini with Monte Carlo sampling.
-
     Use with care! This is an experimental feature which will increase coverage
-    but we do not assume that the resulting termini are of high quality!
+    but we do not assume that the resulting termini are of high quality! As an
+    optional feature, the function provides a fragment caching functionality. If
+    you call this function several times for a chain with the same SEQRES, you
+    might want to provide a **fragment_cache_dir** and a **scratch_dir**. 
+    The idea is that **fragment_cache_dir** is a central fragment cache based 
+    on SEQRES md5 hashes used to search for fragments of a certain chain. 
+    It copies over the cache file to **scratch_dir**, 
+    does all required magic and then copies the fragments back to 
+    **fragment_cache_dir** so it can be used in later function calls.
+
+    :param mhandle: Modelling handle on which to apply change.
+    :type mhandle:  :class:`ModellingHandle`
+
+    :param structure_db: Database for fragment extraction, 
+                         default db will be loaded if not provided
+    :type structure_db: :class:`~promod3.loop.StructureDB`
+
+    :param mc_num_loops: Number of Monte Carlo trajectories that are generated
+    :type mc_num_loops:  :class:`int`
+
+    :param mc_steps:    Number of steps per Monte Carlo trajectory
+    :type mc_steps:     :class:`int`
+
+    :param fragment_cache_dir: Central fragment cache directory
+    :type fragment_cache_dir:  :class:`str`
+
+    :param scratch_dir: Local scratch directory
+    :type scratch_dir:  :class:`str`
     '''
 
     prof_name = 'closegaps::ModelTermini'
@@ -1170,6 +1196,16 @@ def ModelTermini(mhandle, structure_db = None, fragment_cache_dir = None,
         if not os.path.exists(fragment_cache_dir):
             err = "You provided a fragment cache path, that doesn't exist!"
             raise RuntimeError(err)
+
+        # if there is a valid fragment_cache dir, we also need a valid
+        # scratch_dir
+        if not os.path.exists(scratch_dir):
+            err = "If a fragment_cache_dir is provided you also must provide "
+            err += "a valid scratch_dir. "
+            err += scratch_dir
+            err += " seems to be invalid..."
+            raise RuntimeError(err)
+
         fragment_cache = core.FileCache(fragment_cache_dir)
 
     # get terminal gaps (copies as we'll clear them as we go)
@@ -1223,9 +1259,15 @@ def ModelTermini(mhandle, structure_db = None, fragment_cache_dir = None,
                 # yeah, we can load cached stuff
                 fragger_handle.LoadCached(os.path.join(scratch_dir, cache_filename))
 
+        # GenerateTerminiTrajectories takes an average sampling per position
+        # parameter instead of a total number of sampling steps.
+        # we therefore have to transform this value...
+        avg_sampling = max(1, mc_steps / max(1, actual_gap.GetLength() - 9))
+
         # let's get the loop candidates
         candidates = GenerateTerminiTrajectories(mhandle, actual_gap, 20,
-                                                      fragger_handle = fragger_handle)
+                                                 avg_sampling_per_position = avg_sampling,
+                                                 fragger_handle = fragger_handle)
 
         # Once it's done, we copy the fragments back to cache
         # in case of multiple jobs accessing the same data in parallel, this leads