# Copyright (c) 2013-2018, SIB - Swiss Institute of Bioinformatics and
# Biozentrum - University of Basel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test example codes from documentation.
Each example code is given as a python script in scripts-folder and may use
data from data-folder.
Scripts are executed with pm within BUILDFOLDER/tests/doc.
Output of scripts shall be checked and cleaned up by caller.
make target: test_doctests.py_run
Don't forget to add new data-files and scripts to CMakeLists.txt!
"""
import unittest
import os
import subprocess
from ost import io, settings
from promod3 import loop
class DocTests(unittest.TestCase):
@classmethod
def setPmBinary(cls, pm_bin):
'''Set location of pm-binary.'''
cls.pm_bin = pm_bin
@classmethod
def setUpClass(cls):
'''Ensure pm-bin is set.'''
if not hasattr(cls, 'pm_bin'):
# get pm binary (assume: run as unit test via cmake)
bld_dir = os.path.abspath(os.path.dirname(os.path.dirname(os.getcwd())))
pm_bin = os.path.join(bld_dir, 'stage', 'bin', 'pm')
cls.pm_bin = pm_bin
def compareLines(self, actual, expected):
"""Compare expected and actual, line-by-line ignoring whitespace."""
lines_actual = actual.splitlines()
lines_expected = expected.splitlines()
self.assertEqual(len(lines_actual), len(lines_expected))
for (la, le) in zip(lines_actual, lines_expected):
self.assertEqual(la.strip(), le.strip())
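    # Illustrative behaviour of compareLines: '  OK ' and 'OK' compare equal
    # after stripping, but a differing number of lines fails the first assertion.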
def runPM(self, script_path, arguments=[]):
"""Run script with pm and given arguments.
Returns tuple (return-code, stdout, stderr).
"""
# launch it
cmd = [self.pm_bin, script_path] + arguments
job = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sout, serr = job.communicate()
return job.returncode, sout, serr
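    # Illustrative uses of runPM (mirroring the direct calls in the tests
    # below; checkPMRun is the preferred wrapper for most cases):
    #   rcode, sout, serr = self.runPM(os.path.join('scripts', 'action_test.py'))
    #   rcode, sout, serr = self.runPM(script_path, ['-h'])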
def checkPMRun(self, script_name, arguments=[], expect_rcode=0,
expect_stdout=None, expect_stderr=None):
"""
Run script with pm and test result.
:param script_name: Filename within script-folder to be called.
:param arguments: List of strings of arguments to be passed.
:param expect_rcode: Expected return code from call.
:param expect_stdout: Expected console output to stdout.
:param expect_stderr: Expected console output to stderr.
Note that ost-logs go to stderr by default!
For both expect_stdout and expect_stderr, None can be passed to ignore
console output. If given, it is checked line-by-line, while ignoring
whitespace for each line.
"""
# run it
rcode, sout, serr = self.runPM(os.path.join('scripts', script_name),
arguments)
# check return value
self.assertEqual(rcode, expect_rcode)
# check cmd.line-out if desired
if expect_stdout is not None:
self.compareLines(sout, expect_stdout)
if expect_stderr is not None:
self.compareLines(serr, expect_stderr)
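    # Illustrative uses of checkPMRun (both patterns appear in the tests
    # below): either ignore console output entirely, or pin stdout and stderr:
    #   self.checkPMRun('hello_world.py', [], 0)
    #   self.checkPMRun('core_msg_error.py', [], 1, '', 'Something failed!')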
################################################################
def testAction(self):
# we want to ensure that no test_actions.pyc file is created
cur_dir = os.getcwd()
pyc_file = os.path.join(cur_dir, 'scripts', 'test_actions.pyc')
if os.path.exists(pyc_file):
os.remove(pyc_file)
# to emulate action tests, change to actions folder
os.chdir("../actions")
# run it
rcode, sout, serr = self.runPM(os.path.join(cur_dir, 'scripts',
'action_test.py'))
# check return code and last line of output
self.assertEqual(rcode, 0)
out_lines = serr.splitlines()
self.assertEqual(out_lines[-1].strip(), 'OK')
# go back to proper folder (important!)
os.chdir(cur_dir)
# check that no pyc file was created
self.assertFalse(os.path.exists(pyc_file))
def testActionVerbose(self):
# to emulate action tests, change to actions folder
cur_dir = os.getcwd()
os.chdir("../actions")
# run it
rcode, sout, serr = self.runPM(os.path.join(cur_dir, 'scripts',
'action_test_verbose.py'))
# check return code and parts of output
self.assertEqual(rcode, 0)
out_lines = serr.splitlines()
self.assertRegexpMatches(out_lines[0].strip(), "stdout of '.*pm help'")
self.assertEqual(out_lines[1].strip(), "------")
line_nr = 2
while not out_lines[line_nr].strip() == "------":
line_nr += 1
self.assertGreater(line_nr, 2)
self.assertRegexpMatches(out_lines[line_nr+1], "stderr of '.*pm help'")
self.assertEqual(out_lines[line_nr+2].strip(), "------")
self.assertEqual(out_lines[line_nr+3].strip(), "------")
# go back to proper folder (important!)
os.chdir(cur_dir)
################################################################
def testCorePm3argparse(self):
# run it
script_path = os.path.join('scripts', 'core_pm3argparse.py')
rcode, sout, serr = self.runPM(script_path)
# check return code
self.assertEqual(rcode, 2)
# rerun with -h flag
rcode, sout, serr = self.runPM(script_path, ['-h'])
# check return code and some of output
out_lines = serr.splitlines()
self.assertRegexpMatches(out_lines[0], "usage: .*")
self.assertEqual(out_lines[2].strip(),
"Place the description of your script right in the file and import")
self.assertEqual(out_lines[3].strip(),
"it via '__doc__' as description to the parser ('-h', '--help').")
self.assertEqual(out_lines[5].strip(), "optional arguments:")
self.assertGreater(len(out_lines), 5)
def testCoreMsgError(self):
# run it
self.checkPMRun('core_msg_error.py', [], 1, '',
'Something failed!')
def testCoreFileChecks(self):
# run it
self.checkPMRun('core_file_checks.py',
[os.path.join('data', '1eye.pdb')])
################################################################
def testUnittestSidechain(self):
# run it
script_path = os.path.join('scripts', 'unittest_sidechain_reconstruction.py')
rcode, sout, serr = self.runPM(script_path)
# check return code and last line of output
self.assertEqual(rcode, 0)
out_lines = serr.splitlines()
self.assertEqual(out_lines[-1].strip(), 'OK')
def testHelloWorld(self):
# run it
self.checkPMRun('hello_world.py', [], 0)
# check that result exists and is readable
io.LoadPDB('test.pdb')
# clean up
os.remove('test.pdb')
################################################################
def testLoopMain(self):
# just check that it doesn't crash
self.checkPMRun('loop_main.py', [], 0)
def testLoopBackbone(self):
# run it
self.checkPMRun('loop_backbone.py', [], 0,
'Looking at position 0\n' +
'psi: -0.7854\n' +
'Looking at position 1\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 2\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 3\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 4\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 5\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 6\n' +
'phi: -1.0472\n' +
'psi: -0.7854\n' +
'Looking at position 7\n' +
'phi: -1.0472')
# check that result exists and is readable
io.LoadPDB('randomized_fragment.pdb')
# clean up
os.remove('randomized_fragment.pdb')
def testLoopStructureDB(self):
# run it
self.checkPMRun('loop_structure_db.py', [], 0)
# check that result exists and is readable
loop.StructureDB.LoadPortable('my_db_one.dat')
loop.StructureDB.LoadPortable('my_db_two.dat')
# clean up
os.remove('my_db_one.dat')
os.remove('my_db_two.dat')
def testLoopFragDB(self):
# run it
self.checkPMRun('loop_frag_db.py', [], 0)
# check that result exists and is readable
io.LoadPDB('0.pdb')
# clean up
os.remove('0.pdb')
def testLoopFragger(self):
# run it
script_path = os.path.join('scripts', 'loop_fragger.py')
rcode, sout, serr = self.runPM(script_path)
# check return code and partial output
self.assertEqual(rcode, 0)
out_lines = sout.splitlines()
self.assertEqual(len(out_lines), 101)
# NOTE: this last output depends on the structure-db!
self.assertEqual(out_lines[-1].strip(),
'Fraction of fragments below 3A: 0.47')
# check that result exists and is readable
loop.FraggerMap.LoadBB('frag_map.dat')
# clean up
os.remove('frag_map.dat')
def testLoopTorsionSampler(self):
# fail-safe: exclude test if python-libs missing
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
except ImportError:
            print('Missing python libraries, skipping testLoopTorsionSampler...')
return
# run it
self.checkPMRun('loop_torsion_sampler.py', [], 0)
# check that result exists
self.assertTrue(os.path.isfile('torsion_plot.png'))
# clean up
os.remove('torsion_plot.png')
def testLoopAllAtom(self):
# run it
self.checkPMRun('loop_all_atom.py', [], 0)
# check that result exists and is readable
io.LoadPDB('all_atom_pos.pdb')
io.LoadPDB('all_atom_env.pdb')
# clean up
os.remove('all_atom_pos.pdb')
os.remove('all_atom_env.pdb')
def testLoopMmSysCreation(self):
# just check that it doesn't crash
self.checkPMRun('loop_mm_sys_creation.py', [], 0)
# check that result exists and is readable
io.LoadPDB('mm_sys_output.pdb')
# clean up
os.remove('mm_sys_output.pdb')
################################################################
def testScoringMain(self):
# just check that it doesn't crash
self.checkPMRun('scoring_main.py', [], 0)
################################################################
def testModellingAll(self):
# run it
self.checkPMRun('modelling_all.py', [], 0)
# check that result exists and is readable
io.LoadPDB('model.pdb')
# clean up
os.remove('model.pdb')
def testModellingSteps(self):
# run it
self.checkPMRun('modelling_steps.py', [], 0)
# check that result exists and is readable
io.LoadPDB('model.pdb')
# clean up
os.remove('model.pdb')
def testModellingCloseSmallDeletions(self):
# run it
self.checkPMRun('modelling_close_small_deletions.py', [], 0,
'Number of gaps before: 1\n' +
'Number of gaps after: 0')
def testModellingMergeGapsByDistance(self):
# run it
self.checkPMRun('modelling_merge_gaps_by_distance.py', [], 0,
'Number of gaps before: 2\n' +
'Number of gaps after: 1')
def testModellingFillLoopsByDatabase(self):
# run it
self.checkPMRun('modelling_fill_loops_by_database.py', [], 0,
'Number of gaps before: 1\n' +
'Number of gaps after: 0')
def testModellingFillLoopsByMonteCarlo(self):
# run it
self.checkPMRun('modelling_fill_loops_by_monte_carlo.py', [], 0,
'Number of gaps before: 1\n' +
'Number of gaps after: 0')
def testModellingModelTermini(self):
# run it
self.checkPMRun('modelling_model_termini.py', [], 0,
'Number of gaps before: 2\n' +
'Number of gaps after: 0')
def testModellingMonteCarlo(self):
# run it
self.checkPMRun('modelling_monte_carlo.py', [], 0)
# check that result exists and is readable
io.LoadPDB('sampled_frag.pdb')
# clean up
os.remove('sampled_frag.pdb')
def testModellingLoopCandidates(self):
# run it
self.checkPMRun('modelling_loop_candidates.py', [], 0)
# check that result exists and is readable
io.LoadPDB('modified_crambin.pdb')
# clean up
os.remove('modified_crambin.pdb')
def testModellingLoopScoring(self):
# run it
self.checkPMRun('modelling_loop_scoring.py', [], 0)
# check that result exists and is readable
io.LoadPDB('model.pdb')
# clean up
os.remove('model.pdb')
def testModellingReconstructSidechains(self):
# run it
self.checkPMRun('modelling_reconstruct_sidechains.py', [], 0)
# check that result exists and is readable
io.LoadPDB('sidechain_test_orig.pdb')
io.LoadPDB('sidechain_test_rec.pdb')
# clean up
os.remove('sidechain_test_orig.pdb')
os.remove('sidechain_test_rec.pdb')
def testModellingSidechainReconstructor(self):
# run it
self.checkPMRun('modelling_sidechain_reconstructor.py', [], 0)
# check that result exists and is readable
io.LoadPDB('sc_rec_test.pdb')
# clean up
os.remove('sc_rec_test.pdb')
def testModellingAllAtomRelaxer(self):
# run it
self.checkPMRun('modelling_allatomrelaxer.py', [], 0)
# check that result exists and is readable
io.LoadPDB('aa_relax_test.pdb')
# clean up
os.remove('aa_relax_test.pdb')
################################################################
def testSidechainSteps(self):
# run it
self.checkPMRun('sidechain_steps.py', [], 0)
# check that result exists and is readable
io.LoadPDB('example_reconstruction.pdb')
# clean up
os.remove('example_reconstruction.pdb')
if __name__ == "__main__":
from ost import testutils
testutils.RunTests()