""" Evaluate model with non-polymer/small molecule ligands against reference. Example: ost compare-ligand-structures \\ -m model.pdb \\ -ml ligand.sdf \\ -r reference.cif \\ --lddt-pli --rmsd Structures of polymer entities (proteins and nucleotides) can be given in PDB or mmCIF format. Ligands can be given as path to SDF files containing the ligand for both model (--model-ligands/-ml) and reference (--reference-ligands/-rl). If omitted, ligands will be detected in the model and reference structures. For structures given in mmCIF format, this is based on the annotation as "non polymer entity" (i.e. ligands in the _pdbx_entity_nonpoly mmCIF category) and works reliably. For structures given in legacy PDB format, this is based on the HET records which is usually only set properly on files downloaded from the PDB (and even then, this is not always the case). This is normally not what you want. You should always give ligands as SDF for structures in legacy PDB format. Polymer/oligomeric ligands (saccharides, peptides, nucleotides) are not supported. Only minimal cleanup steps are performed (remove hydrogens and deuteriums, and for structures of polymers only, remove unknown atoms and cleanup element column). Ligands in mmCIF and PDB files must comply with the PDB component dictionary definition, and have properly named residues and atoms, in order for ligand connectivity to be loaded correctly. Ligands loaded from SDF files are exempt from this restriction, meaning any arbitrary ligand can be assessed. Output can be written in two format: JSON (default) or CSV, controlled by the --output-format/-of argument. Without additional options, the JSON ouput is a dictionary with four keys: * "model_ligands": A list of ligands in the model. If ligands were provided explicitly with --model-ligands, elements of the list will be the paths to the ligand SDF file(s). Otherwise, they will be the chain name, residue number and insertion code of the ligand, separated by a dot. * "reference_ligands": A list of ligands in the reference. If ligands were provided explicitly with --reference-ligands, elements of the list will be the paths to the ligand SDF file(s). Otherwise, they will be the chain name, residue number and insertion code of the ligand, separated by a dot. * "status": SUCCESS if everything ran through. In case of failure, the only content of the JSON output will be \"status\" set to FAILURE and an additional key: "traceback". * "ost_version": The OpenStructure version used for computation. Each score is opt-in and the respective results are available in three keys: * "assigned_scores": A list with data for each pair of assigned ligands. Data is yet another dict containing score specific information for that ligand pair. The following keys are there in any case: * "model_ligand": The model ligand * "reference_ligand": The target ligand to which model ligand is assigned to * "score": The score * "coverage": Fraction of model ligand atoms which are covered by target ligand. Will only deviate from 1.0 if --substructure-match is enabled. * "model_ligand_unassigned_reason": Dictionary with unassigned model ligands as key and an educated guess why this happened. * "reference_ligand_unassigned_reason": Dictionary with unassigned target ligands as key and an educated guess why this happened. If --full-results is enabled, another element with key "full_results" is added. This is a list of data items for each pair of model/reference ligands. The data items follow the same structure as in "assigned_scores". If no score for a specific pair of ligands could be computed, "score" and "coverage" are set to null and a key "reason" is added giving an educated guess why this happened. CSV output is a table of comma-separated values, with one line for each reference ligand (or one model ligand if the --by-model-ligand-output flag was set). The following column is always available: * reference_ligand/model_ligand: If reference ligands were provided explicitly with --reference-ligands, elements of the list will be the paths to the ligand SDF file(s). Otherwise, they will be the chain name, residue number and insertion code of the ligand, separated by a dot. If the --by-model-ligand-output flag was set, this will be model ligand instead, following the same rules. If lDDT-PLI was enabled with --lddt-pli, the following columns are added: * "lddt_pli", "lddt_pli_coverage" and "lddt_pli_(model|reference)_ligand" are the lDDT-PLI score result, the corresponding coverage and assigned model ligand (or reference ligand if the --by-model-ligand-output flag was set) if an assignment was found, respectively, empty otherwise. * "lddt_pli_unassigned" is empty if an assignment was found, otherwise it lists the short reason this reference ligand was unassigned. If BiSyRMSD was enabled with --rmsd, the following columns are added: * "rmsd", "rmsd_coverage". "lddt_lp" "bb_rmsd" and "rmsd_(model|reference)_ligand" are the BiSyRMSD, the corresponding coverage, lDDT-LP, backbone RMSD and assigned model ligand (or reference ligand if the --by-model-ligand-output flag was set) if an assignment was found, respectively, empty otherwise. * "rmsd_unassigned" is empty if an assignment was found, otherwise it lists the short reason this reference ligand was unassigned. """ import argparse import csv from io import StringIO import json import os import sys import traceback import ost from ost import io from ost.mol.alg import ligand_scoring_base from ost.mol.alg import ligand_scoring_lddtpli from ost.mol.alg import ligand_scoring_scrmsd def _ParseArgs(): parser = argparse.ArgumentParser(description = __doc__, formatter_class=argparse.RawDescriptionHelpFormatter, prog="ost compare-ligand-structures") parser.add_argument( "-m", "--mdl", "--model", dest="model", required=True, help=("Path to model file.")) parser.add_argument( "-ml", "--mdl-ligands", "--model-ligands", dest="model_ligands", nargs="*", default=None, help=("Path to model ligand files.")) parser.add_argument( "-r", "--ref", "--reference", dest="reference", required=True, help=("Path to reference file.")) parser.add_argument( "-rl", "--ref-ligands", "--reference-ligands", dest="reference_ligands", nargs="*", default=None, help=("Path to reference ligand files.")) parser.add_argument( "-o", "--out", "--output", dest="output", default=None, help=("Output file name. " "Default depends on format: out.json or out.csv")) parser.add_argument( "-mf", "--mdl-format", "--model-format", dest="model_format", required=False, default=None, choices=["pdb", "cif", "mmcif"], help=("Format of model file. pdb reads pdb but also pdb.gz, same " "applies to cif/mmcif. Inferred from filepath if not given.")) parser.add_argument( "-rf", "--reference-format", "--ref-format", dest="reference_format", required=False, default=None, choices=["pdb", "cif", "mmcif"], help=("Format of reference file. pdb reads pdb but also pdb.gz, same " "applies to cif/mmcif. Inferred from filepath if not given.")) parser.add_argument( "-of", "--out-format", "--output-format", dest="output_format", choices=["json", "csv"], default="json", help=("Output format, JSON or CSV, in lowercase. " "default: json")) parser.add_argument( "-csvm", "--by-model-ligand", "--by-model-ligand-output", dest="output_by_model_ligand", default=False, action="store_true", help=("For CSV output, this flag changes the output so that each line " "reports one model ligand, instead of a reference ligand. " "Has no effect with JSON output.")) parser.add_argument( "--csv-extra-header", dest="csv_extra_header", default=None, type=str, help=("Extra header prefix for CSV output. This allows adding " "additional annotations (such as target ID, group, etc) to the " "output")) parser.add_argument( "--csv-extra-data", dest="csv_extra_data", default=None, type=str, help=("Additional data (columns) for CSV output.")) parser.add_argument( "-mb", "--model-biounit", dest="model_biounit", required=False, default=None, type=str, help=("Only has an effect if model is in mmcif format. By default, " "the asymmetric unit (AU) is used for scoring. If there are " "biounits defined in the mmcif file, you can specify the " "ID (as a string) of the one which should be used.")) parser.add_argument( "-rb", "--reference-biounit", dest="reference_biounit", required=False, default=None, type=str, help=("Only has an effect if reference is in mmcif format. By default, " "the asymmetric unit (AU) is used for scoring. If there are " "biounits defined in the mmcif file, you can specify the " "ID (as a string) of the one which should be used.")) parser.add_argument( "-ft", "--fault-tolerant", dest="fault_tolerant", default=False, action="store_true", help=("Fault tolerant parsing.")) parser.add_argument( "-rna", "--residue-number-alignment", dest="residue_number_alignment", default=False, action="store_true", help=("Make alignment based on residue number instead of using " "a global BLOSUM62-based alignment (NUC44 for nucleotides).")) parser.add_argument( "-sm", "--substructure-match", dest="substructure_match", default=False, action="store_true", help=("Allow incomplete (ie partially resolved) target ligands.")) parser.add_argument( "-cd", "--coverage-delta", dest="coverage_delta", default=0.2, type=float, help=("Coverage delta for partial ligand assignment.")) parser.add_argument( '-v', '--verbosity', dest="verbosity", type=int, default=2, help="Set verbosity level. Defaults to 2 (Script).") parser.add_argument( "--full-results", dest="full_results", default=False, action="store_true", help=("Outputs scoring results for all model/reference ligand pairs " "and store as key \"full_results\"")) # arguments relevant for lddt-pli parser.add_argument( "--lddt-pli", dest="lddt_pli", default=False, action="store_true", help=("Compute lDDT-PLI scores and store as key \"lddt_pli\".")) parser.add_argument( "--lddt-pli-radius", dest="lddt_pli_radius", default=6.0, type=float, help=("lDDT inclusion radius for lDDT-PLI.")) parser.add_argument( "--lddt-pli-add-mdl-contacts", dest="lddt_pli_add_mdl_contacts", default=True, action="store_true", help=("Add model contacts when computing lDDT-PLI.")) parser.add_argument( "--no-lddt-pli-add-mdl-contacts", dest="lddt_pli_add_mdl_contacts", default=True, action="store_false", help=("DO NOT add model contacts when computing lDDT-PLI.")) # arguments relevant for rmsd parser.add_argument( "--rmsd", dest="rmsd", default=False, action="store_true", help=("Compute RMSD scores and store as key \"rmsd\".")) parser.add_argument( "--radius", dest="radius", default=4.0, type=float, help=("Inclusion radius to extract reference binding site that is used " "for RMSD computation. Any residue with atoms within this " "distance of the ligand will be included in the binding site.")) parser.add_argument( "--lddt-lp-radius", dest="lddt_lp_radius", default=15.0, type=float, help=("lDDT inclusion radius for lDDT-LP.")) parser.add_argument( "-fbs", "--full-bs-search", dest="full_bs_search", default=False, action="store_true", help=("Enumerate all potential binding sites in the model when " "searching rigid superposition for RMSD computation")) parser.add_argument( "-ms", "--max--symmetries", dest="max_symmetries", default=1e4, type=int, help=("If more than that many isomorphisms exist for a target-ligand " "pair, it will be ignored and reported as unassigned.")) args = parser.parse_args() if args.output is None: args.output = "out.%s" % args.output_format return args def _CheckCompoundLib(): clib = ost.conop.GetDefaultLib() if not clib: ost.LogError("A compound library is required for this action. " "Please refer to the OpenStructure website: " "https://openstructure.org/docs/conop/compoundlib/.") raise RuntimeError("No compound library found") def _GetStructureFormat(structure_path, sformat=None): """Get the structure format and return it as "pdb" or "mmcif". """ if sformat is None: # Determine file format from suffix. ext = structure_path.split(".") if ext[-1] == "gz": ext = ext[:-1] if len(ext) <= 1: raise Exception(f"Could not determine format of file " f"{structure_path}.") sformat = ext[-1].lower() if sformat in ["mmcif", "cif"]: return "mmcif" elif sformat == "pdb": return sformat else: raise Exception(f"Unknown/unsupported file format found for " f"file {structure_path}.") def _LoadStructure(structure_path, sformat, fault_tolerant, bu_id): """Read OST entity either from mmCIF or PDB. The returned structure has structure_path attached as structure name """ # Load the structure if sformat == "mmcif": if bu_id is not None: cif_entity, cif_seqres, cif_info = \ io.LoadMMCIF(structure_path, info=True, seqres=True, fault_tolerant=fault_tolerant) for biounit in cif_info.biounits: if biounit.id == bu_id: entity = ost.mol.alg.CreateBU(cif_entity, biounit) break else: raise RuntimeError(f"No biounit found with ID '{bu_id}'.") else: entity = io.LoadMMCIF(structure_path, fault_tolerant = fault_tolerant) if len(entity.residues) == 0: raise Exception(f"No residues found in file: {structure_path}") else: entity = io.LoadPDB(structure_path, fault_tolerant = fault_tolerant) if len(entity.residues) == 0: raise Exception(f"No residues found in file: {structure_path}") # restore old loglevel and return entity.SetName(structure_path) return entity def _LoadLigands(ligands): """ Load a list of ligands from file names. Return a list of entities oif the same size. """ if ligands is None: return None else: return [_LoadLigand(lig) for lig in ligands] def _LoadLigand(file): """ Load a single ligand from file names. Return an entity. """ ent = ost.io.LoadEntity(file, format="sdf") ed = ent.EditXCS() ed.RenameChain(ent.chains[0], file) ed.UpdateICS() return ent def _CleanupStructure(entity): """Cleans up the structure. Currently only removes hydrogens (and deuterium atoms). """ return ost.mol.CreateEntityFromView(entity.Select( "ele != H and ele != D"), include_exlusive_atoms=False) def _CleanupLigands(ligands): """Clean up a list of structures. """ if ligands is None: return None else: return [_CleanupStructure(lig) for lig in ligands] def _Validate(structure, ligands, legend, fault_tolerant=False): """Validate the structure. If fault_tolerant is True, only warns in case of problems. If False, raise them as ValueErrors. At the moment this chiefly checks if ligands are in the structure and are given explicitly at the same time. """ if ligands is not None: for residue in structure.residues: if residue.is_ligand: msg = "Ligand residue %s found in %s polymer structure" %( residue.qualified_name, legend) if fault_tolerant: ost.LogWarning(msg) else: raise ValueError(msg) def _QualifiedResidueNotation(r): """Return a parsable string of the residue in the format: ChainName.ResidueNumber.InsertionCode.""" resnum = r.number return "{cname}.{rnum}.{ins_code}".format( cname=r.chain.name, rnum=resnum.num, ins_code=resnum.ins_code.strip("\u0000"), ) def _SetupLDDTPLIScorer(model, model_ligands, reference, reference_ligands, args): return ligand_scoring_lddtpli.LDDTPLIScorer(model, reference, model_ligands = model_ligands, target_ligands = reference_ligands, resnum_alignments = args.residue_number_alignment, rename_ligand_chain = True, substructure_match = args.substructure_match, coverage_delta = args.coverage_delta, lddt_pli_radius = args.lddt_pli_radius, add_mdl_contacts = args.lddt_pli_add_mdl_contacts, max_symmetries = args.max_symmetries) def _SetupSCRMSDScorer(model, model_ligands, reference, reference_ligands, args): return ligand_scoring_scrmsd.SCRMSDScorer(model, reference, model_ligands = model_ligands, target_ligands = reference_ligands, resnum_alignments = args.residue_number_alignment, rename_ligand_chain = True, substructure_match = args.substructure_match, coverage_delta = args.coverage_delta, bs_radius = args.radius, lddt_lp_radius = args.lddt_lp_radius, full_bs_search = args.full_bs_search, max_symmetries = args.max_symmetries) def _Process(model, model_ligands, reference, reference_ligands, args): out = dict() ########################## # Setup required scorers # ########################## lddtpli_scorer = None scrmsd_scorer = None if args.lddt_pli: lddtpli_scorer = _SetupLDDTPLIScorer(model, model_ligands, reference, reference_ligands, args) if args.rmsd: scrmsd_scorer = _SetupSCRMSDScorer(model, model_ligands, reference, reference_ligands, args) # basic info on ligands only requires baseclass functionality # doesn't matter which scorer we use scorer = None if lddtpli_scorer is not None: scorer = lddtpli_scorer elif scrmsd_scorer is not None: scorer = scrmsd_scorer else: ost.LogWarning("No score selected, output will be empty.") # just create SCRMSD scorer to fill basic ligand info scorer = _SetupSCRMSDScorer(model, model_ligands, reference, reference_ligands, args) #################################### # Extract / Map ligand information # #################################### if model_ligands is not None: # Replace model ligand by path if len(model_ligands) == len(scorer.model_ligands): # Map ligand => path out["model_ligands"] = args.model_ligands elif len(model_ligands) < len(scorer.model_ligands): # Multi-ligand SDF files were given # Map ligand => path:idx out["model_ligands"] = list() for ligand, filename in zip(model_ligands, args.model_ligands): assert isinstance(ligand, ost.mol.EntityHandle) for i, residue in enumerate(ligand.residues): out["model_ligands"].append(f"{filename}:{i}") else: # This should never happen and would be a bug raise RuntimeError("Fewer ligands in the model scorer " "(%d) than given (%d)" % ( len(scorer.model_ligands), len(model_ligands))) else: # Map ligand => qualified residue out["model_ligands"] = [_QualifiedResidueNotation(l) for l in scorer.model_ligands] if reference_ligands is not None: # Replace reference ligand by path if len(reference_ligands) == len(scorer.target_ligands): # Map ligand => path out["reference_ligands"] = args.reference_ligands elif len(reference_ligands) < len(scorer.target_ligands): # Multi-ligand SDF files were given # Map ligand => path:idx out["reference_ligands"] = list() for ligand, filename in zip(reference_ligands, args.reference_ligands): assert isinstance(ligand, ost.mol.EntityHandle) for i, residue in enumerate(ligand.residues): out["reference_ligands"].append(f"{filename}:{i}") else: # This should never happen and would be a bug raise RuntimeError("Fewer ligands in the reference scorer " "(%d) than given (%d)" % ( len(scorer.target_ligands), len(reference_ligands))) else: # Map ligand => qualified residue out["reference_ligands"] = [_QualifiedResidueNotation(l) for l in scorer.target_ligands] ################## # Compute scores # ################## if args.lddt_pli: LogScript("Computing lDDT-PLI scores") out["lddt_pli"] = dict() out["lddt_pli"]["assigned_scores"] = list() for lig_pair in lddtpli_scorer.assignment: score = float(lddtpli_scorer.score_matrix[lig_pair[0], lig_pair[1]]) coverage = float(lddtpli_scorer.coverage_matrix[lig_pair[0], lig_pair[1]]) aux_data = lddtpli_scorer.aux_matrix[lig_pair[0], lig_pair[1]] target_key = out["reference_ligands"][lig_pair[0]] model_key = out["model_ligands"][lig_pair[1]] out["lddt_pli"]["assigned_scores"].append({"score": score, "coverage": coverage, "lddt_pli_n_contacts": aux_data["lddt_pli_n_contacts"], "model_ligand": model_key, "reference_ligand": target_key, "bs_ref_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res"]], "bs_mdl_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_mdl_res"]]}) out["lddt_pli"]["model_ligand_unassigned_reason"] = dict() for i in lddtpli_scorer.unassigned_model_ligands: key = out["model_ligands"][i] reason = lddtpli_scorer.guess_model_ligand_unassigned_reason(i) out["lddt_pli"]["model_ligand_unassigned_reason"][key] = reason out["lddt_pli"]["reference_ligand_unassigned_reason"] = dict() for i in lddtpli_scorer.unassigned_target_ligands: key = out["reference_ligands"][i] reason = lddtpli_scorer.guess_target_ligand_unassigned_reason(i) out["lddt_pli"]["reference_ligand_unassigned_reason"][key] = reason if args.full_results: out["lddt_pli"]["full_results"] = list() shape = lddtpli_scorer.score_matrix.shape for ref_lig_idx in range(shape[0]): for mdl_lig_idx in range(shape[1]): state = int(lddtpli_scorer.state_matrix[(ref_lig_idx, mdl_lig_idx)]) target_key = out["reference_ligands"][ref_lig_idx] model_key = out["model_ligands"][mdl_lig_idx] if state == 0: score = float(lddtpli_scorer.score_matrix[(ref_lig_idx, mdl_lig_idx)]) coverage = float(lddtpli_scorer.coverage_matrix[(ref_lig_idx, mdl_lig_idx)]) aux_data = lddtpli_scorer.aux_matrix[(ref_lig_idx, mdl_lig_idx)] out["lddt_pli"]["full_results"].append({"score": score, "coverage": coverage, "lddt_pli_n_contacts": aux_data["lddt_pli_n_contacts"], "model_ligand": model_key, "reference_ligand": target_key, "bs_ref_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res"]], "bs_mdl_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_mdl_res"]]}) else: reason = lddtpli_scorer.state_decoding[state] out["lddt_pli"]["full_results"].append({"score": None, "coverage": None, "model_ligand": model_key, "reference_ligand": target_key, "reason": reason}) if args.rmsd: LogScript("Computing BiSyRMSD scores") out["rmsd"] = dict() out["rmsd"]["assigned_scores"] = list() for lig_pair in scrmsd_scorer.assignment: score = float(scrmsd_scorer.score_matrix[lig_pair[0], lig_pair[1]]) coverage = float(scrmsd_scorer.coverage_matrix[lig_pair[0], lig_pair[1]]) aux_data = scrmsd_scorer.aux_matrix[lig_pair[0], lig_pair[1]] target_key = out["reference_ligands"][lig_pair[0]] model_key = out["model_ligands"][lig_pair[1]] transform_data = aux_data["transform"].data out["rmsd"]["assigned_scores"].append({"score": score, "coverage": coverage, "lddt_lp": aux_data["lddt_lp"], "bb_rmsd": aux_data["bb_rmsd"], "model_ligand": model_key, "reference_ligand": target_key, "chain_mapping": aux_data["chain_mapping"], "bs_ref_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res"]], "bs_ref_res_mapped": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res_mapped"]], "bs_mdl_res_mapped": [_QualifiedResidueNotation(r) for r in aux_data["bs_mdl_res_mapped"]], "inconsistent_residues": [_QualifiedResidueNotation(r[0]) + \ "-" +_QualifiedResidueNotation(r[1]) for r in aux_data["inconsistent_residues"]], "transform": [transform_data[i:i + 4] for i in range(0, len(transform_data), 4)]}) out["rmsd"]["model_ligand_unassigned_reason"] = dict() for i in scrmsd_scorer.unassigned_model_ligands: key = out["model_ligands"][i] reason = scrmsd_scorer.guess_model_ligand_unassigned_reason(i) out["rmsd"]["model_ligand_unassigned_reason"][key] = reason out["rmsd"]["reference_ligand_unassigned_reason"] = dict() for i in scrmsd_scorer.unassigned_target_ligands: key = out["reference_ligands"][i] reason = scrmsd_scorer.guess_target_ligand_unassigned_reason(i) out["rmsd"]["reference_ligand_unassigned_reason"][key] = reason if args.full_results: out["rmsd"]["full_results"] = list() shape = scrmsd_scorer.score_matrix.shape for ref_lig_idx in range(shape[0]): for mdl_lig_idx in range(shape[1]): state = int(scrmsd_scorer.state_matrix[(ref_lig_idx, mdl_lig_idx)]) target_key = out["reference_ligands"][ref_lig_idx] model_key = out["model_ligands"][mdl_lig_idx] if state == 0: score = float(scrmsd_scorer.score_matrix[(ref_lig_idx, mdl_lig_idx)]) coverage = float(scrmsd_scorer.coverage_matrix[(ref_lig_idx, mdl_lig_idx)]) aux_data = scrmsd_scorer.aux_matrix[(ref_lig_idx, mdl_lig_idx)] transform_data = aux_data["transform"].data out["rmsd"]["full_results"].append({"score": score, "coverage": coverage, "lddt_lp": aux_data["lddt_lp"], "bb_rmsd": aux_data["bb_rmsd"], "model_ligand": model_key, "reference_ligand": target_key, "chain_mapping": aux_data["chain_mapping"], "bs_ref_res": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res"]], "bs_ref_res_mapped": [_QualifiedResidueNotation(r) for r in aux_data["bs_ref_res_mapped"]], "bs_mdl_res_mapped": [_QualifiedResidueNotation(r) for r in aux_data["bs_mdl_res_mapped"]], "inconsistent_residues": [_QualifiedResidueNotation(r[0]) + \ "-" +_QualifiedResidueNotation(r[1]) for r in aux_data["inconsistent_residues"]], "transform": [transform_data[i:i + 4] for i in range(0, len(transform_data), 4)]}) else: reason = scrmsd_scorer.state_decoding[state] out["rmsd"]["full_results"].append({"score": None, "coverage": None, "model_ligand": model_key, "reference_ligand": target_key, "reason": reason}) return out def _WriteCSV(out, args): csv_dict = {} if args.output_by_model_ligand: ligand_by = "model_ligand" ligand_other = "reference_ligand" else: ligand_by = "reference_ligand" ligand_other = "model_ligand" # Always fill-in basic reference ligand info fieldnames = [ligand_by] for ligand in out["%ss" % ligand_by]: csv_dict[ligand] = { ligand_by: ligand, } if args.lddt_pli: fieldnames.extend(["lddt_pli", "lddt_pli_coverage", "lddt_pli_%s" % ligand_other, "lddt_pli_unassigned"]) for score in out["lddt_pli"]["assigned_scores"]: csv_dict[score[ligand_by]].update({ ligand_by: score[ligand_by], "lddt_pli": score["score"], "lddt_pli_coverage": score["coverage"], "lddt_pli_%s" % ligand_other: score[ligand_other], }) for ligand, reason in out["lddt_pli"][ "%s_unassigned_reason" % ligand_by].items(): csv_dict[ligand].update({ ligand_by: ligand, "lddt_pli_unassigned": reason[0], }) if args.rmsd: fieldnames.extend(["rmsd", "lddt_lp", "bb_rmsd", "rmsd_coverage", "rmsd_%s" % ligand_other, "rmsd_unassigned"]) for score in out["rmsd"]["assigned_scores"]: csv_dict[score[ligand_by]].update({ ligand_by: score[ligand_by], "rmsd": score["score"], "lddt_lp": score["lddt_lp"], "bb_rmsd": score["bb_rmsd"], "rmsd_coverage": score["coverage"], "rmsd_%s" % ligand_other: score[ligand_other], }) for ligand, reason in out["rmsd"][ "%s_unassigned_reason" % ligand_by].items(): csv_dict[ligand].update({ ligand_by: ligand, "rmsd_unassigned": reason[0], }) if args.csv_extra_header or args.csv_extra_data: extra_csv = StringIO( args.csv_extra_header + os.linesep + args.csv_extra_data) reader = csv.DictReader(extra_csv) extra_data = next(iter(reader)) if None in extra_data: raise ValueError("Not enough columns in --csv-extra-header") fieldnames = reader.fieldnames + fieldnames for ligand, row in csv_dict.items(): row.update(extra_data) with open(args.output, 'w', newline='') as csvfile: writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for row in csv_dict.values(): writer.writerow(row) def _Main(): args = _ParseArgs() ost.PushVerbosityLevel(args.verbosity) if args.verbosity < 4: # Hide tracebacks by default # Run script with -v 4 (Verbose) or higher to display them sys.tracebacklimit = 0 _CheckCompoundLib() try: # Load structures LogScript("Loading data") LogInfo("Loading reference structure") reference_format = _GetStructureFormat(args.reference, sformat=args.reference_format) reference = _LoadStructure(args.reference, sformat=reference_format, bu_id=args.reference_biounit, fault_tolerant = args.fault_tolerant) LogInfo("Loading model structure") model_format = _GetStructureFormat(args.model, sformat=args.model_format) model = _LoadStructure(args.model, sformat=model_format, bu_id=args.model_biounit, fault_tolerant = args.fault_tolerant) # Load ligands LogInfo("Loading reference ligands") reference_ligands = _LoadLigands(args.reference_ligands) LogInfo("Loading model ligands") model_ligands = _LoadLigands(args.model_ligands) # Cleanup LogVerbose("Cleaning up reference structure") cleaned_reference = _CleanupStructure(reference) LogVerbose("Cleaning up model structure") cleaned_model = _CleanupStructure(model) LogVerbose("Cleaning up reference ligands") cleaned_reference_ligands = _CleanupLigands(reference_ligands) LogVerbose("Cleaning up model ligands") cleaned_model_ligands = _CleanupLigands(model_ligands) # Validate _Validate(cleaned_model, cleaned_model_ligands, "model", fault_tolerant = args.fault_tolerant) _Validate(cleaned_reference, cleaned_reference_ligands, "reference", fault_tolerant = args.fault_tolerant) out = _Process(cleaned_model, cleaned_model_ligands, cleaned_reference, cleaned_reference_ligands, args) out["ost_version"] = ost.__version__ out["status"] = "SUCCESS" if args.output_format == "json": with open(args.output, 'w') as fh: json.dump(out, fh, indent=4, sort_keys=False) else: _WriteCSV(out, args) LogScript("Saved results in %s" % args.output) except Exception as exc: if args.output_format == "json": out = dict() out["status"] = "FAILURE" out["traceback"] = traceback.format_exc(limit=1000) etype, evalue, tb = sys.exc_info() out["exception"] = " ".join(traceback.format_exception_only(etype, evalue)) with open(args.output, 'w') as fh: json.dump(out, fh, indent=4, sort_keys=False) LogWarning("Error information saved in %s" % args.output) else: LogScript("Error encountered, no output saved") raise if __name__ == '__main__': _Main()