From 41236319cd83113bf0b023bcdfd186832701cad5 Mon Sep 17 00:00:00 2001 From: karolamik13 Date: Mon, 6 Nov 2023 11:11:30 +0100 Subject: [PATCH 1/9] Correction with kwargs at line 552 [calcChHydrogenBonds()] --- prody/proteins/interactions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prody/proteins/interactions.py b/prody/proteins/interactions.py index a87db9e19..5672033de 100644 --- a/prody/proteins/interactions.py +++ b/prody/proteins/interactions.py @@ -549,7 +549,7 @@ def calcChHydrogenBonds(atoms, **kwargs): seq_cutoff = kwargs.pop('seq_cutoff', 25) if len(np.unique(atoms.getChids())) > 1: - HBS_calculations = calcHydrogenBonds(atoms, **kwargs) + HBS_calculations = calcHydrogenBonds(atoms, distA, angle, seq_cutoff) ChainsHBs = [ i for i in HBS_calculations if str(i[2]) != str(i[5]) ] if not ChainsHBs: From 1a5d32a481e32b084d8f71c1924be8f1051b2c3a Mon Sep 17 00:00:00 2001 From: James Krieger Date: Mon, 6 Nov 2023 15:10:53 +0100 Subject: [PATCH 2/9] fix kwargs usage --- prody/proteins/interactions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prody/proteins/interactions.py b/prody/proteins/interactions.py index 5672033de..dffc41bad 100644 --- a/prody/proteins/interactions.py +++ b/prody/proteins/interactions.py @@ -549,7 +549,7 @@ def calcChHydrogenBonds(atoms, **kwargs): seq_cutoff = kwargs.pop('seq_cutoff', 25) if len(np.unique(atoms.getChids())) > 1: - HBS_calculations = calcHydrogenBonds(atoms, distA, angle, seq_cutoff) + HBS_calculations = calcHydrogenBonds(atoms, distA=distA, angle=angle, seq_cutoff=seq_cutoff) ChainsHBs = [ i for i in HBS_calculations if str(i[2]) != str(i[5]) ] if not ChainsHBs: From 39ed604823fa2c3bbf8bd2c632f7068080661927 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Mon, 6 Nov 2023 16:32:24 +0100 Subject: [PATCH 3/9] fix acs issue for cases where atoms is not Atomic --- prody/proteins/pdbfile.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/prody/proteins/pdbfile.py b/prody/proteins/pdbfile.py index 60c0a3992..9fcbe8edf 100644 --- a/prody/proteins/pdbfile.py +++ b/prody/proteins/pdbfile.py @@ -1242,8 +1242,7 @@ def writePDBStream(stream, atoms, csets=None, **kwargs): Default is **False**, which means using hexadecimal instead. NB: ChimeraX seems to prefer hybrid36 and may have problems with hexadecimal. 
:type hybrid36: bool - """ - initialACSI = atoms.getACSIndex() + """ renumber = kwargs.get('renumber', True) remark = str(atoms) @@ -1262,8 +1261,10 @@ def writePDBStream(stream, atoms, csets=None, **kwargs): if coordsets is None: raise ValueError('atoms does not have any coordinate sets') + had_atoms = False try: acsi = atoms.getACSIndex() + had_atoms = True except AttributeError: try: atoms = atoms.getAtoms() @@ -1438,7 +1439,8 @@ def writePDBStream(stream, atoms, csets=None, **kwargs): num_ter_lines = 0 for m, coords in enumerate(coordsets): - atoms.setACSIndex(m) + if had_atoms: + atoms.setACSIndex(m) anisous = atoms._getAnisous() if anisous is not None: anisous = np.array(anisous * 10000, dtype=int) @@ -1614,7 +1616,8 @@ def writePDBStream(stream, atoms, csets=None, **kwargs): write('END ' + " "*74 + '\n') - atoms.setACSIndex(initialACSI) + if had_atoms: + atoms.setACSIndex(acsi) writePDBStream.__doc__ += _writePDBdoc From 714fe707c1aeb946b4b6f51c87d1f8c9461700e3 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Mon, 6 Nov 2023 16:47:08 +0100 Subject: [PATCH 4/9] add writePDB ens test --- prody/tests/proteins/test_pdbfile.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/prody/tests/proteins/test_pdbfile.py b/prody/tests/proteins/test_pdbfile.py index fa0e8183c..be9f1a4e2 100644 --- a/prody/tests/proteins/test_pdbfile.py +++ b/prody/tests/proteins/test_pdbfile.py @@ -262,6 +262,11 @@ def setUp(self): self.ag = parsePDB(self.pdb['path']) self.tmp = os.path.join(TEMPDIR, 'test.pdb') + self.ens = PDBEnsemble() + self.ens.setAtoms(self.ag) + self.ens.setCoords(self.ag.getCoords()) + self.ens.addCoordset(self.ag.getCoordsets()) + self.ubi = parsePDB(DATA_FILES['1ubi']['path'], secondary=True) self.hex = parsePDB(DATA_FILES['hex']['path']) @@ -446,6 +451,16 @@ def testWritingAltlocModels(self): self.assertEqual(lines1[8], lines2[8], 'writePDB failed to write correct ANISOU line 8 for 6flr selection with altloc None') + + def testWriteEnsembleToPDB(self): + """Test that writePDB can handle ensembles.""" + + out = writePDB(self.tmp, self.ens) + out = parsePDB(out) + self.assertEqual(out.numCoordsets(), self.ens.numCoordsets(), + 'failed to write correct number of models from ensemble') + assert_equal(out.getCoords(), self.ag.getCoordsets(0), + 'failed to write ensemble model 1 coordinates correctly') @dec.slow def tearDown(self): From d780888e76d9feb9a6e28e4c2940658c4bfcff87 Mon Sep 17 00:00:00 2001 From: karolamik13 Date: Mon, 6 Nov 2023 21:50:32 +0100 Subject: [PATCH 5/9] contact_min removed from calcFrequentInteractors() --- prody/proteins/interactions.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/prody/proteins/interactions.py b/prody/proteins/interactions.py index dffc41bad..da2f86f62 100644 --- a/prody/proteins/interactions.py +++ b/prody/proteins/interactions.py @@ -4094,14 +4094,10 @@ def calcFrequentInteractors(self, **kwargs): Selection pointed as None will return all interactions together without ligands separation. :type selection: str - - :arg contacts_min: Minimal number of contacts which residue may form with ligand. - by default is 2. 
- :type contacts_min: int """ + """ atoms = self._atoms interactions = self._interactions_traj - contacts_min = kwargs.pop('contacts_min', 2) selection = kwargs.pop('selection', None) from collections import Counter From fe56a70c0d6a476265e4d96633a76e50c5731009 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Wed, 8 Nov 2023 16:30:01 +0100 Subject: [PATCH 6/9] look for cif and emd in path --- prody/proteins/ciffile.py | 6 +++++- prody/proteins/emdfile.py | 12 ++++++++++++ prody/proteins/localpdb.py | 27 ++++++++++++++++++++------- 3 files changed, 37 insertions(+), 8 deletions(-) diff --git a/prody/proteins/ciffile.py b/prody/proteins/ciffile.py index 921736ac0..10cbc4ad7 100644 --- a/prody/proteins/ciffile.py +++ b/prody/proteins/ciffile.py @@ -86,7 +86,7 @@ def parseMMCIF(pdb, **kwargs): auto_bonds = SETTINGS.get('auto_bonds') get_bonds = kwargs.get('bonds', auto_bonds) if get_bonds: - LOGGER.warn('Parsing struct_conn information from mmCIF is current unsupported and no bond information is added to the results') + LOGGER.warn('Parsing struct_conn information from mmCIF is currently unsupported and no bond information is added to the results') if not os.path.isfile(pdb): if len(pdb) == 5 and pdb.isalnum(): if chain is None: @@ -105,8 +105,12 @@ def parseMMCIF(pdb, **kwargs): if os.path.isfile(pdb + '.cif'): filename = pdb + '.cif' + LOGGER.debug('CIF file is found in working directory ({0}).' + .format(filename)) elif os.path.isfile(pdb + '.cif.gz'): filename = pdb + '.cif.gz' + LOGGER.debug('CIF file is found in working directory ({0}).' + .format(filename)) else: filename = fetchPDB(pdb, report=True, format='cif', compressed=False) diff --git a/prody/proteins/emdfile.py b/prody/proteins/emdfile.py index 9cd457878..0f87025cb 100644 --- a/prody/proteins/emdfile.py +++ b/prody/proteins/emdfile.py @@ -72,8 +72,12 @@ def parseEMD(emd, **kwargs): if os.path.isfile(emd + '.map'): filename = emd + '.map' + LOGGER.debug('EMD file is found in working directory ({0}).' + .format(filename)) elif os.path.isfile(emd + '.map.gz'): filename = emd + '.map.gz' + LOGGER.debug('EMD file is found in working directory ({0}).' 
+ .format(filename)) else: filename = fetchPDB(emd, report=True, format='emd', compressed=False) @@ -91,6 +95,14 @@ def parseEMD(emd, **kwargs): result = parseEMDStream(emdStream, **kwargs) emdStream.close() + if hasattr(result, 'numAtoms'): + LOGGER.info('Output is an AtomGroup with {0} atoms fitted.'.format(result.numAtoms())) + elif hasattr(result, 'apix'): + LOGGER.info('Output is an EMDMAP with {:4.2f} A/pix.'.format(result.apix[0])) + else: + LOGGER.warn('Atomic data could not be parsed, please ' + 'check the input file.') + return result diff --git a/prody/proteins/localpdb.py b/prody/proteins/localpdb.py index 1deaacdab..eb3c4d066 100644 --- a/prody/proteins/localpdb.py +++ b/prody/proteins/localpdb.py @@ -212,16 +212,15 @@ def fetchPDB(*pdb, **kwargs): if len(pdb) == 1 and isinstance(pdb[0], list): pdb = pdb[0] - if 'format' in kwargs and kwargs.get('format') != 'pdb': - return fetchPDBviaFTP(*pdb, **kwargs) - identifiers = checkIdentifiers(*pdb) folder = kwargs.get('folder', '.') compressed = kwargs.get('compressed') + format = kwargs.get('format') # check *folder* specified by the user, usually pwd ('.') - filedict = findPDBFiles(folder, compressed=compressed) + filedict = findPDBFiles(folder, compressed=compressed, + format=format) filenames = [] not_found = [] @@ -232,6 +231,8 @@ def fetchPDB(*pdb, **kwargs): elif pdb in filedict: filenames.append(filedict[pdb]) exists += 1 + LOGGER.debug('{0} file is found in working directory ({1}).' + .format(format.upper(), sympath(filedict[pdb]))) else: filenames.append(None) not_found.append((i, pdb)) @@ -240,8 +241,8 @@ def fetchPDB(*pdb, **kwargs): if len(filenames) == 1: filenames = filenames[0] if exists: - LOGGER.debug('PDB file is found in working directory ({0}).' - .format(sympath(filenames))) + LOGGER.debug('{0} file is found in working directory ({1}).' 
+ .format(format.upper(), sympath(filedict[pdb]))) return filenames if not isWritable(folder): @@ -414,6 +415,8 @@ def iterPDBFilenames(path=None, sort=False, unique=True, **kwargs): from re import compile, IGNORECASE + format = kwargs.get('format') + if path is None or kwargs.get('mirror') is True: if path is None: path = pathPDBMirror() @@ -436,10 +439,20 @@ def iterPDBFilenames(path=None, sort=False, unique=True, **kwargs): compressed = kwargs.get('compressed') if compressed is None: pdbext = compile('\.(pdb|ent)(\.gz)?$', IGNORECASE) + cifext = compile('\.(cif)(\.gz)?$', IGNORECASE) + emdext = compile('\.(emd|map|mrc)(\.gz)?$', IGNORECASE) elif compressed: pdbext = compile('\.(pdb|ent)\.gz$', IGNORECASE) + cifext = compile('\.(cif)\.gz$', IGNORECASE) + emdext = compile('\.(emd|map|mrc)\.gz$', IGNORECASE) else: pdbext = compile('\.(pdb|ent)$', IGNORECASE) + cifext = compile('\.(cif)$', IGNORECASE) + emdext = compile('\.(emd|map|mrc)$', IGNORECASE) + if format == 'cif': + pdbext = cifext + if format == 'emd': + pdbext = emdext pdbs = [pdb for pdb in iglob(join(path, '*')) if pdbext.search(pdb)] if sort: pdbs.sort(reverse=kwargs.get('reverse')) @@ -476,7 +489,7 @@ def findPDBFiles(path, case=None, **kwargs): pdb = splitext(split(fn)[1])[0] ending = splitext(splitext(split(fn)[1])[0])[1] if ending == 'gz': - pdb = splittext(pdb)[0] + pdb = splitext(pdb)[0] if len(pdb) == 7 and pdb.startswith('pdb'): pdb = pdb[3:] if upper: From 919ec21ed820aa064d94bbcb83a863e787f3e376 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Wed, 8 Nov 2023 16:33:41 +0100 Subject: [PATCH 7/9] remove no nodes warning on parseEMD --- prody/proteins/emdfile.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/prody/proteins/emdfile.py b/prody/proteins/emdfile.py index 0f87025cb..7b5168564 100644 --- a/prody/proteins/emdfile.py +++ b/prody/proteins/emdfile.py @@ -140,8 +140,6 @@ def parseEMDStream(stream, **kwargs): else: make_nodes = False map = True - LOGGER.info('As n_nodes is less than or equal to 0, no nodes will be' - ' made and the raw map will be returned') emd = EMDMAP(stream, min_cutoff, max_cutoff) From 53fbbc860168582bdb864c2521fba17f7c194ee8 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Wed, 8 Nov 2023 17:02:19 +0100 Subject: [PATCH 8/9] don't use reserved word format and don't report found twice --- prody/proteins/localpdb.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/prody/proteins/localpdb.py b/prody/proteins/localpdb.py index eb3c4d066..88c90eb46 100644 --- a/prody/proteins/localpdb.py +++ b/prody/proteins/localpdb.py @@ -216,11 +216,11 @@ def fetchPDB(*pdb, **kwargs): folder = kwargs.get('folder', '.') compressed = kwargs.get('compressed') - format = kwargs.get('format') + format_ = kwargs.get('format') # check *folder* specified by the user, usually pwd ('.') filedict = findPDBFiles(folder, compressed=compressed, - format=format) + format=format_) filenames = [] not_found = [] @@ -231,8 +231,6 @@ def fetchPDB(*pdb, **kwargs): elif pdb in filedict: filenames.append(filedict[pdb]) exists += 1 - LOGGER.debug('{0} file is found in working directory ({1}).' - .format(format.upper(), sympath(filedict[pdb]))) else: filenames.append(None) not_found.append((i, pdb)) @@ -242,7 +240,7 @@ def fetchPDB(*pdb, **kwargs): filenames = filenames[0] if exists: LOGGER.debug('{0} file is found in working directory ({1}).' 
- .format(format.upper(), sympath(filedict[pdb]))) + .format(format_.upper(), sympath(filedict[pdb]))) return filenames if not isWritable(folder): From bb13becb9cbdb6a12740f63a02a7990b7c633670 Mon Sep 17 00:00:00 2001 From: James Krieger Date: Wed, 8 Nov 2023 18:36:45 +0100 Subject: [PATCH 9/9] reduce parseCIF warnings --- prody/proteins/ciffile.py | 8 ++------ prody/proteins/cifheader.py | 40 ++++++++++++++++++------------------- prody/proteins/starfile.py | 5 +++-- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/prody/proteins/ciffile.py b/prody/proteins/ciffile.py index a5f25a754..a0e310047 100644 --- a/prody/proteins/ciffile.py +++ b/prody/proteins/ciffile.py @@ -508,12 +508,8 @@ def _parseMMCIFLines(atomgroup, lines, model, chain, subset, anisou = None siguij = None - try: - data = parseSTARSection(lines, "_atom_site_anisotrop") - x = data[0] # check if data has anything in it - except IndexError: - LOGGER.warn("No anisotropic B factors found") - else: + data = parseSTARSection(lines, "_atom_site_anisotrop", report=False) + if len(data) > 0: anisou = np.zeros((acount, 6), dtype=float) diff --git a/prody/proteins/cifheader.py b/prody/proteins/cifheader.py index 962a86299..adf4fb99b 100644 --- a/prody/proteins/cifheader.py +++ b/prody/proteins/cifheader.py @@ -178,8 +178,8 @@ def _getBiomoltrans(lines): # 2 blocks are needed for this: # _pdbx_struct_assembly_gen: what to apply to which chains # _pdbx_struct_oper_list: everything else - data1 = parseSTARSection(lines, '_pdbx_struct_assembly_gen') - data2 = parseSTARSection(lines, '_pdbx_struct_oper_list') + data1 = parseSTARSection(lines, '_pdbx_struct_assembly_gen', report=False) + data2 = parseSTARSection(lines, '_pdbx_struct_oper_list', report=False) # extracting the data for n, item1 in enumerate(data1): @@ -225,7 +225,7 @@ def _getRelatedEntries(lines): try: key = "_pdbx_database_related" - data = parseSTARSection(lines, key) + data = parseSTARSection(lines, key, report=False) for item in data: dbref = DBRef() dbref.accession = item[key + ".db_id"] @@ -715,8 +715,8 @@ def _getReference(lines): # JRNL double block. Blocks 6 and 7 as copied from COMPND # Block 1 has most info. 
Block 2 has author info - items1 = parseSTARSection(lines, "_citation") - items2 = parseSTARSection(lines, "_citation_author") + items1 = parseSTARSection(lines, "_citation", report=False) + items2 = parseSTARSection(lines, "_citation_author", report=False) for row in items1: for k, value in row.items(): @@ -767,7 +767,7 @@ def _getPolymers(lines): entities = defaultdict(list) # SEQRES block - items1 = parseSTARSection(lines, '_entity_poly') + items1 = parseSTARSection(lines, '_entity_poly', report=False) for item in items1: chains = item['_entity_poly.pdbx_strand_id'] @@ -781,7 +781,7 @@ def _getPolymers(lines): '_entity_poly.pdbx_seq_one_letter_code_can'].replace(';', '').split()) # DBREF block 1 - items2 = parseSTARSection(lines, '_struct_ref') + items2 = parseSTARSection(lines, '_struct_ref', report=False) for item in items2: entity = item["_struct_ref.id"] @@ -798,7 +798,7 @@ def _getPolymers(lines): poly.dbrefs.append(dbref) # DBREF block 2 - items3 = parseSTARSection(lines, "_struct_ref_seq") + items3 = parseSTARSection(lines, "_struct_ref_seq", report=False) for i, item in enumerate(items3): i += 1 @@ -884,7 +884,7 @@ def _getPolymers(lines): last = temp # MODRES block - data4 = parseSTARSection(lines, "_pdbx_struct_mod_residue") + data4 = parseSTARSection(lines, "_pdbx_struct_mod_residue", report=False) for data in data4: ch = data["_pdbx_struct_mod_residue.label_asym_id"] @@ -904,7 +904,7 @@ def _getPolymers(lines): data["_pdbx_struct_mod_residue.details"])) # SEQADV block - data5 = parseSTARSection(lines, "_struct_ref_seq_dif") + data5 = parseSTARSection(lines, "_struct_ref_seq_dif", report=False) for i, data in enumerate(data5): ch = data["_struct_ref_seq_dif.pdbx_pdb_strand_id"] @@ -964,8 +964,8 @@ def _getPolymers(lines): # COMPND double block. # Block 6 has most info. Block 7 has synonyms - data6 = parseSTARSection(lines, "_entity") - data7 = parseSTARSection(lines, "_entity_name_com") + data6 = parseSTARSection(lines, "_entity", report=False) + data7 = parseSTARSection(lines, "_entity_name_com", report=False) dict_ = {} for molecule in data6: @@ -1045,7 +1045,7 @@ def _getChemicals(lines): # 1st block we need is has info about location in structure # this instance only includes single sugars not branched structures - items = parseSTARSection(lines, "_pdbx_nonpoly_scheme") + items = parseSTARSection(lines, "_pdbx_nonpoly_scheme", report=False) for data in items: resname = data["_pdbx_nonpoly_scheme.mon_id"] @@ -1064,7 +1064,7 @@ def _getChemicals(lines): chemicals[chem.resname].append(chem) # next we get the equivalent one for branched sugars part - items = parseSTARSection(lines, "_pdbx_branch_scheme") + items = parseSTARSection(lines, "_pdbx_branch_scheme", report=False) for data in items: resname = data["_pdbx_branch_scheme.mon_id"] @@ -1080,7 +1080,7 @@ def _getChemicals(lines): chemicals[chem.resname].append(chem) # 2nd block to get has general info e.g. 
name and formula - items = parseSTARSection(lines, "_chem_comp") + items = parseSTARSection(lines, "_chem_comp", report=False) for data in items: resname = data["_chem_comp.id"] @@ -1155,7 +1155,7 @@ def _getTitle(lines): title = '' try: - data = parseSTARSection(lines, "_struct") + data = parseSTARSection(lines, "_struct", report=False) for item in data: title += item['_struct.title'].upper() except: @@ -1172,7 +1172,7 @@ def _getAuthors(lines): authors = [] try: - data = parseSTARSection(lines, "_audit_author") + data = parseSTARSection(lines, "_audit_author", report=False) for item in data: author = ''.join(item['_audit_author.name'].split(', ')[::-1]) authors.append(author.upper()) @@ -1192,7 +1192,7 @@ def _getSplit(lines): key = "_pdbx_database_related" try: - data, _ = parseSTARSection(lines, key) + data, _ = parseSTARSection(lines, key, report=False) for item in data: if item[key + '.content_type'] == 'split': split.append(item[key + '.db_id']) @@ -1227,7 +1227,7 @@ def _getOther(lines, key=None): data = [] try: - data = parseSTARSection(lines, key) + data = parseSTARSection(lines, key, report=False) except: pass @@ -1242,7 +1242,7 @@ def _getUnobservedSeq(lines): key_unobs = '_pdbx_unobs_or_zero_occ_residues' try: - unobs = parseSTARSection(lines, key_unobs) + unobs = parseSTARSection(lines, key_unobs, report=False) polymers = _getPolymers(lines) except: pass diff --git a/prody/proteins/starfile.py b/prody/proteins/starfile.py index a527daee6..4cd24cccb 100644 --- a/prody/proteins/starfile.py +++ b/prody/proteins/starfile.py @@ -1026,7 +1026,7 @@ def parseImagesFromSTAR(particlesSTAR, **kwargs): return np.array(images), parsed_images_data -def parseSTARSection(lines, key): +def parseSTARSection(lines, key, report=True): """Parse a section of data from *lines* from a STAR file corresponding to a *key* (part before the dot). This can be a loop or data block. @@ -1077,7 +1077,8 @@ def parseSTARSection(lines, key): else: data = [loop_dict["data"]] else: - LOGGER.warn("Could not find {0} in lines.".format(key)) + if report: + LOGGER.warn("Could not find {0} in lines.".format(key)) return [] return data
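
Usage notes (not part of the patch series; minimal sketches of the behaviour the patches enable).

After [PATCH 2/9], the parameters popped inside calcChHydrogenBonds() are forwarded to calcHydrogenBonds() by keyword. The equivalent direct call, assuming a structure that already contains hydrogens (the file name below is only a placeholder):

    from prody import parsePDB, calcHydrogenBonds

    atoms = parsePDB('complex_with_hydrogens.pdb')  # placeholder; hydrogens must be present

    # keyword names match those forwarded by the patched calcChHydrogenBonds()
    hbs = calcHydrogenBonds(atoms.select('protein'),
                            distA=3.5, angle=40, seq_cutoff=25)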
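[PATCH 3/9] and [PATCH 4/9] let writePDBStream() handle inputs that lack getACSIndex(), so a PDBEnsemble can be written directly, one MODEL per coordinate set. A sketch mirroring the new testWriteEnsembleToPDB test; the input file name is only a placeholder for any multi-model structure:

    from prody import parsePDB, writePDB, PDBEnsemble

    ag = parsePDB('multi_model.pdb')         # placeholder multi-model structure

    ens = PDBEnsemble()
    ens.setAtoms(ag)
    ens.setCoords(ag.getCoords())
    ens.addCoordset(ag.getCoordsets())

    out = writePDB('ensemble_out.pdb', ens)  # accepted after the ACS-index fix
    reread = parsePDB(out)
    assert reread.numCoordsets() == ens.numCoordsets()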
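With [PATCH 6/9] and [PATCH 8/9], fetchPDB() also checks the working directory for mmCIF and EMD files when a format is given. A sketch, where '7xyz' stands in for any real identifier:

    from prody import fetchPDB

    # if 7xyz.cif or 7xyz.cif.gz already sits in the current folder, that file
    # is returned instead of triggering a new download
    filename = fetchPDB('7xyz', format='cif')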