Dear users,
I am trying to do a parallel job for protein loop refinement by MODELLER9.17. I prepared the scripts as follows and provided all the input files in my working directory and I am running MODELLER through python2.7 (by typing: "python ModellerModelling.py" ). Although I encounter no error through the modeling process, but I doubt if I am doing a parallel job since when I check my workstation CPU usage, only one CPU is engaged (the usage percent does not exceed 100%). Could you please help me know what am I missing?
Best regards,
Sajjad
ModellerModeling.py:
# --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
# This script is generated by the Modeller Dialog in Chimera,
# incorporating the settings from the user interface. User can revise
# this script and submit the modified one into the Advanced Option panel.
# Import the Modeller module
from modeller import *
from modeller.automodel import *
from modeller.parallel import *
from mymodel import MyModel
log.minimal()
a.use_parallel_job(j) # Use the job for model building
# Use 16 CPUs in a parallel job on this machine
j = job()
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
j.append(local_slave())
results = j.run_all_tasks()
print("Got model resolution: " + str(results))
mymodel.py:
from modeller import *
from modeller.automodel import *
# ---------------------- namelist.dat --------------------------------
# A "namelist.dat" file contains the file names, which was output from
# the Modeller dialog in Chimera based on user's selection.
# The first line it the name of the target sequence, the remaining
# lines are name of the template structures
namelist = open( 'namelist.dat', 'r' ).read().split('\n')
tarSeq = namelist[0]
template = tuple( [ x.strip() for x in namelist[1:] if x != '' ] )
# ---------------------- namelist.dat --------------------------------
# This instructs Modeller to display all log output.
log.verbose()
# create a new Modeller environment
env = environ()
# Directory of atom/PDB/structure files. It is a relative path, inside
# a temp folder generated by Chimera. User can modify it and add their
# absolute path containing the structure files.
env.io.atom_files_directory = ['.', './template_struc']
# Read in HETATM records from template PDBs
env.io.hetatm = True
# Read in water molecules from template PDBs
env.io.water = True
# create a subclass of automodel or loopmodel, MyModel.
# user can further modify the MyModel class, to override certain routine.
class MyModel(loopmodel):
def select_loop_atoms(self):
from modeller import selection
return selection(
self.residue_range('256', '309'))
def select_atoms(self):
from modeller import selection
return selection(
self.residue_range('256', '309'))
def customised_function(self): pass
#code overrides the special_restraints method
#def special_restraints(self, aln):
#code overrides the special_patches method.
# e.g. to include the addtional disulfides.
#def special_patches(self, aln):
a = MyModel(env, sequence = tarSeq,
# alignment file with template codes and target sequence
alnfile = 'alignment.ali',
# name of initial PDB template
knowns = template[0])
# one fixed model to base loops on
a.starting_model = 1
a.ending_model = 1
# 1 loop models
loopRefinement = True
a.loop.starting_model = 1
a.loop.ending_model = 1000
a.loop.assess_methods=(assess.DOPE, assess.GA341, assess.normalized_dope)
# Assesses the quality of models using
# the DOPE (Discrete Optimized Protein Energy) method (Shen & Sali 2006)
# and the GA341 method (Melo et al 2002, John & Sali 2003)
a.assess_methods = (assess.GA341, assess.normalized_dope)
# ------------------------- build all models --------------------------
a.make()
# ---------- Accesing output data after modeling is complete ----------
# Get a list of all successfully built models from a.outputs
if loopRefinement:
ok_models = filter(lambda x: x['failure'] is None, a.loop.outputs)
else:
ok_models = filter(lambda x: x['failure'] is None, a.outputs)
# Rank the models by index number
#key = 'num'
#ok_models.sort(lambda a,b: cmp(a[key], b[key]))
def numSort(a, b, key="num"):
return cmp(a[key], b[key])
ok_models.sort(numSort)
# Output the list of ok_models to file ok_models.dat
fMoutput = open('ok_models.dat', 'w')
fMoutput.write('File name of aligned model\t GA341\t zDOPE \n')
for m in ok_models:
results = '%s\t' % m['name']
results += '%.5f\t' % m['GA341 score'][0]
results += '%.5f\n' % m['Normalized DOPE score']
fMoutput.write( results )
fMoutput.close()