I have a function that accepts a large array of x,y pairs as input, does some elaborate curve fitting using numpy and scipy, and then returns a single value. To try to speed things up, I have two worker threads that I feed data to using Queue.Queue. Once all the data has been processed, I want the threads to terminate so the calling process can end and return control to the shell.
I am trying to understand why I have to resort to a private method in threading.Thread to stop my threads and return control to the commandline.
The self.join() does not end the program. The only way to get back control was to use the private stop method.
def stop(self):
    # NOTE(review): Python 2 snippet from the question, kept verbatim to
    # illustrate the problem.
    print "STOP CALLED"
    # Signals run() to exit its loop -- but run() is blocked inside a
    # plain in_queue.get() (see the class below), so it never re-checks
    # the event and the thread never terminates on its own.
    self.finished.set()
    print "SET DONE"
    # self.join(timeout=None) does not work
    # Private CPython internal that forcibly marks the thread as stopped.
    # Unsafe (the thread may hold locks/shared state) and removed in
    # Python 3; the proper fix is a cooperative event + a timed get().
    self._Thread__stop()
Here is an approximation of my code:
class CalcThread(threading.Thread):
    """Worker thread: pulls parameter dicts from ``in_queue``, applies
    ``function`` to each, and pushes the results onto ``out_queue``.

    Shutdown is cooperative: ``stop()`` sets an event that ``run()``
    polls between timed queue gets -- no private Thread APIs needed.
    """

    def __init__(self, in_queue, out_queue, function):
        threading.Thread.__init__(self)
        self.in_queue = in_queue      # queue of parameter dicts to process
        self.out_queue = out_queue    # queue receiving each computed result
        self.function = function      # callable(params_dict) -> result
        self.finished = threading.Event()

    def stop(self):
        """Ask the thread to exit; run() notices within one get() timeout."""
        self.finished.set()

    def run(self):
        from queue import Empty  # local import keeps the snippet self-contained

        while not self.finished.is_set():
            try:
                # A timed get keeps the loop responsive to stop(). The
                # original blocking get() hung forever on an empty queue,
                # which is why join() never returned control.
                params_for_function = self.in_queue.get(timeout=0.1)
            except Empty:
                continue
            try:
                # Fixed NameError: original called
                # self.function(paramsforfunction).
                tm = self.function(params_for_function)
                self.out_queue.put(tm)
            except ValueError:
                # Widen the window and re-queue the item for another try.
                params_for_function["window"] += 1
                self.in_queue.put(params_for_function)
            finally:
                # Balance every get() so in_queue.join() can complete even
                # when the item is re-queued. The original skipped
                # task_done() on the ValueError path, deadlocking join().
                self.in_queue.task_done()
def big_calculation(well_id,window,data_arrays):
    """Placeholder for the real curve-fitting routine.

    NOTE(review): as written, ``tm`` is never assigned, so calling this
    stub raises NameError. The real implementation (elided by the
    question author) presumably computes tm from the x,y data using
    numpy/scipy -- confirm against the actual code.
    """
    # do some analysis to calculate tm
    return tm
if __name__ == "__main__":
    NUM_THREADS = 2
    workers = []
    in_queue = Queue()
    out_queue = Queue()
    for i in range(NUM_THREADS):
        w = CalcThread(in_queue, out_queue, big_calculation)
        # Daemon workers cannot keep the interpreter alive, so the
        # process exits cleanly even if a thread is still blocked in
        # get() -- no private _Thread__stop() required.
        w.daemon = True
        w.start()
        workers.append(w)

    # Count submissions so we know exactly how many results to expect.
    jobs_submitted = 0
    if options.analyze_all:
        for i in well_ids:
            in_queue.put(dict(well_id=i, window=10, data_arrays=my_data_dict))
            jobs_submitted += 1

    in_queue.join()
    print("ALL THREADS SEEM TO BE DONE")

    # Drain exactly one result per submitted job. The original peeked at
    # out_queue.qsize() and called task_done()/join() on it, which is
    # racy: qsize() can be 0 while a worker sits between get() and put().
    for _ in range(jobs_submitted):
        p = out_queue.get()
        print(p)

    # Cooperative shutdown: signal every worker, then wait briefly for
    # each to exit its run() loop.
    for aworker in workers:
        aworker.stop()
    for aworker in workers:
        aworker.join(timeout=1.0)
In general it is a bad idea to kill a thread that modifies a shared resource.
CPU-intensive tasks in multiple threads are worse than useless in Python unless you release the GIL while performing computations. Many numpy
functions do release the GIL.
import concurrent.futures  # on Python 2.x: pip install futures

calc_args = []
if options.analyze_all:
    # Fixed SyntaxError: dict(well_id=i,...) had a positional "..." after
    # a keyword argument; spell out the job parameters instead.
    calc_args.extend(dict(well_id=i, window=10, data_arrays=my_data_dict)
                     for i in well_ids)

with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_THREADS) as executor:
    # Map each in-flight future back to the args that produced it so a
    # failed job can be modified and resubmitted. Use **args: submitting
    # the dict positionally would not match big_calculation's signature.
    future_to_args = dict((executor.submit(big_calculation, **args), args)
                          for args in calc_args)
    while future_to_args:
        # Iterate over a snapshot, since the dict is mutated when a job
        # is resubmitted. Note dict(**future_to_args) would raise
        # TypeError here -- Future keys are not strings.
        for future in concurrent.futures.as_completed(dict(future_to_args)):
            args = future_to_args.pop(future)
            if future.exception() is not None:
                print('%r generated an exception: %s' % (args,
                                                         future.exception()))
                if isinstance(future.exception(), ValueError):
                    # Widen the window and resubmit the job.
                    args["window"] += 1
                    future_to_args[executor.submit(big_calculation, **args)] = args
            else:
                # Fixed stray 'f' that sat inside the format string.
                print('%r returned %r' % (args, future.result()))
print("ALL work SEEMs TO BE DONE")
You could replace ThreadPoolExecutor
with ProcessPoolExecutor
if there is no shared state, to get true parallelism for CPU-bound work. Put the code in your main()
function.
If you found this helpful, you can donate to us via PayPal or buy me a coffee so we can maintain and grow. Thank you!
Donate to us