From 8eb3d879dafafb2044a884efd3ba58422c518f70 Mon Sep 17 00:00:00 2001
From: Morten Kjeldgaard
Date: Sat, 31 May 2014 09:53:23 +0200
Subject: [PATCH] Python 3 modifications

---
 async/__init__.py              | 15 ++++----
 async/channel.py               | 14 +++----
 async/mod/zlibmodule.c         | 67 +++++++++++++++++++++++++++++++++-
 async/pool.py                  | 15 ++++----
 async/task.py                  | 10 ++---
 async/test/task.py             |  6 +--
 async/test/test_channel.py     |  6 +--
 async/test/test_example.py     |  6 +--
 async/test/test_graph.py       |  2 +-
 async/test/test_performance.py | 10 ++---
 async/test/test_pool.py        | 60 +++++++++++++++---------------
 async/test/test_task.py        |  2 +-
 async/test/test_thread.py      |  4 +-
 async/thread.py                |  8 ++--
 async/util.py                  |  2 +-
 setup.py                       |  2 +-
 16 files changed, 148 insertions(+), 81 deletions(-)

diff --git a/async/__init__.py b/async/__init__.py
index 27bd380..0432610 100644
--- a/async/__init__.py
+++ b/async/__init__.py
@@ -9,19 +9,19 @@ def _init_atexit():
     """Setup an at-exit job to be sure our workers are shutdown correctly before
     the interpreter quits"""
     import atexit
-    import thread
+    from . import thread
     atexit.register(thread.do_terminate_threads)
 
 def _init_signals():
     """Assure we shutdown our threads correctly when being interrupted"""
     import signal
-    import thread
+    from . import thread
     import sys
 
     prev_handler = signal.getsignal(signal.SIGINT)
     def thread_interrupt_handler(signum, frame):
         thread.do_terminate_threads()
-        if callable(prev_handler):
+        if isinstance(prev_handler, collections.Callable):
             prev_handler(signum, frame)
         raise KeyboardInterrupt()
     # END call previous handler
@@ -30,7 +30,7 @@ def thread_interrupt_handler(signum, frame):
         signal.signal(signal.SIGINT, thread_interrupt_handler)
     except ValueError:
         # happens if we don't try it from the main thread
-        print >> sys.stderr, "Failed to setup thread-interrupt handler. This is usually not critical"
+        print("Failed to setup thread-interrupt handler. This is usually not critical", file=sys.stderr)
     # END exception handling
 
 
@@ -41,6 +41,7 @@ def thread_interrupt_handler(signum, frame):
 
 # initial imports
-from task import *
-from pool import *
-from channel import *
+from .task import *
+from .pool import *
+from .channel import *
+import collections
 
diff --git a/async/channel.py b/async/channel.py
index b1306b0..84a142e 100644
--- a/async/channel.py
+++ b/async/channel.py
@@ -3,12 +3,12 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Contains a queue based channel implementation"""
-from Queue import (
+from queue import (
     Empty,
     Full
     )
 
-from util import (
+from .util import (
     AsyncQueue,
     SyncQueue,
     ReadOnly
@@ -154,7 +154,7 @@ def __init__(self, device):
     def __iter__(self):
         return self
 
-    def next(self):
+    def __next__(self):
         """Implements the iterator protocol, iterating individual items"""
         items = self.read(1)
         if items:
@@ -220,7 +220,7 @@ def read(self, count=0, block=True, timeout=None):
                     out.append(queue.get(False))
                 # END for each item
             else:
-                for i in xrange(count):
+                for i in range(count):
                     out.append(queue.get(False))
                 # END for each item
             # END handle count
@@ -230,7 +230,7 @@ def read(self, count=0, block=True, timeout=None):
         else:
             # to get everything into one loop, we set the count accordingly
             if count == 0:
-                count = sys.maxint
+                count = sys.maxsize
             # END handle count
 
             i = 0
@@ -353,9 +353,9 @@ def read(self, count=0, block=True, timeout=None):
         else:
             out = list()
             it = self._iter
-            for i in xrange(count):
+            for i in range(count):
                 try:
-                    out.append(it.next())
+                    out.append(next(it))
                 except StopIteration:
                     self._empty = True
                     break
diff --git a/async/mod/zlibmodule.c b/async/mod/zlibmodule.c
index 1dabe69..9b8306f 100644
--- a/async/mod/zlibmodule.c
+++ b/async/mod/zlibmodule.c
@@ -3,10 +3,21 @@
 
 /* Windows users: read Python's PCbuild\readme.txt */
 
-
 #include "Python.h"
 #include "zlib.h"
 
+#if PY_MAJOR_VERSION >= 3
+  #define PyInt_FromLong PyLong_FromLong
+  #define PyString_FromString PyUnicode_FromString
+  #define PyString_FromStringAndSize PyUnicode_FromStringAndSize
+  #define PyString_AS_STRING PyUnicode_AS_UNICODE
+  #define _PyString_Resize PyUnicode_Resize
+  #define PyText_AS_UTF8 _PyUnicode_AsString
+  #define PyText_Check PyUnicode_Check
+#endif
+
+
+
 #ifdef WITH_THREAD
     #include "pythread.h"
@@ -897,6 +908,23 @@ static PyMethodDef Decomp_methods[] =
     {NULL, NULL}
 };
 
+
+/*
+  Py_FindMethod gone in Python 3, so Comp_getattr and Decomp_getattr
+  have to be rewritten. I googled the following tip somewhere:
+
+  "The same functionality can be achieved with the tp_getattro slot:
+  implement your special dynamic attributes there, and then call
+  PyObject_GenericGetAttr for the default behavior. You may have a
+  look at the implementation of the pyexpat module:
+  Modules/pyexpat.c, function xmlparse_getattro."
+
+  Looking at xmlparse_getattro [1] it seems it could be readily adopted
+  here.
+
+  [1] http://svn.python.org/projects/python/branches/py3k/Modules/pyexpat.c
+*/
+
 static PyObject *
 Comp_getattr(compobject *self, char *name)
 {
@@ -1052,17 +1080,50 @@ PyDoc_STRVAR(zlib_module_documentation,
 "Compressor objects support compress() and flush() methods; decompressor\n"
 "objects support decompress() and flush().");
 
+#if PY_MAJOR_VERSION >= 3
+/* See http://python3porting.com/cextensions.html */
+static struct PyModuleDef zlib_moddef = {
+    PyModuleDef_HEAD_INIT,
+    "zlib",
+    zlib_module_documentation,
+    -1,
+    zlib_methods,
+    NULL, NULL, NULL, NULL
+};
+#endif
+
+
+
 PyMODINIT_FUNC
 PyInit_zlib(void)
 {
     PyObject *m, *ver;
+
+#if PY_MAJOR_VERSION >= 3
+    /* Use this version check as a replacement for Py_InitModule4 */
+    ver = PySys_GetObject("version");
+    if (ver == NULL || !PyText_Check(ver) ||
+        strncmp(PyText_AS_UTF8(ver), PY_VERSION, 3) != 0) {
+        PyErr_Format(PyExc_ImportError,
+                     "this module was compiled for Python %c%c%c",
+                     PY_VERSION[0], PY_VERSION[1], PY_VERSION[2]);
+        return NULL;
+    }
+#endif
+
     Py_TYPE(&Comptype) = &PyType_Type;
     Py_TYPE(&Decomptype) = &PyType_Type;
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&zlib_moddef);
+    if (m == NULL)
+        return m;
+#else
     m = Py_InitModule4("zlib", zlib_methods,
                        zlib_module_documentation,
                        (PyObject*)NULL,PYTHON_API_VERSION);
     if (m == NULL)
         return;
+#endif
 
     ZlibError = PyErr_NewException("zlib.error", NULL, NULL);
     if (ZlibError != NULL) {
@@ -1101,4 +1162,8 @@ PyInit_zlib(void)
         PyModule_AddObject(m, "ZLIB_VERSION", ver);
 
     PyModule_AddStringConstant(m, "__version__", "1.0");
+
+#if PY_MAJOR_VERSION >= 3
+    return m;
+#endif
 }
diff --git a/async/pool.py b/async/pool.py
index f429eb9..2bacf2f 100644
--- a/async/pool.py
+++ b/async/pool.py
@@ -3,24 +3,24 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Implementation of a thread-pool working with channels"""
-from thread import (
+from .thread import (
     WorkerThread,
     StopProcessing,
     )
 from threading import Lock
 
-from util import (
+from .util import (
     AsyncQueue,
     DummyLock
     )
 
-from Queue import (
+from queue import (
     Queue,
     Empty
     )
 
-from graph import Graph
-from channel import (
+from .graph import Graph
+from .channel import (
     mkchannel,
     ChannelWriter,
     Channel,
@@ -31,6 +31,7 @@
 import sys
 import weakref
 from time import sleep
+from functools import reduce
 
 
 __all__ = ('PoolReader', 'Pool', 'ThreadPool')
@@ -284,7 +285,7 @@ def _prepare_channel_read(self, task, count):
             # to process too much. This can be defined per task
             qput = self._queue.put
             if numchunks > 1:
-                for i in xrange(numchunks):
+                for i in range(numchunks):
                     qput((task.process, chunksize))
                 # END for each chunk to put
             else:
@@ -297,7 +298,7 @@ def _prepare_channel_read(self, task, count):
         else:
             # no workers, so we have to do the work ourselves
             if numchunks > 1:
-                for i in xrange(numchunks):
+                for i in range(numchunks):
                     task.process(chunksize)
                 # END for each chunk to put
             else:
diff --git a/async/task.py b/async/task.py
index a585a9f..596f372 100644
--- a/async/task.py
+++ b/async/task.py
@@ -2,9 +2,9 @@
 #
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
-from graph import Node
-from util import ReadOnly
-from channel import IteratorReader
+from .graph import Node
+from .util import ReadOnly
+from .channel import IteratorReader
 
 import threading
 import weakref
@@ -128,7 +128,7 @@ def process(self, count=0):
                     self._num_writers -= 1
                 self._wlock.release()
             # END handle writer count
-        except Exception, e:
+        except Exception as e:
             # be sure our task is not scheduled again
             self.set_done()
 
@@ -226,7 +226,7 @@ def reader(self):
         """:return: input channel from which we read"""
         # the instance is bound in its instance method - lets use this to keep
         # the refcount at one ( per consumer )
-        return self._read.im_self
+        return self._read.__self__
 
     def set_read(self, read):
         """Adjust the read method to the given one"""
diff --git a/async/test/task.py b/async/test/task.py
index d8162e6..25c00d1 100644
--- a/async/test/task.py
+++ b/async/test/task.py
@@ -39,7 +39,7 @@ def _assert(self, pc, fc, check_scheduled=False):
         :return: self"""
         self.lock.acquire()
         if self.item_count != fc:
-            print self.item_count, fc
+            print(self.item_count, fc)
         assert self.item_count == fc
         self.lock.release()
 
@@ -166,7 +166,7 @@ def add_task_chain(p, ni, count=1, fail_setup=list(), feeder_channel=None, id_of
     tasks = [feeder]
 
     inrc = frc
-    for tc in xrange(count):
+    for tc in range(count):
         t = transformercls(inrc, tc+id_offset, None)
 
         t.fun = make_proxy_method(t)
@@ -198,7 +198,7 @@ def make_iterator_task(ni, taskcls=TestThreadTask, **kwargs):
     """:return: task which yields ni items
     :param taskcls: the actual iterator type to use
     :param kwargs: additional kwargs to be passed to the task"""
-    t = taskcls(iter(range(ni)), 'iterator', None, **kwargs)
+    t = taskcls(iter(list(range(ni))), 'iterator', None, **kwargs)
     if isinstance(t, _TestTaskBase):
         t.fun = make_proxy_method(t)
     return t
diff --git a/async/test/test_channel.py b/async/test/test_channel.py
index 9d9822e..44ae4e3 100644
--- a/async/test/test_channel.py
+++ b/async/test/test_channel.py
@@ -3,7 +3,7 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Channel testing"""
-from lib import *
+from .lib import *
 from async.channel import *
 
 import time
@@ -83,7 +83,7 @@ def post_read(items):
 
 
         # ITERATOR READER
-        reader = IteratorReader(iter(range(10)))
+        reader = IteratorReader(iter(list(range(10))))
         assert len(reader.read(2)) == 2
         assert len(reader.read(0)) == 8
         # its empty now
@@ -95,7 +95,7 @@ def post_read(items):
 
 
         # test general read-iteration - its supported by all readers
-        reader = IteratorReader(iter(range(10)))
+        reader = IteratorReader(iter(list(range(10))))
         assert len(list(reader)) == 10
 
         # NOTE: its thread-safety is tested by the pool
diff --git a/async/test/test_example.py b/async/test/test_example.py
index 22cbbda..b5b9243 100644
--- a/async/test/test_example.py
+++ b/async/test/test_example.py
@@ -3,7 +3,7 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Module containing examples from the documentaiton"""
-from lib import *
+from .lib import *
 from async.pool import *
 from async.task import *
 
@@ -25,7 +25,7 @@ def test_usage(self):
         assert p.size() == 1
 
         # A task performing processing on items from an iterator
-        t = IteratorThreadTask(iter(range(10)), "power", lambda i: i*i)
+        t = IteratorThreadTask(iter(list(range(10))), "power", lambda i: i*i)
         reader = p.add_task(t)
 
         # read all items - they where procesed by worker 1
@@ -34,7 +34,7 @@ def test_usage(self):
 
 
         # chaining
-        t = IteratorThreadTask(iter(range(10)), "power", lambda i: i*i)
+        t = IteratorThreadTask(iter(list(range(10))), "power", lambda i: i*i)
         reader = p.add_task(t)
 
         # chain both by linking their readers
diff --git a/async/test/test_graph.py b/async/test/test_graph.py
index 7484b98..48c99ff 100644
--- a/async/test/test_graph.py
+++ b/async/test/test_graph.py
@@ -3,7 +3,7 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Channel testing"""
-from lib import *
+from .lib import *
 from async.graph import *
 
 import time
diff --git a/async/test/test_performance.py b/async/test/test_performance.py
index b314bb5..fb35ff1 100644
--- a/async/test/test_performance.py
+++ b/async/test/test_performance.py
@@ -3,8 +3,8 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Channel testing"""
-from lib import *
-from task import *
+from .lib import *
+from .task import *
 from async.pool import *
 from async.thread import terminate_threads
 
@@ -24,7 +24,7 @@ def test_base(self):
         # when adjusting the amount of threads
         pool = ThreadPool(0)
         ni = 1000       # number of items to process
-        print self.max_threads
+        print(self.max_threads)
         for num_threads in range(self.max_threads*2 + 1):
             pool.set_size(num_threads)
             for num_transformers in (1, 5, 10):
@@ -42,14 +42,14 @@ def test_base(self):
                     reader = rcs[-1]
                     st = time.time()
                     if read_mode == 1:
-                        for i in xrange(ni):
+                        for i in range(ni):
                             assert len(reader.read(1)) == 1
                         # END for each item to read
                     else:
                         assert len(reader.read(0)) == ni
                     # END handle read mode
                     elapsed = time.time() - st
-                    print >> sys.stderr, fmt % (num_threads, ni, num_transformers, elapsed, ni / elapsed)
+                    print(fmt % (num_threads, ni, num_transformers, elapsed, ni / elapsed), file=sys.stderr)
                 # END for each read-mode
             # END for each amount of processors
         # END for each thread count
diff --git a/async/test/test_pool.py b/async/test/test_pool.py
index 95e6eec..a9beaf6 100644
--- a/async/test/test_pool.py
+++ b/async/test/test_pool.py
@@ -3,8 +3,8 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Pool testing"""
-from lib import *
-from task import *
+from .lib import *
+from .task import *
 from async.pool import *
 from async.thread import terminate_threads
 
@@ -23,7 +23,7 @@ class TestThreadPool(TestBase):
 
     def _assert_single_task(self, p, async=False):
         """Performs testing in a synchronized environment"""
-        print >> sys.stderr, "Threadpool: Starting single task (async = %i) with %i threads" % (async, p.size())
+        print("Threadpool: Starting single task (async = %i) with %i threads" % (async, p.size()), file=sys.stderr)
         null_tasks = p.num_tasks()      # in case we had some before
 
         # add a simple task
@@ -44,7 +44,7 @@ def _assert_single_task(self, p, async=False):
 
         # pull the result completely - we should get one task, which calls its
         # function once. In sync mode, the order matches
-        print "read(0)"
+        print("read(0)")
         items = rc.read()
         assert len(items) == ni
         task._assert(1, ni)
@@ -61,7 +61,7 @@ def _assert_single_task(self, p, async=False):
         rc = p.add_task(task)
         assert p.num_tasks() == 1 + null_tasks
         st = time.time()
-        print "read(1) * %i" % ni
+        print("read(1) * %i" % ni)
         for i in range(ni):
             items = rc.read(1)
             assert len(items) == 1
@@ -71,7 +71,7 @@ def _assert_single_task(self, p, async=False):
                 assert i == items[0]
         # END for each item
         elapsed = time.time() - st
-        print >> sys.stderr, "Threadpool: processed %i individual items, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, p.size(), elapsed, ni / elapsed)
+        print("Threadpool: processed %i individual items, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, p.size(), elapsed, ni / elapsed), file=sys.stderr)
 
         # it couldn't yet notice that the input is depleted as we pulled exaclty
         # ni items - the next one would remove it. Instead, we delete our channel
@@ -86,17 +86,17 @@ def _assert_single_task(self, p, async=False):
         task = make_task()
         task.min_count = ni / 2
         rc = p.add_task(task)
-        print "read(1)"
+        print("read(1)")
         items = rc.read(1)
         assert len(items) == 1 and items[0] == 0        # processes ni / 2
-        print "read(1)"
+        print("read(1)")
         items = rc.read(1)
         assert len(items) == 1 and items[0] == 1        # processes nothing
         # rest - it has ni/2 - 2 on the queue, and pulls ni-2
         # It wants too much, so the task realizes its done. The task
         # doesn't care about the items in its output channel
         nri = ni-2
-        print "read(%i)" % nri
+        print("read(%i)" % nri)
         items = rc.read(nri)
         assert len(items) == nri
         p.remove_task(task)
@@ -106,7 +106,7 @@ def _assert_single_task(self, p, async=False):
         # its already done, gives us no more, its still okay to use it though
         # as a task doesn't have to be in the graph to allow reading its produced
         # items
-        print "read(0) on closed"
+        print("read(0) on closed")
         # it can happen that a thread closes the channel just a tiny fraction of time
         # after we check this, so the test fails, although it is nearly closed.
         # When we start reading, we should wake up once it sends its signal
@@ -124,13 +124,13 @@ def _assert_single_task(self, p, async=False):
         # count is still at ni / 2 - here we want more than that
         # 2 steps with n / 4 items, + 1 step with n/4 items to get + 2
         nri = ni / 2 + 2
-        print "read(%i) chunksize set" % nri
+        print("read(%i) chunksize set" % nri)
         items = rc.read(nri)
         assert len(items) == nri
 
         # have n / 4 - 2 items on queue, want n / 4 in first chunk, cause 1 processing
         # ( 4 in total ). Still want n / 4 - 2 in second chunk, causing another processing
         nri = ni / 2 - 2
-        print "read(%i) chunksize set" % nri
+        print("read(%i) chunksize set" % nri)
         items = rc.read(nri)
         assert len(items) == nri
@@ -152,7 +152,7 @@ def _assert_single_task(self, p, async=False):
         task.max_chunksize = ni / 4     # match previous setup
         rc = p.add_task(task)
         st = time.time()
-        print "read(1) * %i, chunksize set" % ni
+        print("read(1) * %i, chunksize set" % ni)
         for i in range(ni):
             if async:
                 assert len(rc.read(1)) == 1
@@ -162,7 +162,7 @@ def _assert_single_task(self, p, async=False):
         # END pull individual items
         # too many processing counts ;)
         elapsed = time.time() - st
-        print >> sys.stderr, "Threadpool: processed %i individual items in chunks of %i, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, ni/4, p.size(), elapsed, ni / elapsed)
+        print("Threadpool: processed %i individual items in chunks of %i, with %i threads, one at a time, in %f s ( %f items / s )" % (ni, ni/4, p.size(), elapsed, ni / elapsed), file=sys.stderr)
 
         task._assert(ni, ni)
         assert p.num_tasks() == 1 + null_tasks
@@ -174,7 +174,7 @@ def _assert_single_task(self, p, async=False):
         task.min_count = ni / 4
         task.max_chunksize = ni / 4     # match previous setup
         rc = p.add_task(task)
-        print "read(1) * %i, min_count%i + chunksize" % (ni, task.min_count)
+        print("read(1) * %i, min_count%i + chunksize" % (ni, task.min_count))
         for i in range(ni):
             items = rc.read(1)
             assert len(items) == 1
@@ -191,7 +191,7 @@ def _assert_single_task(self, p, async=False):
         task = make_task()
         task.should_fail = True
         rc = p.add_task(task)
-        print "read(0) with failure"
+        print("read(0) with failure")
         assert len(rc.read()) == 0      # failure on first item
 
         assert isinstance(task.error(), AssertionError)
@@ -208,7 +208,7 @@ def _assert_single_task(self, p, async=False):
         assert task.is_done()
         assert isinstance(task.error(), AssertionError)
 
-        print >> sys.stderr, "done with everything"
+        print("done with everything", file=sys.stderr)
 
 
@@ -217,7 +217,7 @@ def _assert_async_dependent_tasks(self, pool):
         # This will also verify that the channel-close mechanism works
         # t1 -> t2 -> t3
 
-        print >> sys.stderr, "Threadpool: starting async dependency test in %i threads" % pool.size()
+        print("Threadpool: starting async dependency test in %i threads" % pool.size(), file=sys.stderr)
         null_tasks = pool.num_tasks()
         ni = 1000
         count = 3
@@ -234,7 +234,7 @@ def _assert_async_dependent_tasks(self, pool):
         st = time.time()
         items = rcs[-1].read()
         elapsed = time.time() - st
-        print len(items), ni
+        print(len(items), ni)
         assert len(items) == ni
         del(rcs)
         assert pool.num_tasks() == 0        # tasks depleted, all done, no handles
@@ -243,21 +243,21 @@ def _assert_async_dependent_tasks(self, pool):
         time.sleep(0.15)
         assert sys.getrefcount(ts[-1]) == 2 # ts + call
         assert sys.getrefcount(ts[0]) == 2  # ts + call
-        print >> sys.stderr, "Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed, ni / elapsed)
+        print("Dependent Tasks: evaluated %i items of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed, ni / elapsed), file=sys.stderr)
 
 
         # read(1)
         #########
         ts, rcs = make_task()
         st = time.time()
-        for i in xrange(ni):
+        for i in range(ni):
             items = rcs[-1].read(1)
             assert len(items) == 1
         # END for each item to pull
         elapsed_single = time.time() - st
         # another read yields nothing, its empty
         assert len(rcs[-1].read()) == 0
-        print >> sys.stderr, "Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed_single, ni / elapsed_single)
+        print("Dependent Tasks: evaluated %i items with read(1) of %i dependent in %f s ( %i items / s )" % (ni, aic, elapsed_single, ni / elapsed_single), file=sys.stderr)
 
 
         # read with min-count size
@@ -270,14 +270,14 @@ def _assert_async_dependent_tasks(self, pool):
         nri = ni / 4
         ts[-1].min_count = nri
         st = time.time()
-        for i in xrange(ni):
+        for i in range(ni):
             items = rcs[-1].read(1)
             assert len(items) == 1
         # END for each item to read
         elapsed_minsize = time.time() - st
         # its empty
         assert len(rcs[-1].read()) == 0
-        print >> sys.stderr, "Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize)
+        print("Dependent Tasks: evaluated %i items with read(1), min_size=%i, of %i dependent in %f s ( %i items / s )" % (ni, nri, aic, elapsed_minsize, ni / elapsed_minsize), file=sys.stderr)
 
         # it should have been a bit faster at least, and most of the time it is
         # Sometimes, its not, mainly because:
@@ -314,13 +314,13 @@ def _assert_async_dependent_tasks(self, pool):
         assert p2.num_tasks() == len(p2ts)-1        # first is None
 
         # reading from the last one will evaluate all pools correctly
-        print "read(0) multi-pool"
+        print("read(0) multi-pool")
         st = time.time()
         items = p2rcs[-1].read()
         elapsed = time.time() - st
         assert len(items) == ni
 
-        print >> sys.stderr, "Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic-1, elapsed, ni / elapsed)
+        print("Dependent Tasks: evaluated 2 connected pools and %i items with read(0), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic-1, elapsed, ni / elapsed), file=sys.stderr)
 
 
         # loose the handles of the second pool to allow others to go as well
@@ -335,17 +335,17 @@ def _assert_async_dependent_tasks(self, pool):
         assert p2.num_tasks() == len(p2ts) - 1
 
         # Test multi-read(1)
-        print "read(1) * %i" % ni
+        print("read(1) * %i" % ni)
         reader = rcs[-1]
         st = time.time()
-        for i in xrange(ni):
+        for i in range(ni):
             items = reader.read(1)
             assert len(items) == 1
         # END for each item to get
         elapsed = time.time() - st
         del(reader)     # decrement refcount
-        print >> sys.stderr, "Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic-1, elapsed, ni / elapsed)
+        print("Dependent Tasks: evaluated 2 connected pools and %i items with read(1), of %i dependent tasks in %f s ( %i items / s )" % (ni, aic + aic-1, elapsed, ni / elapsed), file=sys.stderr)
 
         # another read is empty
         assert len(rcs[-1].read()) == 0
@@ -482,5 +482,5 @@ def test_base(self):
         ###########################
         self._assert_async_dependent_tasks(p)
 
-        print >> sys.stderr, "Done with everything"
+        print("Done with everything", file=sys.stderr)
 
diff --git a/async/test/test_task.py b/async/test/test_task.py
index d2e3cd0..e587c88 100644
--- a/async/test/test_task.py
+++ b/async/test/test_task.py
@@ -3,7 +3,7 @@
 # This module is part of async and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Channel testing"""
-from lib import *
+from .lib import *
 from async.util import *
 from async.task import *
 
diff --git a/async/test/test_thread.py b/async/test/test_thread.py
index 737c207..e5e5158 100644
--- a/async/test/test_thread.py
+++ b/async/test/test_thread.py
@@ -4,9 +4,9 @@
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 # -*- coding: utf-8 -*-
 """ Test thead classes and functions"""
-from lib import *
+from .lib import *
 from async.thread import *
-from Queue import Queue
+from queue import Queue
 import time
 
 class TestWorker(WorkerThread):
diff --git a/async/thread.py b/async/thread.py
index 9150f9a..7aacf6d 100644
--- a/async/thread.py
+++ b/async/thread.py
@@ -7,7 +7,7 @@
 __docformat__ = "restructuredtext"
 import threading
 import inspect
-import Queue
+import queue
 
 import sys
 
@@ -140,7 +140,7 @@ def __init__(self, inq = None):
         super(WorkerThread, self).__init__()
         self.inq = inq
         if inq is None:
-            self.inq = Queue.Queue()
+            self.inq = queue.Queue()
 
     @classmethod
     def stop(cls, *args):
@@ -171,7 +171,7 @@ def run(self):
                 try:
                     rval = None
                     if inspect.ismethod(routine):
-                        if routine.im_self is None:
+                        if routine.__self__ is None:
                             rval = routine(self, arg)
                         else:
                             rval = routine(arg)
@@ -190,7 +190,7 @@ def run(self):
                     del(tasktuple)
                 except StopProcessing:
                     break
-                except Exception,e:
+                except Exception as e:
                     sys.stderr.write("%s: Task %s raised unhandled exception: %s - this really shouldn't happen !\n" % (self.getName(), str(tasktuple), str(e)))
                     continue    # just continue
                 # END routine exception handling
diff --git a/async/util.py b/async/util.py
index e671659..16955ff 100644
--- a/async/util.py
+++ b/async/util.py
@@ -11,7 +11,7 @@
     _time,
     )
 
-from Queue import (
+from queue import (
     Empty,
     )
 
diff --git a/setup.py b/setup.py
index 2aa87f8..2fb1dda 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ def run(self):
         try:
             build_ext.run(self)
         except Exception:
-            print "Ignored failure when building extensions, pure python modules will be used instead"
+            print("Ignored failure when building extensions, pure python modules will be used instead")
         # END ignore errors
 
     def get_data_files():
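
A note on the Comp_getattr/Decomp_getattr rewrite that the zlibmodule.c comment
above leaves open: following the pyexpat pattern it cites, the port could look
roughly like the sketch below. This is an illustration only, not part of the
patch. It assumes the existing compobject layout, leaves out the
ENTER_ZLIB/LEAVE_ZLIB locking done by the Python 2 Decomp_getattr, and presumes
Decomp_methods gets wired into Decomptype's tp_methods slot so that
PyObject_GenericGetAttr can resolve the methods; the name Decomp_getattro is
made up here for the example.

    /* Sketch only -- not applied by this patch. */
    static PyObject *
    Decomp_getattro(PyObject *op, PyObject *name)
    {
        compobject *self = (compobject *)op;

        /* Serve the two dynamic attributes directly... */
        if (PyUnicode_Check(name)) {
            if (PyUnicode_CompareWithASCIIString(name, "unused_data") == 0) {
                Py_INCREF(self->unused_data);
                return self->unused_data;
            }
            if (PyUnicode_CompareWithASCIIString(name, "unconsumed_tail") == 0) {
                Py_INCREF(self->unconsumed_tail);
                return self->unconsumed_tail;
            }
        }
        /* ...and fall through to the default lookup for everything else. */
        return PyObject_GenericGetAttr(op, name);
    }

The type would then set tp_getattro to Decomp_getattro and clear the old
char*-based tp_getattr slot, mirroring how Modules/pyexpat.c hooks up
xmlparse_getattro.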