diff --git a/Lib/multiprocessing/heap.py b/Lib/multiprocessing/heap.py
index 1834d0a1eb2..0a25ef05c7f 100644
--- a/Lib/multiprocessing/heap.py
+++ b/Lib/multiprocessing/heap.py
@@ -101,6 +101,8 @@ class Heap(object):
         self._stop_to_block = {}
         self._allocated_blocks = set()
         self._arenas = []
+        # list of pending blocks to free - see free() comment below
+        self._pending_free_blocks = []
 
     @staticmethod
     def _roundup(n, alignment):
@@ -175,15 +177,39 @@ class Heap(object):
 
         return start, stop
 
-    def free(self, block):
-        # free a block returned by malloc()
-        assert os.getpid() == self._lastpid
-        self._lock.acquire()
-        try:
+    def _free_pending_blocks(self):
+        # Free all the blocks in the pending list - called with the lock held.
+        while True:
+            try:
+                block = self._pending_free_blocks.pop()
+            except IndexError:
+                break
             self._allocated_blocks.remove(block)
             self._free(block)
-        finally:
-            self._lock.release()
+
+    def free(self, block):
+        # free a block returned by malloc()
+        # Since free() can be called asynchronously by the GC, it could happen
+        # that it's called while self._lock is held: in that case,
+        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
+        # trylock is used instead, and if the lock can't be acquired
+        # immediately, the block is added to a list of blocks to be freed
+        # synchronously sometime later from malloc() or free(), by calling
+        # _free_pending_blocks() (appending and retrieving from a list is not
+        # strictly thread-safe but under CPython it's atomic thanks to the GIL).
+        assert os.getpid() == self._lastpid
+        if not self._lock.acquire(False):
+            # can't acquire the lock right now, add the block to the list of
+            # pending blocks to free
+            self._pending_free_blocks.append(block)
+        else:
+            # we hold the lock
+            try:
+                self._free_pending_blocks()
+                self._allocated_blocks.remove(block)
+                self._free(block)
+            finally:
+                self._lock.release()
 
     def malloc(self, size):
         # return a block of right size (possibly rounded up)
@@ -191,6 +217,7 @@ class Heap(object):
         if os.getpid() != self._lastpid:
             self.__init__()                     # reinitialize after fork
         self._lock.acquire()
+        self._free_pending_blocks()
         try:
             size = self._roundup(max(size,1), self._alignment)
             (arena, start, stop) = self._malloc(size)
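The heap.py change above is a try-lock-and-defer pattern: a finalizer that may
run re-entrantly must never block on a lock its own thread might already hold,
so on contention the work is queued and drained later by whoever next takes
the lock. A minimal standalone sketch of the same pattern (the names
DeferredFreeList, release and _drain_pending are illustrative, not part of
multiprocessing.heap):

    import threading

    class DeferredFreeList(object):
        def __init__(self):
            self._lock = threading.Lock()
            self._resources = set()
            # Unlocked on purpose: list.append()/list.pop() are atomic under
            # CPython thanks to the GIL, as the free() comment above notes.
            self._pending = []

        def _drain_pending(self):
            # Must be called with self._lock held.
            while True:
                try:
                    item = self._pending.pop()
                except IndexError:
                    break
                self._resources.discard(item)

        def release(self, item):
            if not self._lock.acquire(False):
                # Possibly called from a finalizer while this same thread
                # already holds the lock: blocking would deadlock, so defer.
                self._pending.append(item)
            else:
                try:
                    self._drain_pending()
                    self._resources.discard(item)
                finally:
                    self._lock.release()

Every code path that takes the lock (here release(); in the patch, malloc()
and free()) drains the pending list first, so deferred items cannot sit around
forever as long as the heap keeps being used.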
diff --git a/Lib/test/test_multiprocessing.py b/Lib/test/test_multiprocessing.py
index 3e467d5ea0b..e9d03293bf7 100644
--- a/Lib/test/test_multiprocessing.py
+++ b/Lib/test/test_multiprocessing.py
@@ -1580,6 +1580,8 @@ class _TestHeap(BaseTestCase):
         # verify the state of the heap
         all = []
         occupied = 0
+        heap._lock.acquire()
+        self.addCleanup(heap._lock.release)
         for L in list(heap._len_to_seq.values()):
             for arena, start, stop in L:
                 all.append((heap._arenas.index(arena), start, stop,
@@ -1597,6 +1599,28 @@ class _TestHeap(BaseTestCase):
             self.assertTrue((arena != narena and nstart == 0) or
                             (stop == nstart))
 
+    def test_free_from_gc(self):
+        # Check that freeing of blocks by the garbage collector doesn't deadlock
+        # (issue #12352).
+        # Make sure the GC is enabled, and set lower collection thresholds to
+        # make collections more frequent (and increase the probability of
+        # deadlock).
+        if not gc.isenabled():
+            gc.enable()
+            self.addCleanup(gc.disable)
+        thresholds = gc.get_threshold()
+        self.addCleanup(gc.set_threshold, *thresholds)
+        gc.set_threshold(10)
+
+        # perform numerous block allocations, with cyclic references to make
+        # sure objects are collected asynchronously by the gc
+        for i in range(5000):
+            a = multiprocessing.heap.BufferWrapper(1)
+            b = multiprocessing.heap.BufferWrapper(1)
+            # circular references
+            a.buddy = b
+            b.buddy = a
+
 #
 #
 #
diff --git a/Misc/NEWS b/Misc/NEWS
index e68a4e3126b..1e98768a764 100644
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -107,6 +107,9 @@ Core and Builtins
 Library
 -------
 
+- Issue #12352: Fix a deadlock in multiprocessing.Heap when a block is freed by
+  the garbage collector while the Heap lock is held.
+
 - Issue #985064: Make plistlib more resilient to faulty input plists.  Patch
   by Mher Movsisyan.
 
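For context on what the new test provokes: blocks reachable only through a
reference cycle are reclaimed by the cyclic garbage collector rather than by
reference counting, so their finalizers fire at an arbitrary later allocation
point - possibly while the allocating thread already holds the heap lock. A
rough standalone sketch of that shape (hypothetical names; it uses a plain
weakref callback where multiprocessing uses its Finalize helper):

    import gc
    import threading
    import weakref

    lock = threading.Lock()        # stands in for Heap._lock (non-reentrant)

    def on_reclaim(ref):
        # Fires whenever the cyclic GC collects a wrapper - potentially while
        # this same thread is inside the `with lock:` block below. Blocking
        # here would deadlock; a trylock (or deferral) does not.
        if lock.acquire(False):
            lock.release()

    class Wrapper(object):
        pass

    refs = []
    gc.set_threshold(10)           # frequent collections, as in the test
    for _ in range(1000):
        with lock:                 # simulate allocating under the heap lock
            a, b = Wrapper(), Wrapper()
            a.buddy, b.buddy = b, a        # cycle: refcounting can't free it
            refs.append(weakref.ref(a, on_reclaim))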