bsddb code updated to version 4.7.3pre2. This code is the same as the
Python 2.6 one, since the intention is to keep a unified 2.x/3.x
codebase.

The Python code is automatically translated using "2to3". Please do not
update this code in Python 3.0 by hand; update the 2.6 version and then
run "2to3".
Jesus Cea 2008-08-31 14:12:11 +00:00
parent 73c96dbf34
commit 6ba3329c27
33 changed files with 5396 additions and 2692 deletions
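
As a quick illustration of the workflow described in the commit message, here is a minimal sketch of the translation step; the source path Lib/bsddb is an assumption, and the same conversion is normally run via the 2to3 command-line tool.

from lib2to3.main import main as run_2to3

# Rewrite the 2.6 sources in place ("-w" writes the converted files back to
# disk); the resulting tree is what lands in the 3.0 branch.
run_2to3("lib2to3.fixes", ["-w", "Lib/bsddb"])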

View File

@ -33,18 +33,25 @@
#----------------------------------------------------------------------
"""Support for BerkeleyDB 3.3 through 4.4 with a simple interface.
"""Support for Berkeley DB 4.0 through 4.7 with a simple interface.
For the full featured object oriented interface use the bsddb.db module
instead. It mirrors the Sleepycat BerkeleyDB C API.
instead. It mirrors the Oracle Berkeley DB C API.
"""
import sys
absolute_import = (sys.version_info[0] >= 3)
try:
if __name__ == 'bsddb3':
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
import _pybsddb
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import _pybsddb")
else :
import _pybsddb
_bsddb = _pybsddb
from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
else:
@ -64,10 +71,18 @@ error = db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
import sys, os, collections
import sys, os
from weakref import ref
class _iter_mixin(collections.MutableMapping):
if sys.version_info[0:2] <= (2, 5) :
import UserDict
MutableMapping = UserDict.DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
class _iter_mixin(MutableMapping):
def _make_iter_cursor(self):
cur = _DeadlockWrap(self.db.cursor)
key = id(cur)
@ -81,64 +96,89 @@ class _iter_mixin(collections.MutableMapping):
return lambda ref: self._cursor_refs.pop(key, None)
def __iter__(self):
self._kill_iteration = False
self._in_iter += 1
try:
cur = self._make_iter_cursor()
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# since we're only returning keys, we call the cursor
# methods with flags=0, dlen=0, dofs=0
key = _DeadlockWrap(cur.first, 0,0,0)[0]
yield key
# since we're only returning keys, we call the cursor
# methods with flags=0, dlen=0, dofs=0
key = _DeadlockWrap(cur.first, 0,0,0)[0]
yield key
next = cur.next
while 1:
try:
key = _DeadlockWrap(next, 0,0,0)[0]
yield key
except _bsddb.DBCursorClosedError:
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = cur.next
except _bsddb.DBNotFoundError:
return
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
return
next = cur.__next__
while 1:
try:
key = _DeadlockWrap(next, 0,0,0)[0]
yield key
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = cur.__next__
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
# When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
def iteritems(self):
if not self.db:
return
self._kill_iteration = False
self._in_iter += 1
try:
cur = self._make_iter_cursor()
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
kv = _DeadlockWrap(cur.first)
key = kv[0]
yield kv
kv = _DeadlockWrap(cur.first)
key = kv[0]
yield kv
next = cur.__next__
while 1:
try:
kv = _DeadlockWrap(next)
key = kv[0]
yield kv
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = cur.__next__
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
# When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
next = cur.next
while 1:
try:
kv = _DeadlockWrap(next)
key = kv[0]
yield kv
except _bsddb.DBCursorClosedError:
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = cur.next
except _bsddb.DBNotFoundError:
return
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
return
class _DBWithCursor(_iter_mixin):
"""
@ -166,13 +206,12 @@ class _DBWithCursor(_iter_mixin):
# a collection of all DBCursor objects currently allocated
# by the _iter_mixin interface.
self._cursor_refs = {}
self._in_iter = 0
self._kill_iteration = False
def __del__(self):
self.close()
def __repr__(self):
return repr(dict(self.iteritems()))
def _checkCursor(self):
if self.dbc is None:
self.dbc = _DeadlockWrap(self.db.cursor)
@ -181,7 +220,7 @@ class _DBWithCursor(_iter_mixin):
self.saved_dbc_key = None
# This method is needed for all non-cursor DB calls to avoid
# BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
# Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
# and DB_THREAD to be thread safe) when intermixing database
# operations that use the cursor internally with those that don't.
def _closeCursors(self, save=1):
@ -195,7 +234,7 @@ class _DBWithCursor(_iter_mixin):
pass
_DeadlockWrap(c.close)
del c
for cref in self._cursor_refs.values():
for cref in list(self._cursor_refs.values()):
c = cref()
if c is not None:
_DeadlockWrap(c.close)
@ -211,6 +250,12 @@ class _DBWithCursor(_iter_mixin):
self._checkOpen()
return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
if sys.version_info[0:2] >= (2, 6) :
def __repr__(self) :
if self.isOpen() :
return repr(dict(_DeadlockWrap(self.db.items)))
return repr(dict())
def __getitem__(self, key):
self._checkOpen()
return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
@ -218,6 +263,8 @@ class _DBWithCursor(_iter_mixin):
def __setitem__(self, key, value):
self._checkOpen()
self._closeCursors()
if self._in_iter and key not in self:
self._kill_iteration = True
def wrapF():
self.db[key] = value
_DeadlockWrap(wrapF) # self.db[key] = value
@ -225,6 +272,8 @@ class _DBWithCursor(_iter_mixin):
def __delitem__(self, key):
self._checkOpen()
self._closeCursors()
if self._in_iter and key in self:
self._kill_iteration = True
def wrapF():
del self.db[key]
_DeadlockWrap(wrapF) # del self.db[key]
@ -248,17 +297,15 @@ class _DBWithCursor(_iter_mixin):
self._checkOpen()
return _DeadlockWrap(self.db.has_key, key)
__contains__ = has_key
def set_location(self, key):
self._checkOpen()
self._checkCursor()
return _DeadlockWrap(self.dbc.set_range, key)
def next(self):
def __next__(self):
self._checkOpen()
self._checkCursor()
rv = _DeadlockWrap(self.dbc.next)
rv = _DeadlockWrap(self.dbc.__next__)
return rv
def previous(self):
@ -287,146 +334,6 @@ class _DBWithCursor(_iter_mixin):
self._checkOpen()
return _DeadlockWrap(self.db.sync)
class _ExposedProperties:
@property
def _cursor_refs(self):
return self.db._cursor_refs
class StringKeys(collections.MutableMapping, _ExposedProperties):
"""Wrapper around DB object that automatically encodes
all keys as UTF-8; the keys must be strings."""
def __init__(self, db):
self.db = db
def __len__(self):
return len(self.db)
def __getitem__(self, key):
return self.db[key.encode("utf-8")]
def __setitem__(self, key, value):
self.db[key.encode("utf-8")] = value
def __delitem__(self, key):
del self.db[key.encode("utf-8")]
def __iter__(self):
for k in self.db:
yield k.decode("utf-8")
def close(self):
self.db.close()
def keys(self):
for k in self.db.keys():
yield k.decode("utf-8")
def has_key(self, key):
return self.db.has_key(key.encode("utf-8"))
__contains__ = has_key
def values(self):
return self.db.values()
def items(self):
for k,v in self.db.items():
yield k.decode("utf-8"), v
def set_location(self, key):
return self.db.set_location(key.encode("utf-8"))
def next(self):
key, value = self.db.next()
return key.decode("utf-8"), value
def previous(self):
key, value = self.db.previous()
return key.decode("utf-8"), value
def first(self):
key, value = self.db.first()
return key.decode("utf-8"), value
def last(self):
key, value = self.db.last()
return key.decode("utf-8"), value
def set_location(self, key):
key, value = self.db.set_location(key.encode("utf-8"))
return key.decode("utf-8"), value
def sync(self):
return self.db.sync()
class StringValues(collections.MutableMapping, _ExposedProperties):
"""Wrapper around DB object that automatically encodes
and decodes all values as UTF-8; input values must be strings."""
def __init__(self, db):
self.db = db
def __len__(self):
return len(self.db)
def __getitem__(self, key):
return self.db[key].decode("utf-8")
def __setitem__(self, key, value):
self.db[key] = value.encode("utf-8")
def __delitem__(self, key):
del self.db[key]
def __iter__(self):
return iter(self.db)
def close(self):
self.db.close()
def keys(self):
return self.db.keys()
def has_key(self, key):
return self.db.has_key(key)
__contains__ = has_key
def values(self):
for v in self.db.values():
yield v.decode("utf-8")
def items(self):
for k,v in self.db.items():
yield k, v.decode("utf-8")
def set_location(self, key):
return self.db.set_location(key)
def next(self):
key, value = self.db.next()
return key, value.decode("utf-8")
def previous(self):
key, value = self.db.previous()
return key, value.decode("utf-8")
def first(self):
key, value = self.db.first()
return key, value.decode("utf-8")
def last(self):
key, value = self.db.last()
return key, value.decode("utf-8")
def set_location(self, key):
key, value = self.db.set_location(key)
return key, value.decode("utf-8")
def sync(self):
return self.db.sync()
#----------------------------------------------------------------------
# Compatibility object factory functions
@ -507,12 +414,12 @@ def _checkflag(flag, file):
elif flag == 'n':
flags = db.DB_CREATE
#flags = db.DB_CREATE | db.DB_TRUNCATE
# we used db.DB_TRUNCATE flag for this before but BerkeleyDB
# we used db.DB_TRUNCATE flag for this before but Berkeley DB
# 4.2.52 changed to disallowed truncate with txn environments.
if file is not None and os.path.isfile(file):
os.unlink(file)
else:
raise error("flags should be one of 'r', 'w', 'c' or 'n', not "+repr(flag))
raise error("flags should be one of 'r', 'w', 'c' or 'n'")
return flags | db.DB_THREAD
#----------------------------------------------------------------------
@ -520,16 +427,14 @@ def _checkflag(flag, file):
# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# BerkeleyDB.
# Berkeley DB.
#
# This assumes that if Python was built with thread support then
# BerkeleyDB was too.
# Berkeley DB was too.
try:
import _thread
del _thread
if db.version() < (3, 3, 0):
db.DB_THREAD = 0
except ImportError:
db.DB_THREAD = 0
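
A hypothetical usage sketch, not part of this commit, of the _in_iter/_kill_iteration bookkeeping introduced above: mutating the database while iterating over it is now expected to surface as a RuntimeError, much like resizing a dict during iteration (the temporary file name is made up).

import bsddb

d = bsddb.hashopen("/tmp/iter_demo.db", "c")
d[b"a"] = b"1"
d[b"b"] = b"2"

try:
    for key in d:
        d[b"c"] = b"3"      # the insert closes the open iteration cursor ...
except RuntimeError as exc:
    print(exc)              # ... and the next step reports the size change
finally:
    d.close()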

View File

@ -37,15 +37,24 @@
# case we ever want to augment the stuff in _db in any way. For now
# it just simply imports everything from _db.
if __name__.startswith('bsddb3.'):
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
from _pybsddb import *
from _pybsddb import __version__
else:
from _bsddb import *
from _bsddb import __version__
import sys
absolute_import = (sys.version_info[0] >= 3)
if version() < (3, 2, 0):
raise ImportError("correct BerkeleyDB symbols not found. Perhaps python was statically linked with an older version?")
if not absolute_import :
if __name__.startswith('bsddb3.') :
# import _pybsddb binary as it should be the more recent version from
# a standalone pybsddb addon package than the version included with
# python as bsddb._bsddb.
from _pybsddb import *
from _pybsddb import __version__
else:
from _bsddb import *
from _bsddb import __version__
else :
# Because this syntax is not valid before Python 2.5
if __name__.startswith('bsddb3.') :
exec("from ._pybsddb import *")
exec("from ._pybsddb import __version__")
else :
exec("from ._bsddb import *")
exec("from ._bsddb import __version__")

View File

@ -21,12 +21,24 @@
# added to _bsddb.c.
#
from . import db
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
from . import db
try:
from collections import MutableMapping
except ImportError:
class MutableMapping: pass
if sys.version_info[0:2] <= (2, 5) :
try:
from UserDict import DictMixin
except ImportError:
# DictMixin is new in Python 2.3
class DictMixin: pass
MutableMapping = DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
class DBEnv:
def __init__(self, *args, **kwargs):
@ -95,9 +107,8 @@ class DBEnv:
def set_get_returns_none(self, *args, **kwargs):
return self._cobj.set_get_returns_none(*args, **kwargs)
if db.version() >= (4,0):
def log_stat(self, *args, **kwargs):
return self._cobj.log_stat(*args, **kwargs)
def log_stat(self, *args, **kwargs):
return self._cobj.log_stat(*args, **kwargs)
if db.version() >= (4,1):
def dbremove(self, *args, **kwargs):
@ -115,7 +126,7 @@ class DBEnv:
class DB(MutableMapping):
def __init__(self, dbenv, *args, **kwargs):
# give it the proper DBEnv C object that its expecting
self._cobj = db.DB(dbenv._cobj, *args, **kwargs)
self._cobj = db.DB(*(dbenv._cobj,) + args, **kwargs)
# TODO are there other dict methods that need to be overridden?
def __len__(self):
@ -126,8 +137,10 @@ class DB(MutableMapping):
self._cobj[key] = value
def __delitem__(self, arg):
del self._cobj[arg]
def __iter__(self):
return iter(self.keys())
if sys.version_info[0:2] >= (2, 6) :
def __iter__(self) :
return self._cobj.__iter__()
def append(self, *args, **kwargs):
return self._cobj.append(*args, **kwargs)
@ -163,8 +176,6 @@ class DB(MutableMapping):
return self._cobj.key_range(*args, **kwargs)
def has_key(self, *args, **kwargs):
return self._cobj.has_key(*args, **kwargs)
def __contains__(self, key):
return self._cobj.has_key(key)
def items(self, *args, **kwargs):
return self._cobj.items(*args, **kwargs)
def keys(self, *args, **kwargs):
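
The switch between UserDict.DictMixin and collections.MutableMapping seen here recurs in several of these modules. A standalone sketch of the pattern with a made-up TinyStore class, valid for the interpreters this commit targets (on Python 3.3+ the ABC also lives in collections.abc):

import sys

if sys.version_info[0:2] <= (2, 5):
    from UserDict import DictMixin as MutableMapping
else:
    import collections
    MutableMapping = collections.MutableMapping

class TinyStore(MutableMapping):
    """get(), pop(), update(), items()... come for free from the mixin/ABC."""
    def __init__(self):
        self._data = {}
    def __getitem__(self, key):
        return self._data[key]
    def __setitem__(self, key, value):
        self._data[key] = value
    def __delitem__(self, key):
        del self._data[key]
    def __iter__(self):
        return iter(self._data)
    def __len__(self):
        return len(self._data)
    def keys(self):
        # DictMixin builds the mapping API on top of keys(), so provide it too
        return list(self._data)

store = TinyStore()
store["genre"] = "Jazz"
print(store.get("genre"))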

View File

@ -29,6 +29,7 @@ From:
"""
import errno
import string
class DBRecIO:
def __init__(self, db, key, txn=None):
@ -38,6 +39,7 @@ class DBRecIO:
self.len = None
self.pos = 0
self.closed = 0
self.softspace = 0
def close(self):
if not self.closed:
@ -82,9 +84,9 @@ class DBRecIO:
if self.closed:
raise ValueError, "I/O operation on closed file"
if self.buflist:
self.buf = self.buf + ''.join(self.buflist)
self.buf = self.buf + string.joinfields(self.buflist, '')
self.buflist = []
i = self.buf.find('\n', self.pos)
i = string.find(self.buf, '\n', self.pos)
if i < 0:
newpos = self.len
else:
@ -133,7 +135,7 @@ class DBRecIO:
self.pos = newpos
def writelines(self, list):
self.write(''.join(list))
self.write(string.joinfields(list, ''))
def flush(self):
if self.closed:
@ -158,14 +160,14 @@ def _test():
if f.getvalue() != text:
raise RuntimeError, 'write failed'
length = f.tell()
print('File length =', length)
print 'File length =', length
f.seek(len(lines[0]))
f.write(lines[1])
f.seek(0)
print('First line =', repr(f.readline()))
print 'First line =', repr(f.readline())
here = f.tell()
line = f.readline()
print('Second line =', repr(line))
print 'Second line =', repr(line)
f.seek(-len(line), 1)
line2 = f.read(len(line))
if line != line2:
@ -177,8 +179,8 @@ def _test():
line2 = f.read()
if line != line2:
raise RuntimeError, 'bad result after seek back from EOF'
print('Read', len(list), 'more lines')
print('File length =', f.tell())
print 'Read', len(list), 'more lines'
print 'File length =', f.tell()
if f.tell() != length:
raise RuntimeError, 'bad length'
f.close()

View File

@ -32,21 +32,43 @@ storage.
import pickle
import sys
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
from . import db
#At version 2.3 cPickle switched to using protocol instead of bin
if sys.version_info[:3] >= (2, 3, 0):
HIGHEST_PROTOCOL = pickle.HIGHEST_PROTOCOL
def _dumps(object, protocol):
return pickle.dumps(object, protocol=protocol)
from collections import MutableMapping
# In Python 2.3.x, "cPickle.dumps" accepts no
# named parameters, while "pickle.dumps" does,
# which looks like a bug.
if sys.version_info[:3] < (2, 4, 0):
def _dumps(object, protocol):
return pickle.dumps(object, protocol)
else :
def _dumps(object, protocol):
return pickle.dumps(object, protocol=protocol)
else:
HIGHEST_PROTOCOL = None
def _dumps(object, protocol):
return pickle.dumps(object, bin=protocol)
class MutableMapping: pass
from . import db
_unspecified = object()
if sys.version_info[0:2] <= (2, 5) :
try:
from UserDict import DictMixin
except ImportError:
# DictMixin is new in Python 2.3
class DictMixin: pass
MutableMapping = DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
#------------------------------------------------------------------------
@ -135,13 +157,15 @@ class DBShelf(MutableMapping):
def keys(self, txn=None):
if txn is not None:
if txn != None:
return self.db.keys(txn)
else:
return self.db.keys()
return list(self.db.keys())
if sys.version_info[0:2] >= (2, 6) :
def __iter__(self) :
return self.db.__iter__()
def __iter__(self):
return iter(self.keys())
def open(self, *args, **kwargs):
self.db.open(*args, **kwargs)
@ -157,14 +181,14 @@ class DBShelf(MutableMapping):
if self._closed:
return '<DBShelf @ 0x%x - closed>' % (id(self))
else:
return repr(dict(self.iteritems()))
return repr(dict(iter(self.items())))
def items(self, txn=None):
if txn is not None:
if txn != None:
items = self.db.items(txn)
else:
items = self.db.items()
items = list(self.db.items())
newitems = []
for k, v in items:
@ -172,12 +196,12 @@ class DBShelf(MutableMapping):
return newitems
def values(self, txn=None):
if txn is not None:
if txn != None:
values = self.db.values(txn)
else:
values = self.db.values()
values = list(self.db.values())
return map(pickle.loads, values)
return list(map(pickle.loads, values))
#-----------------------------------
# Other methods
@ -194,24 +218,28 @@ class DBShelf(MutableMapping):
def associate(self, secondaryDB, callback, flags=0):
def _shelf_callback(priKey, priData, realCallback=callback):
data = pickle.loads(priData)
# Safe in Python 2.x because the expression short-circuits
if sys.version_info[0] < 3 or isinstance(priData, bytes) :
data = pickle.loads(priData)
else :
data = pickle.loads(bytes(priData, "iso8859-1")) # 8 bits
return realCallback(priKey, data)
return self.db.associate(secondaryDB, _shelf_callback, flags)
def get(self, key, default=_unspecified, txn=None, flags=0):
# If no default is given, we must not pass one to the
# extension module, so that an exception can be raised if
# set_get_returns_none is turned off.
if default is _unspecified:
data = self.db.get(key, txn=txn, flags=flags)
# if this returns, the default value would be None
default = None
else:
data = self.db.get(key, default, txn=txn, flags=flags)
if data is default:
return data
return pickle.loads(data)
#def get(self, key, default=None, txn=None, flags=0):
def get(self, *args, **kw):
# We do it with *args and **kw so if the default value wasn't
# given nothing is passed to the extension module. That way
# an exception can be raised if set_get_returns_none is turned
# off.
data = self.db.get(*args, **kw)
try:
return pickle.loads(data)
except (EOFError, TypeError, pickle.UnpicklingError):
return data # we may be getting the default value, or None,
# so it doesn't need unpickled.
def get_both(self, key, value, txn=None, flags=0):
data = _dumps(value, self.protocol)
@ -234,10 +262,6 @@ class DBShelf(MutableMapping):
raise NotImplementedError
def __contains__(self, key):
return self.db.has_key(key)
#----------------------------------------------
# Methods allowed to pass-through to self.db
#
@ -331,7 +355,11 @@ class DBShelfCursor:
return None
else:
key, data = rec
return key, pickle.loads(data)
# Safe in Python 2.x because the expression short-circuits
if sys.version_info[0] < 3 or isinstance(data, bytes) :
return key, pickle.loads(data)
else :
return key, pickle.loads(bytes(data, "iso8859-1")) # 8 bits
#----------------------------------------------
# Methods allowed to pass-through to self.dbc
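
A standalone Python 3 sketch, not from the commit, of why "iso8859-1" is the codec behind the "# 8 bits" re-encoding above: Latin-1 maps byte 0xNN to code point U+00NN and back, so a pickled byte string that arrives as str can be recovered losslessly before unpickling.

import pickle

payload = pickle.dumps({"artist": "Miles Davis"}, 1)   # what the shelf stores
as_text = payload.decode("iso8859-1")                  # a value that came back as str
assert bytes(as_text, "iso8859-1") == payload          # lossless round trip
print(pickle.loads(bytes(as_text, "iso8859-1")))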

View File

@ -13,30 +13,29 @@
# -- Gregory P. Smith <greg@krypto.org>
# This provides a simple database table interface built on top of
# the Python BerkeleyDB 3 interface.
# the Python Berkeley DB 3 interface.
#
_cvsid = '$Id$'
import re
import sys
import copy
import struct
import random
import pickle
import struct
import pickle as pickle
from bsddb.db import *
# All table names, row names etc. must be ASCII strings
# However, rowids, when represented as strings, are latin-1 encoded
def _E(s):
return s.encode("ascii")
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
# XXX(nnorwitz): is this correct? DBIncompleteError is conditional in _bsddb.c
try:
DBIncompleteError
except NameError:
if not hasattr(db,"DBIncompleteError") :
class DBIncompleteError(Exception):
pass
db.DBIncompleteError = DBIncompleteError
class TableDBError(Exception):
pass
@ -51,22 +50,22 @@ class Cond:
class ExactCond(Cond):
"""Acts as an exact match condition function"""
def __init__(self, strtomatch, encoding="utf-8"):
self.strtomatch = strtomatch.encode(encoding)
def __init__(self, strtomatch):
self.strtomatch = strtomatch
def __call__(self, s):
return s == self.strtomatch
class PrefixCond(Cond):
"""Acts as a condition function for matching a string prefix"""
def __init__(self, prefix, encoding="utf-8"):
self.prefix = prefix.encode(encoding)
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, s):
return s[:len(self.prefix)] == self.prefix
class PostfixCond(Cond):
"""Acts as a condition function for matching a string postfix"""
def __init__(self, postfix, encoding="utf-8"):
self.postfix = postfix.encode(encoding)
def __init__(self, postfix):
self.postfix = postfix
def __call__(self, s):
return s[-len(self.postfix):] == self.postfix
@ -76,7 +75,7 @@ class LikeCond(Cond):
string. Case insensitive and % signs are wild cards.
This isn't perfect but it should work for the simple common cases.
"""
def __init__(self, likestr, re_flags=re.IGNORECASE, encoding="utf-8"):
def __init__(self, likestr, re_flags=re.IGNORECASE):
# escape python re characters
chars_to_escape = '.*+()[]?'
for char in chars_to_escape :
@ -84,18 +83,8 @@ class LikeCond(Cond):
# convert %s to wildcards
self.likestr = likestr.replace('%', '.*')
self.re = re.compile('^'+self.likestr+'$', re_flags)
self.encoding = encoding
def __call__(self, s):
return self.re.match(s.decode(self.encoding))
def CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
return self.re.match(s)
#
# keys used to store database metadata
@ -104,7 +93,7 @@ _table_names_key = '__TABLE_NAMES__' # list of the tables in this db
_columns = '._COLUMNS__' # table_name+this key contains a list of columns
def _columns_key(table):
return _E(table + _columns)
return table + _columns
#
# these keys are found within table sub databases
@ -114,20 +103,21 @@ _rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
# row in the table. (no data is stored)
_rowid_str_len = 8 # length in bytes of the unique rowid strings
def _data_key(table, col, rowid):
return _E(table + _data + col + _data) + rowid
return table + _data + col + _data + rowid
def _search_col_data_key(table, col):
return _E(table + _data + col + _data)
return table + _data + col + _data
def _search_all_data_key(table):
return _E(table + _data)
return table + _data
def _rowid_key(table, rowid):
return _E(table + _rowid) + rowid + _E(_rowid)
return table + _rowid + rowid + _rowid
def _search_rowid_key(table):
return _E(table + _rowid)
return table + _rowid
def contains_metastrings(s) :
"""Verify that the given string does not contain any
@ -146,43 +136,110 @@ def contains_metastrings(s) :
class bsdTableDB :
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0o600,
recover=0, dbflags=0):
"""bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0o600)
"""bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome BerkeleyDB directory.
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.
"""
self.db = None
myflags = DB_THREAD
myflags = db.DB_THREAD
if create:
myflags |= DB_CREATE
flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
DB_INIT_TXN | dbflags)
myflags |= db.DB_CREATE
flagsforenv = (db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG |
db.DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= DB_AUTO_COMMIT
dbflags |= db.DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | DB_RECOVER
self.env = DBEnv()
flagsforenv = flagsforenv | db.DB_RECOVER
self.env = db.DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(DB_LOCK_DEFAULT)
self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= DB_TRUNCATE
self.db = DB(self.env)
myflags |= db.DB_TRUNCATE
self.db = db.DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(DB_DUP)
self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
self.db.set_flags(db.DB_DUP)
self.db.open(filename, db.DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
if sys.version_info[0] >= 3 :
class cursor_py3k(object) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
def close(self) :
return self._dbcursor.close()
def set_range(self, search) :
v = self._dbcursor.set_range(bytes(search, "iso8859-1"))
if v != None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
def __next__(self) :
v = getattr(self._dbcursor, "next")()
if v != None :
v = (v[0].decode("iso8859-1"),
v[1].decode("iso8859-1"))
return v
class db_py3k(object) :
def __init__(self, db) :
self._db = db
def cursor(self, txn=None) :
return cursor_py3k(self._db.cursor(txn=txn))
def has_key(self, key, txn=None) :
return getattr(self._db,"has_key")(bytes(key, "iso8859-1"),
txn=txn)
def put(self, key, value, flags=0, txn=None) :
key = bytes(key, "iso8859-1")
if value != None :
value = bytes(value, "iso8859-1")
return self._db.put(key, value, flags=flags, txn=txn)
def put_bytes(self, key, value, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.put(key, value, txn=txn)
def get(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
v = self._db.get(key, txn=txn, flags=flags)
if v != None :
v = v.decode("iso8859-1")
return v
def get_bytes(self, key, txn=None, flags=0) :
key = bytes(key, "iso8859-1")
return self._db.get(key, txn=txn, flags=flags)
def delete(self, key, txn=None) :
key = bytes(key, "iso8859-1")
return self._db.delete(key, txn=txn)
def close (self) :
return self._db.close()
self.db = db_py3k(self.db)
else : # Python 2.x
pass
# Initialize the table names list if this is a new database
txn = self.env.txn_begin()
try:
if not self.db.has_key(_E(_table_names_key), txn):
self.db.put(_E(_table_names_key), pickle.dumps([], 1), txn=txn)
if not getattr(self.db, "has_key")(_table_names_key, txn):
getattr(self.db, "put_bytes", self.db.put) \
(_table_names_key, pickle.dumps([], 1), txn=txn)
# Yes, bare except
except:
txn.abort()
@ -206,13 +263,13 @@ class bsdTableDB :
def checkpoint(self, mins=0):
try:
self.env.txn_checkpoint(mins)
except DBIncompleteError:
except db.DBIncompleteError:
pass
def sync(self):
try:
self.db.sync()
except DBIncompleteError:
except db.DBIncompleteError:
pass
def _db_print(self) :
@ -223,13 +280,13 @@ class bsdTableDB :
key, data = cur.first()
while 1:
print(repr({key: data}))
next = cur.next()
next = next(cur)
if next:
key, data = next
else:
cur.close()
return
except DBNotFoundError:
except db.DBNotFoundError:
cur.close()
@ -239,6 +296,7 @@ class bsdTableDB :
raises TableDBError if it already exists or for other DB errors.
"""
assert isinstance(columns, list)
txn = None
try:
# checking sanity of the table and column names here on
@ -252,29 +310,33 @@ class bsdTableDB :
"bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if self.db.has_key(columnlist_key):
if getattr(self.db, "has_key")(columnlist_key):
raise TableAlreadyExists("table already exists")
txn = self.env.txn_begin()
# store the table's column info
self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(columns, 1), txn=txn)
# add the table name to the tablelist
tablelist = pickle.loads(self.db.get(_E(_table_names_key), txn=txn,
flags=DB_RMW))
tablelist = pickle.loads(getattr(self.db, "get_bytes",
self.db.get) (_table_names_key, txn=txn, flags=db.DB_RMW))
tablelist.append(table)
# delete 1st, in case we opened with DB_DUP
self.db.delete(_E(_table_names_key), txn=txn)
self.db.put(_E(_table_names_key), pickle.dumps(tablelist, 1), txn=txn)
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
except DBError as dberror:
raise TableDBError(dberror.args[1])
finally:
except db.DBError as dberror:
if txn:
txn.abort()
txn = None
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
def ListTableColumns(self, table):
"""Return a list of columns in the given table.
@ -285,9 +347,10 @@ class bsdTableDB :
raise ValueError("bad table name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if not self.db.has_key(columnlist_key):
if not getattr(self.db, "has_key")(columnlist_key):
return []
pickledcolumnlist = self.db.get(columnlist_key)
pickledcolumnlist = getattr(self.db, "get_bytes",
self.db.get)(columnlist_key)
if pickledcolumnlist:
return pickle.loads(pickledcolumnlist)
else:
@ -295,7 +358,7 @@ class bsdTableDB :
def ListTables(self):
"""Return a list of tables in this database."""
pickledtablelist = self.db.get(_E(_table_names_key))
pickledtablelist = getattr(self.db, "get_bytes", self.db.get)(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
@ -311,6 +374,7 @@ class bsdTableDB :
all of its current columns.
"""
assert isinstance(columns, list)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
@ -322,7 +386,8 @@ class bsdTableDB :
# load the current column list
oldcolumnlist = pickle.loads(
self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
getattr(self.db, "get_bytes",
self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
# create a hash table for fast lookups of column names in the
# loop below
oldcolumnhash = {}
@ -340,7 +405,7 @@ class bsdTableDB :
if newcolumnlist != oldcolumnlist :
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn=txn)
self.db.put(columnlist_key,
getattr(self.db, "put_bytes", self.db.put)(columnlist_key,
pickle.dumps(newcolumnlist, 1),
txn=txn)
@ -348,19 +413,22 @@ class bsdTableDB :
txn = None
self.__load_column_info(table)
except DBError as dberror:
raise TableDBError(dberror.args[1])
finally:
except db.DBError as dberror:
if txn:
txn.abort()
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
def __load_column_info(self, table) :
"""initialize the self.__tablecolumns dict"""
# check the column names
try:
tcolpickles = self.db.get(_columns_key(table))
except DBNotFoundError:
tcolpickles = getattr(self.db, "get_bytes",
self.db.get)(_columns_key(table))
except db.DBNotFoundError:
raise TableDBError("unknown table: %r" % (table,))
if not tcolpickles:
raise TableDBError("unknown table: %r" % (table,))
@ -376,13 +444,16 @@ class bsdTableDB :
blist = []
for x in range(_rowid_str_len):
blist.append(random.randint(0,255))
newid = bytes(blist)
newid = struct.pack('B'*_rowid_str_len, *blist)
if sys.version_info[0] >= 3 :
newid = newid.decode("iso8859-1") # 8 bits
# Guarantee uniqueness by adding this key to the database
try:
self.db.put(_rowid_key(table, newid), None, txn=txn,
flags=DB_NOOVERWRITE)
except DBKeyExistError:
flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError:
pass
else:
unique = 1
@ -394,15 +465,16 @@ class bsdTableDB :
"""Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.
"""
txn = None
try:
if not self.db.has_key(_columns_key(table)):
if not getattr(self.db, "has_key")(_columns_key(table)):
raise TableDBError("unknown table")
# check the validity of each column name
if table not in self.__tablecolumns:
self.__load_column_info(table)
for column in rowdict.keys() :
for column in list(rowdict.keys()) :
if not self.__tablecolumns[table].count(column):
raise TableDBError("unknown column: %r" % (column,))
@ -411,14 +483,14 @@ class bsdTableDB :
rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
for column, dataitem in rowdict.items():
for column, dataitem in list(rowdict.items()):
# store the value
self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
txn.commit()
txn = None
except DBError as dberror:
except db.DBError as dberror:
# WIBNI we could just abort the txn and re-raise the exception?
# But no, because TableDBError is not related to DBError via
# inheritance, so it would be backwards incompatible. Do the next
@ -427,11 +499,10 @@ class bsdTableDB :
if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
txn = None
raise TableDBError(dberror.args[1]).with_traceback(info[2])
finally:
if txn:
txn.abort()
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1]).with_traceback(info[2])
else :
raise TableDBError(dberror.args[1]).with_traceback(info[2])
def Modify(self, table, conditions={}, mappings={}):
@ -445,13 +516,13 @@ class bsdTableDB :
condition callable expecting the data string as an argument and
returning the new string for that column.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# modify only requested columns
columns = mappings.keys()
for rowid in matching_rowids.keys():
rowid = rowid.encode("latin-1")
columns = list(mappings.keys())
for rowid in list(matching_rowids.keys()):
txn = None
try:
for column in columns:
@ -464,7 +535,7 @@ class bsdTableDB :
self.db.delete(
_data_key(table, column, rowid),
txn=txn)
except DBNotFoundError:
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no
# error
dataitem = None
@ -477,12 +548,16 @@ class bsdTableDB :
txn = None
# catch all exceptions here since we call unknown callables
finally:
except:
if txn:
txn.abort()
raise
except DBError as dberror:
raise TableDBError(dberror.args[1])
except db.DBError as dberror:
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
def Delete(self, table, conditions={}):
"""Delete(table, conditions) - Delete items matching the given
@ -492,37 +567,41 @@ class bsdTableDB :
condition functions expecting the data string as an
argument and returning a boolean.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# delete row data from all columns
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
for rowid in list(matching_rowids.keys()):
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
# delete the data key
try:
self.db.delete(_data_key(table, column,
rowid.encode("latin-1")),
self.db.delete(_data_key(table, column, rowid),
txn=txn)
except DBNotFoundError:
except db.DBNotFoundError:
# XXXXXXX column may not exist, assume no error
pass
try:
self.db.delete(_rowid_key(table, rowid.encode("latin-1")), txn=txn)
except DBNotFoundError:
self.db.delete(_rowid_key(table, rowid), txn=txn)
except db.DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no error
pass
txn.commit()
txn = None
finally:
except db.DBError as dberror:
if txn:
txn.abort()
except DBError as dberror:
raise TableDBError(dberror.args[1])
raise
except db.DBError as dberror:
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
def Select(self, table, columns, conditions={}):
@ -541,10 +620,13 @@ class bsdTableDB :
if columns is None:
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except DBError as dberror:
raise TableDBError(dberror.args[1])
except db.DBError as dberror:
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
# return the matches as a list of dictionaries
return matching_rowids.values()
return list(matching_rowids.values())
def __Select(self, table, columns, conditions):
@ -595,8 +677,19 @@ class bsdTableDB :
# leave all unknown condition callables alone as equals
return 0
conditionlist = list(conditions.items())
conditionlist.sort(key=CmpToKey(cmp_conditions))
if sys.version_info[0] < 3 :
conditionlist = list(conditions.items())
conditionlist.sort(cmp_conditions)
else : # Insertion Sort. Please, improve
conditionlist = []
for i in list(conditions.items()) :
for j, k in enumerate(conditionlist) :
r = cmp_conditions(k, i)
if r == 1 :
conditionlist.insert(j, i)
break
else :
conditionlist.append(i)
# Apply conditions to column data to find what we want
cur = self.db.cursor()
@ -614,7 +707,7 @@ class bsdTableDB :
key, data = cur.set_range(searchkey)
while key[:len(searchkey)] == searchkey:
# extract the rowid from the key
rowid = key[-_rowid_str_len:].decode("latin-1")
rowid = key[-_rowid_str_len:]
if rowid not in rejected_rowids:
# if no condition was specified or the condition
@ -629,11 +722,15 @@ class bsdTableDB :
del matching_rowids[rowid]
rejected_rowids[rowid] = rowid
key, data = cur.next()
key, data = next(cur)
except DBError as dberror:
if dberror.args[0] != DB_NOTFOUND:
raise
except db.DBError as dberror:
if sys.version_info[0] < 3 :
if dberror[0] != db.DB_NOTFOUND:
raise
else :
if dberror.args[0] != db.DB_NOTFOUND:
raise
continue
cur.close()
@ -644,17 +741,20 @@ class bsdTableDB :
# extract any remaining desired column data from the
# database for the matching rows.
if len(columns) > 0:
for rowid, rowdata in matching_rowids.items():
rowid = rowid.encode("latin-1")
for rowid, rowdata in list(matching_rowids.items()):
for column in columns:
if column in rowdata:
continue
try:
rowdata[column] = self.db.get(
_data_key(table, column, rowid))
except DBError as dberror:
if dberror.args[0] != DB_NOTFOUND:
raise
except db.DBError as dberror:
if sys.version_info[0] < 3 :
if dberror[0] != db.DB_NOTFOUND:
raise
else :
if dberror.args[0] != db.DB_NOTFOUND:
raise
rowdata[column] = None
# return the matches
@ -677,7 +777,7 @@ class bsdTableDB :
while 1:
try:
key, data = cur.set_range(table_key)
except DBNotFoundError:
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
@ -689,7 +789,7 @@ class bsdTableDB :
while 1:
try:
key, data = cur.set_range(table_key)
except DBNotFoundError:
except db.DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
@ -700,15 +800,17 @@ class bsdTableDB :
# delete the tablename from the table name list
tablelist = pickle.loads(
self.db.get(_E(_table_names_key), txn=txn, flags=DB_RMW))
getattr(self.db, "get_bytes", self.db.get)(_table_names_key,
txn=txn, flags=db.DB_RMW))
try:
tablelist.remove(table)
except ValueError:
# hmm, it wasn't there, oh well, that's what we want.
pass
# delete 1st, in case we opened with DB_DUP
self.db.delete(_E(_table_names_key), txn=txn)
self.db.put(_E(_table_names_key), pickle.dumps(tablelist, 1), txn=txn)
self.db.delete(_table_names_key, txn=txn)
getattr(self.db, "put_bytes", self.db.put)(_table_names_key,
pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
@ -716,8 +818,10 @@ class bsdTableDB :
if table in self.__tablecolumns:
del self.__tablecolumns[table]
except DBError as dberror:
raise TableDBError(dberror.args[1])
finally:
except db.DBError as dberror:
if txn:
txn.abort()
if sys.version_info[0] < 3 :
raise TableDBError(dberror[1])
else :
raise TableDBError(dberror.args[1])
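
A side note on the hand-written insertion sort in the Python 3 branch of __Select() above: once Python 2.7/3.2 can be assumed, the same cmp-style ordering can be expressed with functools.cmp_to_key. A sketch under that assumption, with a simplified stand-in comparator:

import functools

def cmp_conditions(a, b):
    # stand-in for the real cost-ordering comparator used in __Select()
    return (a[0] > b[0]) - (a[0] < b[0])

conditions = {"name": "ExactCond", "genre": "PrefixCond"}
conditionlist = sorted(conditions.items(), key=functools.cmp_to_key(cmp_conditions))
print(conditionlist)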

View File

@ -19,8 +19,20 @@
#
#------------------------------------------------------------------------
import time
from . import db
#
# import the time.sleep function in a namespace safe way to allow
# "from bsddb.dbutils import *"
#
from time import sleep as _sleep
import sys
absolute_import = (sys.version_info[0] >= 3)
if absolute_import :
# Because this syntax is not valid before Python 2.5
exec("from . import db")
else :
from . import db
# always sleep at least N seconds between retrys
_deadlock_MinSleepTime = 1.0/128
@ -54,22 +66,17 @@ def DeadlockWrap(function, *_args, **_kwargs):
while True:
try:
return function(*_args, **_kwargs)
except db.DBLockDeadlockError as e:
except db.DBLockDeadlockError:
if _deadlock_VerboseFile:
_deadlock_VerboseFile.write(
'bsddb.dbutils.DeadlockWrap: ' +
'sleeping %1.3f\n' % sleeptime)
time.sleep(sleeptime)
'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
sleeptime *= 2
if sleeptime > _deadlock_MaxSleepTime:
sleeptime = _deadlock_MaxSleepTime
max_retries -= 1
if max_retries == -1:
if _deadlock_VerboseFile:
_deadlock_VerboseFile.write(
'bsddb.dbutils.DeadlockWrap: ' +
'max_retries reached, reraising %s\n' % e)
raise
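
For context, a hypothetical usage sketch of DeadlockWrap (file name and values are made up): a call routed through it is retried with exponential backoff whenever Berkeley DB raises DBLockDeadlockError, and max_retries bounds the number of attempts before the error is re-raised.

import bsddb
from bsddb import dbutils

d = bsddb.hashopen("/tmp/deadlock_demo.db", "c")

def store(key, value):
    d[key] = value

# Retried with sleeps growing from _deadlock_MinSleepTime up to the module
# maximum; after max_retries failed attempts the deadlock error propagates.
dbutils.DeadlockWrap(store, b"spam", b"eggs", max_retries=12)
print(dbutils.DeadlockWrap(lambda: d[b"spam"]))
d.close()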

View File

@ -1,48 +0,0 @@
# http://bugs.python.org/issue1413192
#
# See the bug report for details.
# The problem was that the env was deallocated prior to the txn.
import shutil
import tempfile
from test.support import catch_warning
import warnings
try:
# For Pythons w/distutils and add-on pybsddb
from bsddb3 import db
except ImportError:
# For Python >= 2.3 builtin bsddb distribution
from bsddb import db
env_name = tempfile.mkdtemp()
# Wrap test operation in a class so we can control destruction rather than
# waiting for the controlling Python executable to exit
class Context:
def __init__(self):
self.env = db.DBEnv()
self.env.open(env_name,
db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL)
self.the_txn = self.env.txn_begin()
self.map = db.DB(self.env)
self.map.open('xxx.db', "p",
db.DB_HASH, db.DB_CREATE, 0o666, txn=self.the_txn)
del self.env
del self.the_txn
with catch_warning():
warnings.filterwarnings('ignore', 'DBTxn aborted in destructor')
context = Context()
del context
# try not to leave a turd
try:
shutil.rmtree(env_name)
except EnvironmentError:
pass

View File

@ -6,18 +6,377 @@ import os
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
import bsddb3 as bsddb
except ImportError:
# For Python 2.3
from bsddb import db
import bsddb
verbose = False
if sys.version_info[0] >= 3 :
charset = "iso8859-1" # Full 8 bit
class cursor_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._dbcursor = db.cursor(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbcursor, v)
def _fix(self, v) :
if v == None : return None
key, value = v
if isinstance(key, bytes) :
key = key.decode(charset)
return (key, value.decode(charset))
def __next__(self) :
v = getattr(self._dbcursor, "next")()
return self._fix(v)
def previous(self) :
v = self._dbcursor.previous()
return self._fix(v)
def last(self) :
v = self._dbcursor.last()
return self._fix(v)
def set(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set(k)
return self._fix(v)
def set_recno(self, num) :
v = self._dbcursor.set_recno(num)
return self._fix(v)
def set_range(self, k, dlen=-1, doff=-1) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._dbcursor.set_range(k, dlen=dlen, doff=doff)
return self._fix(v)
def dup(self, flags=0) :
cursor = self._dbcursor.dup(flags)
return dup_cursor_py3k(cursor)
def next_dup(self) :
v = self._dbcursor.next_dup()
return self._fix(v)
def put(self, key, value, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
return self._dbcursor.put(key, value, flags=flags, dlen=dlen,
doff=doff)
def current(self, flags=0, dlen=-1, doff=-1) :
v = self._dbcursor.current(flags=flags, dlen=dlen, doff=doff)
return self._fix(v)
def first(self) :
v = self._dbcursor.first()
return self._fix(v)
def pget(self, key=None, data=None, flags=0) :
# Incorrect because key can be a bare number,
# but enough to pass testsuite
if isinstance(key, int) and (data==None) and (flags==0) :
flags = key
key = None
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(data, int) and (flags==0) :
flags = data
data = None
if isinstance(data, str) :
data = bytes(data, charset)
v=self._dbcursor.pget(key=key, data=data, flags=flags)
if v != None :
v1, v2, v3 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
if isinstance(v2, bytes) :
v2 = v2.decode(charset)
v = (v1, v2, v3.decode(charset))
return v
def join_item(self) :
v = self._dbcursor.join_item()
if v != None :
v = v.decode(charset)
return v
def get(self, *args, **kwargs) :
l = len(args)
if l == 2 :
k, f = args
if isinstance(k, str) :
k = bytes(k, "iso8859-1")
args = (k, f)
elif l == 3 :
k, d, f = args
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(d, str) :
d = bytes(d, charset)
args =(k, d, f)
v = self._dbcursor.get(*args, **kwargs)
if v != None :
k, v = v
if isinstance(k, bytes) :
k = k.decode(charset)
v = (k, v.decode(charset))
return v
def get_both(self, key, value) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._dbcursor.get_both(key, value)
return self._fix(v)
class dup_cursor_py3k(cursor_py3k) :
def __init__(self, dbcursor) :
self._dbcursor = dbcursor
class DB_py3k(object) :
def __init__(self, *args, **kwargs) :
args2=[]
for i in args :
if isinstance(i, DBEnv_py3k) :
i = i._dbenv
args2.append(i)
args = tuple(args2)
for k, v in list(kwargs.items()) :
if isinstance(v, DBEnv_py3k) :
kwargs[k] = v._dbenv
self._db = bsddb._db.DB_orig(*args, **kwargs)
def __contains__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
return getattr(self._db, "has_key")(k)
def __getitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
v = self._db[k]
if v != None :
v = v.decode(charset)
return v
def __setitem__(self, k, v) :
if isinstance(k, str) :
k = bytes(k, charset)
if isinstance(v, str) :
v = bytes(v, charset)
self._db[k] = v
def __delitem__(self, k) :
if isinstance(k, str) :
k = bytes(k, charset)
del self._db[k]
def __getattr__(self, v) :
return getattr(self._db, v)
def __len__(self) :
return len(self._db)
def has_key(self, k, txn=None) :
if isinstance(k, str) :
k = bytes(k, charset)
return self._db.has_key(k, txn=txn)
def put(self, key, value, txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
return self._db.put(key, value, flags=flags, txn=txn, dlen=dlen,
doff=doff)
def append(self, value, txn=None) :
if isinstance(value, str) :
value = bytes(value, charset)
return self._db.append(value, txn=txn)
def get_size(self, key) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.get_size(key)
def get(self, key, default="MagicCookie", txn=None, flags=0, dlen=-1, doff=-1) :
if isinstance(key, str) :
key = bytes(key, charset)
if default != "MagicCookie" : # Magic for 'test_get_none.py'
v=self._db.get(key, default=default, txn=txn, flags=flags,
dlen=dlen, doff=doff)
else :
v=self._db.get(key, txn=txn, flags=flags,
dlen=dlen, doff=doff)
if (v != None) and isinstance(v, bytes) :
v = v.decode(charset)
return v
def pget(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
v=self._db.pget(key, txn=txn)
if v != None :
v1, v2 = v
if isinstance(v1, bytes) :
v1 = v1.decode(charset)
v = (v1, v2.decode(charset))
return v
def get_both(self, key, value, txn=None, flags=0) :
if isinstance(key, str) :
key = bytes(key, charset)
if isinstance(value, str) :
value = bytes(value, charset)
v=self._db.get_both(key, value, txn=txn, flags=flags)
if v != None :
v = v.decode(charset)
return v
def delete(self, key, txn=None) :
if isinstance(key, str) :
key = bytes(key, charset)
return self._db.delete(key, txn=txn)
def keys(self) :
k = list(self._db.keys())
if len(k) and isinstance(k[0], bytes) :
return [i.decode(charset) for i in list(self._db.keys())]
else :
return k
def items(self) :
data = list(self._db.items())
if not len(data) : return data
data2 = []
for k, v in data :
if isinstance(k, bytes) :
k = k.decode(charset)
data2.append((k, v.decode(charset)))
return data2
def associate(self, secondarydb, callback, flags=0, txn=None) :
class associate_callback(object) :
def __init__(self, callback) :
self._callback = callback
def callback(self, key, data) :
if isinstance(key, str) :
key = key.decode(charset)
data = data.decode(charset)
key = self._callback(key, data)
if (key != bsddb._db.DB_DONOTINDEX) and isinstance(key,
str) :
key = bytes(key, charset)
return key
return self._db.associate(secondarydb._db,
associate_callback(callback).callback, flags=flags, txn=txn)
def cursor(self, txn=None, flags=0) :
return cursor_py3k(self._db, txn=txn, flags=flags)
def join(self, cursor_list) :
cursor_list = [i._dbcursor for i in cursor_list]
return dup_cursor_py3k(self._db.join(cursor_list))
class DBEnv_py3k(object) :
def __init__(self, *args, **kwargs) :
self._dbenv = bsddb._db.DBEnv_orig(*args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbenv, v)
class DBSequence_py3k(object) :
def __init__(self, db, *args, **kwargs) :
self._db=db
self._dbsequence = bsddb._db.DBSequence_orig(db._db, *args, **kwargs)
def __getattr__(self, v) :
return getattr(self._dbsequence, v)
def open(self, key, *args, **kwargs) :
return self._dbsequence.open(bytes(key, charset), *args, **kwargs)
def get_key(self) :
return self._dbsequence.get_key().decode(charset)
def get_dbp(self) :
return self._db
import string
string.letters=[chr(i) for i in range(65,91)]
bsddb._db.DBEnv_orig = bsddb._db.DBEnv
bsddb._db.DB_orig = bsddb._db.DB
bsddb._db.DBSequence_orig = bsddb._db.DBSequence
def do_proxy_db_py3k(flag) :
flag2 = do_proxy_db_py3k.flag
do_proxy_db_py3k.flag = flag
if flag :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = DBEnv_py3k
bsddb.DB = bsddb.db.DB = bsddb._db.DB = DB_py3k
bsddb._db.DBSequence = DBSequence_py3k
else :
bsddb.DBEnv = bsddb.db.DBEnv = bsddb._db.DBEnv = bsddb._db.DBEnv_orig
bsddb.DB = bsddb.db.DB = bsddb._db.DB = bsddb._db.DB_orig
bsddb._db.DBSequence = bsddb._db.DBSequence_orig
return flag2
do_proxy_db_py3k.flag = False
do_proxy_db_py3k(True)
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
except ImportError:
# For Python 2.3
from bsddb import db, dbtables, dbutils, dbshelve, \
hashopen, btopen, rnopen, dbobj
try:
from bsddb3 import test_support
except ImportError:
from test import test_support
try:
if sys.version_info[0] < 3 :
from threading import Thread, currentThread
del Thread, currentThread
else :
from threading import Thread, current_thread
del Thread, current_thread
have_threads = True
except ImportError:
have_threads = False
verbose = 0
if 'verbose' in sys.argv:
verbose = True
verbose = 1
sys.argv.remove('verbose')
if 'silent' in sys.argv: # take care of old flag, just in case
verbose = False
verbose = 0
sys.argv.remove('silent')
@ -28,11 +387,71 @@ def print_versions():
print('bsddb.db.version(): %s' % (db.version(), ))
print('bsddb.db.__version__: %s' % db.__version__)
print('bsddb.db.cvsid: %s' % db.cvsid)
print('py module: %s' % bsddb.__file__)
print('extension module: %s' % bsddb._bsddb.__file__)
print('python version: %s' % sys.version)
print('My pid: %s' % os.getpid())
print('-=' * 38)
def get_new_path(name) :
get_new_path.mutex.acquire()
try :
import os
path=os.path.join(get_new_path.prefix,
name+"_"+str(os.getpid())+"_"+str(get_new_path.num))
get_new_path.num+=1
finally :
get_new_path.mutex.release()
return path
def get_new_environment_path() :
path=get_new_path("environment")
import os
try:
os.makedirs(path,mode=0o700)
except os.error:
test_support.rmtree(path)
os.makedirs(path)
return path
def get_new_database_path() :
path=get_new_path("database")
import os
if os.path.exists(path) :
os.remove(path)
return path
# This path can be overridden via "set_test_path_prefix()".
import os, os.path
get_new_path.prefix=os.path.join(os.sep,"tmp","z-Berkeley_DB")
get_new_path.num=0
def get_test_path_prefix() :
return get_new_path.prefix
def set_test_path_prefix(path) :
get_new_path.prefix=path
def remove_test_path_directory() :
test_support.rmtree(get_new_path.prefix)
if have_threads :
import threading
get_new_path.mutex=threading.Lock()
del threading
else :
class Lock(object) :
def acquire(self) :
pass
def release(self) :
pass
get_new_path.mutex=Lock()
del Lock
class PrintInfoFakeTest(unittest.TestCase):
def testPrintVersions(self):
print_versions()
@ -41,30 +460,26 @@ class PrintInfoFakeTest(unittest.TestCase):
# This little hack is for when this module is run as main and all the
# other modules import it so they will still be able to get the right
# verbose setting. It's confusing but it works.
try:
import test_all
except ImportError:
pass
else:
if sys.version_info[0] < 3 :
from . import test_all
test_all.verbose = verbose
else :
import sys
print("Work to do!", file=sys.stderr)
def suite():
try:
# this is special, it used to segfault the interpreter
import test_1413192
except:
pass
def suite(module_prefix='', timing_check=None):
test_modules = [
'test_associate',
'test_basics',
'test_compat',
'test_compare',
'test_compat',
'test_cursor_pget_bug',
'test_dbobj',
'test_dbshelve',
'test_dbtables',
'test_env_close',
'test_distributed_transactions',
'test_early_close',
'test_get_none',
'test_join',
'test_lock',
@ -72,15 +487,21 @@ def suite():
'test_pickle',
'test_queue',
'test_recno',
'test_thread',
'test_replication',
'test_sequence',
'test_cursor_pget_bug',
'test_thread',
]
alltests = unittest.TestSuite()
for name in test_modules:
module = __import__(name)
#module = __import__(name)
# Do it this way so that suite may be called externally via
# python's Lib/test/test_bsddb3.
module = __import__(module_prefix+name, globals(), locals(), name)
alltests.addTest(module.test_suite())
if timing_check:
alltests.addTest(unittest.makeSuite(timing_check))
return alltests

View File

@ -2,32 +2,13 @@
TestCases for DB.associate.
"""
import shutil
import sys, os
import tempfile
import sys, os, string
import time
from pprint import pprint
try:
from threading import Thread, current_thread
have_threads = 1
except ImportError:
have_threads = 0
import unittest
from bsddb.test.test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbshelve, test_support, verbose, have_threads, \
get_new_environment_path
#----------------------------------------------------------------------
@ -97,15 +78,7 @@ musicdata = {
class AssociateErrorTestCase(unittest.TestCase):
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except os.error:
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
@ -128,7 +101,7 @@ class AssociateErrorTestCase(unittest.TestCase):
secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
# dupDB has been configured to allow duplicates, it can't
# associate with a secondary. BerkeleyDB will return an error.
# associate with a secondary. Berkeley DB will return an error.
try:
def f(a,b): return a+b
dupDB.associate(secDB, f)
@ -153,15 +126,7 @@ class AssociateTestCase(unittest.TestCase):
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except os.error:
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_THREAD | self.envFlags)
@ -170,13 +135,13 @@ class AssociateTestCase(unittest.TestCase):
self.closeDB()
self.env.close()
self.env = None
shutil.rmtree(self.homeDir)
test_support.rmtree(self.homeDir)
def addDataToDB(self, d, txn=None):
for key, value in musicdata.items():
for key, value in list(musicdata.items()):
if type(self.keytype) == type(''):
key = ("%02d" % key).encode("utf-8")
d.put(key, '|'.join(value).encode("utf-8"), txn=txn)
key = "%02d" % key
d.put(key, '|'.join(value), txn=txn)
def createDB(self, txn=None):
self.cur = None
@ -246,14 +211,14 @@ class AssociateTestCase(unittest.TestCase):
def finish_test(self, secDB, txn=None):
# 'Blues' should not be in the secondary database
vals = secDB.pget(b'Blues', txn=txn)
assert vals == None, vals
vals = secDB.pget('Blues', txn=txn)
self.assertEqual(vals, None, vals)
vals = secDB.pget(b'Unknown', txn=txn)
assert vals[0] == 99 or vals[0] == b'99', vals
vals[1].index(b'Unknown')
vals[1].index(b'Unnamed')
vals[1].index(b'unknown')
vals = secDB.pget('Unknown', txn=txn)
self.assert_(vals[0] == 99 or vals[0] == '99', vals)
vals[1].index('Unknown')
vals[1].index('Unnamed')
vals[1].index('unknown')
if verbose:
print("Primary key traversal:")
@ -262,14 +227,14 @@ class AssociateTestCase(unittest.TestCase):
rec = self.cur.first()
while rec is not None:
if type(self.keytype) == type(''):
assert int(rec[0]) # for primary db, key is a number
self.assert_(int(rec[0])) # for primary db, key is a number
else:
assert rec[0] and type(rec[0]) == type(0)
self.assert_(rec[0] and type(rec[0]) == type(0))
count = count + 1
if verbose:
print(rec)
rec = self.cur.next()
assert count == len(musicdata) # all items accounted for
rec = getattr(self.cur, "next")()
self.assertEqual(count, len(musicdata)) # all items accounted for
if verbose:
@ -278,38 +243,39 @@ class AssociateTestCase(unittest.TestCase):
count = 0
# test cursor pget
vals = self.cur.pget(b'Unknown', flags=db.DB_LAST)
assert vals[1] == 99 or vals[1] == b'99', vals
assert vals[0] == b'Unknown'
vals[2].index(b'Unknown')
vals[2].index(b'Unnamed')
vals[2].index(b'unknown')
vals = self.cur.pget('Unknown', flags=db.DB_LAST)
self.assert_(vals[1] == 99 or vals[1] == '99', vals)
self.assertEqual(vals[0], 'Unknown')
vals[2].index('Unknown')
vals[2].index('Unnamed')
vals[2].index('unknown')
vals = self.cur.pget(b'Unknown', data=b'wrong value', flags=db.DB_GET_BOTH)
assert vals == None, vals
vals = self.cur.pget('Unknown', data='wrong value', flags=db.DB_GET_BOTH)
self.assertEqual(vals, None, vals)
rec = self.cur.first()
assert rec[0] == b"Jazz"
self.assertEqual(rec[0], "Jazz")
while rec is not None:
count = count + 1
if verbose:
print(rec)
rec = self.cur.next()
rec = getattr(self.cur, "next")()
# all items accounted for EXCEPT for 1 with "Blues" genre
assert count == len(musicdata)-1
self.assertEqual(count, len(musicdata)-1)
self.cur = None
def getGenre(self, priKey, priData):
assert type(priData) == type(b"")
priData = priData.decode("utf-8")
self.assertEqual(type(priData), type(""))
genre = priData.split('|')[2]
if verbose:
print('getGenre key: %r data: %r' % (priKey, priData))
genre = priData.split('|')[2]
if genre == 'Blues':
return db.DB_DONOTINDEX
else:
return genre.encode("utf-8")
return genre
#----------------------------------------------------------------------
@ -380,21 +346,21 @@ class ShelveAssociateTestCase(AssociateTestCase):
filetype=self.dbtype)
def addDataToDB(self, d):
for key, value in musicdata.items():
for key, value in list(musicdata.items()):
if type(self.keytype) == type(''):
key = ("%02d" % key).encode("utf-8")
key = "%02d" % key
d.put(key, value) # save the value as is this time
def getGenre(self, priKey, priData):
assert type(priData) == type(())
self.assertEqual(type(priData), type(()))
if verbose:
print('getGenre key: %r data: %r' % (priKey, priData))
genre = priData[2]
if genre == 'Blues':
return db.DB_DONOTINDEX
else:
return genre.encode("utf-8")
return genre
class ShelveAssociateHashTestCase(ShelveAssociateTestCase):
@ -418,15 +384,17 @@ class ThreadedAssociateTestCase(AssociateTestCase):
t2 = Thread(target = self.writer2,
args = (d, ))
t1.setDaemon(True)
t2.setDaemon(True)
t1.start()
t2.start()
t1.join()
t2.join()
def writer1(self, d):
for key, value in musicdata.items():
for key, value in list(musicdata.items()):
if type(self.keytype) == type(''):
key = ("%02d" % key).encode("utf-8")
key = "%02d" % key
d.put(key, '|'.join(value))
def writer2(self, d):
@ -452,24 +420,23 @@ class ThreadedAssociateRecnoTestCase(ShelveAssociateTestCase):
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (3, 3, 11):
suite.addTest(unittest.makeSuite(AssociateErrorTestCase))
suite.addTest(unittest.makeSuite(AssociateErrorTestCase))
suite.addTest(unittest.makeSuite(AssociateHashTestCase))
suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
suite.addTest(unittest.makeSuite(AssociateHashTestCase))
suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
if db.version() >= (4, 1):
suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))
if db.version() >= (4, 1):
suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
if have_threads:
suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
if have_threads:
suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
return suite
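For readers unfamiliar with the API these tests exercise, a minimal sketch of the DB.associate() pattern: a callback derives the secondary key from each primary record (or returns DB_DONOTINDEX to skip it), and pget() on the secondary resolves back to the primary row. Paths, keys and the record layout are illustrative; Python 2.x string keys are used, matching the unified codebase.
import tempfile
from bsddb import db   # 'from bsddb3 import db' with the standalone pybsddb package

env = db.DBEnv()
env.open(tempfile.mkdtemp(), db.DB_CREATE | db.DB_INIT_MPOOL)

primary = db.DB(env)
primary.open('music.db', db.DB_BTREE, db.DB_CREATE)
by_genre = db.DB(env)
by_genre.open('genre.db', db.DB_BTREE, db.DB_CREATE)

def genre_of(pri_key, pri_data):
    # Third '|'-separated field is the genre; skip 'Blues' records entirely.
    genre = pri_data.split('|')[2]
    if genre == 'Blues':
        return db.DB_DONOTINDEX
    return genre

primary.associate(by_genre, genre_of)
primary.put('01', 'Loaded|Deep Dish|Jazz')
print(by_genre.pget('Jazz'))    # ('01', 'Loaded|Deep Dish|Jazz')
by_genre.close(); primary.close(); env.close()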

View File

@ -4,29 +4,17 @@ various DB flags, etc.
"""
import os
import sys
import errno
import string
import tempfile
from pprint import pprint
import unittest
import time
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
from .test_all import db, test_support, verbose, get_new_environment_path, \
get_new_database_path
from bsddb.test.test_all import verbose
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
DASH = '-'
DASH = b'-'
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#----------------------------------------------------------------------
@ -38,8 +26,8 @@ class VersionTestCase(unittest.TestCase):
print('bsddb.db.version(): %s' % (info, ))
print(db.DB_VERSION_STRING)
print('-=' * 20)
assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
db.DB_VERSION_PATCH)
self.assertEqual(info, (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
db.DB_VERSION_PATCH))
#----------------------------------------------------------------------
@ -57,10 +45,7 @@ class BasicTestCase(unittest.TestCase):
def setUp(self):
if self.useEnv:
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
test_support.rmtree(homeDir)
os.mkdir(homeDir)
self.homeDir=get_new_environment_path()
try:
self.env = db.DBEnv()
self.env.set_lg_max(1024*1024)
@ -68,17 +53,14 @@ class BasicTestCase(unittest.TestCase):
self.env.set_tx_timestamp(int(time.time()))
self.env.set_flags(self.envsetflags, 1)
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
old_tempfile_tempdir = tempfile.tempdir
tempfile.tempdir = self.homeDir
self.filename = os.path.split(tempfile.mktemp())[1]
tempfile.tempdir = old_tempfile_tempdir
self.filename = "test"
# Yes, a bare except is intended, since we're re-raising the exc.
except:
test_support.rmtree(homeDir)
test_support.rmtree(self.homeDir)
raise
else:
self.env = None
self.filename = tempfile.mktemp()
self.filename = get_new_database_path()
# create and open the DB
self.d = db.DB(self.env)
@ -100,13 +82,6 @@ class BasicTestCase(unittest.TestCase):
if self.env is not None:
self.env.close()
test_support.rmtree(self.homeDir)
## XXX(nnorwitz): is this comment still valid?
## Make a new DBEnv to remove the env files from the home dir.
## (It can't be done while the env is open, nor after it has been
## closed, so we make a new one to do it.)
#e = db.DBEnv()
#e.remove(self.homeDir)
#os.remove(os.path.join(self.homeDir, self.filename))
else:
os.remove(self.filename)
@ -117,15 +92,13 @@ class BasicTestCase(unittest.TestCase):
for x in range(self._numKeys//2):
key = '%04d' % (self._numKeys - x) # insert keys in reverse order
key = key.encode("utf-8")
data = self.makeData(key)
d.put(key, data, _txn)
d.put(b'empty value', b'', _txn)
d.put('empty value', '', _txn)
for x in range(self._numKeys//2-1):
key = '%04d' % x # and now some in forward order
key = key.encode("utf-8")
data = self.makeData(key)
d.put(key, data, _txn)
@ -151,49 +124,57 @@ class BasicTestCase(unittest.TestCase):
print('\n', '-=' * 30)
print("Running %s.test01_GetsAndPuts..." % self.__class__.__name__)
for key in [b'0001', b'0100', b'0400', b'0700', b'0999']:
for key in ['0001', '0100', '0400', '0700', '0999']:
data = d.get(key)
if verbose:
print(data)
assert d.get(b'0321') == b'0321-0321-0321-0321-0321'
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
# By default non-existent keys return None...
assert d.get(b'abcd') == None
self.assertEqual(d.get('abcd'), None)
# ...but they raise exceptions in other situations. Call
# set_get_returns_none() to change it.
try:
d.delete(b'abcd')
d.delete('abcd')
except db.DBNotFoundError as val:
assert val.args[0] == db.DB_NOTFOUND
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print(val)
else:
self.fail("expected exception")
d.put(b'abcd', b'a new record')
assert d.get(b'abcd') == b'a new record'
d.put('abcd', 'a new record')
self.assertEqual(d.get('abcd'), 'a new record')
d.put(b'abcd', b'same key')
d.put('abcd', 'same key')
if self.dbsetflags & db.DB_DUP:
assert d.get(b'abcd') == b'a new record'
self.assertEqual(d.get('abcd'), 'a new record')
else:
assert d.get(b'abcd') == b'same key'
self.assertEqual(d.get('abcd'), 'same key')
try:
d.put(b'abcd', b'this should fail', flags=db.DB_NOOVERWRITE)
d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError as val:
assert val.args[0] == db.DB_KEYEXIST
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_KEYEXIST)
else :
self.assertEqual(val.args[0], db.DB_KEYEXIST)
if verbose: print(val)
else:
self.fail("expected exception")
if self.dbsetflags & db.DB_DUP:
assert d.get(b'abcd') == b'a new record'
self.assertEqual(d.get('abcd'), 'a new record')
else:
assert d.get(b'abcd') == b'same key'
self.assertEqual(d.get('abcd'), 'same key')
d.sync()
@ -207,28 +188,28 @@ class BasicTestCase(unittest.TestCase):
self.d.open(self.filename)
d = self.d
assert d.get(b'0321') == b'0321-0321-0321-0321-0321'
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
if self.dbsetflags & db.DB_DUP:
assert d.get(b'abcd') == b'a new record'
self.assertEqual(d.get('abcd'), 'a new record')
else:
assert d.get(b'abcd') == b'same key'
self.assertEqual(d.get('abcd'), 'same key')
rec = d.get_both(b'0555', b'0555-0555-0555-0555-0555')
rec = d.get_both('0555', '0555-0555-0555-0555-0555')
if verbose:
print(rec)
assert d.get_both(b'0555', b'bad data') == None
self.assertEqual(d.get_both('0555', 'bad data'), None)
# test default value
data = d.get(b'bad key', b'bad data')
assert data == b'bad data'
data = d.get('bad key', 'bad data')
self.assertEqual(data, 'bad data')
# any object can pass through
data = d.get(b'bad key', self)
assert data == self
data = d.get('bad key', self)
self.assertEqual(data, self)
s = d.stat()
assert type(s) == type({})
self.assertEqual(type(s), type({}))
if verbose:
print('d.stat() returned this dictionary:')
pprint(s)
@ -244,49 +225,51 @@ class BasicTestCase(unittest.TestCase):
print("Running %s.test02_DictionaryMethods..." % \
self.__class__.__name__)
for key in [b'0002', b'0101', b'0401', b'0701', b'0998']:
for key in ['0002', '0101', '0401', '0701', '0998']:
data = d[key]
assert data == self.makeData(key)
self.assertEqual(data, self.makeData(key))
if verbose:
print(data)
assert len(d) == self._numKeys
keys = d.keys()
assert len(keys) == self._numKeys
assert type(keys) == type([])
self.assertEqual(len(d), self._numKeys)
keys = list(d.keys())
self.assertEqual(len(keys), self._numKeys)
self.assertEqual(type(keys), type([]))
d[b'new record'] = b'a new record'
assert len(d) == self._numKeys+1
keys = d.keys()
assert len(keys) == self._numKeys+1
d['new record'] = 'a new record'
self.assertEqual(len(d), self._numKeys+1)
keys = list(d.keys())
self.assertEqual(len(keys), self._numKeys+1)
d[b'new record'] = b'a replacement record'
assert len(d) == self._numKeys+1
keys = d.keys()
assert len(keys) == self._numKeys+1
d['new record'] = 'a replacement record'
self.assertEqual(len(d), self._numKeys+1)
keys = list(d.keys())
self.assertEqual(len(keys), self._numKeys+1)
if verbose:
print("the first 10 keys are:")
pprint(keys[:10])
assert d[b'new record'] == b'a replacement record'
self.assertEqual(d['new record'], 'a replacement record')
assert d.has_key(b'0001') == 1
assert d.has_key(b'spam') == 0
# We also check the positional parameter
self.assertEqual(d.has_key('0001', None), 1)
# We also check the keyword parameter
self.assertEqual(d.has_key('spam', txn=None), 0)
items = d.items()
assert len(items) == self._numKeys+1
assert type(items) == type([])
assert type(items[0]) == type(())
assert len(items[0]) == 2
items = list(d.items())
self.assertEqual(len(items), self._numKeys+1)
self.assertEqual(type(items), type([]))
self.assertEqual(type(items[0]), type(()))
self.assertEqual(len(items[0]), 2)
if verbose:
print("the first 10 items are:")
pprint(items[:10])
values = d.values()
assert len(values) == self._numKeys+1
assert type(values) == type([])
values = list(d.values())
self.assertEqual(len(values), self._numKeys+1)
self.assertEqual(type(values), type([]))
if verbose:
print("the first 10 values are:")
@ -315,17 +298,22 @@ class BasicTestCase(unittest.TestCase):
if verbose and count % 100 == 0:
print(rec)
try:
rec = c.next()
rec = next(c)
except db.DBNotFoundError as val:
if get_raises_error:
assert val.args[0] == db.DB_NOTFOUND
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print(val)
rec = None
else:
self.fail("unexpected DBNotFoundError")
assert c.get_current_size() == len(c.current()[1]), "%s != len(%r)" % (c.get_current_size(), c.current()[1])
self.assertEqual(c.get_current_size(), len(c.current()[1]),
"%s != len(%r)" % (c.get_current_size(), c.current()[1]))
assert count == self._numKeys
self.assertEqual(count, self._numKeys)
rec = c.last()
@ -338,73 +326,89 @@ class BasicTestCase(unittest.TestCase):
rec = c.prev()
except db.DBNotFoundError as val:
if get_raises_error:
assert val.args[0] == db.DB_NOTFOUND
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print(val)
rec = None
else:
self.fail("unexpected DBNotFoundError")
assert count == self._numKeys
self.assertEqual(count, self._numKeys)
rec = c.set(b'0505')
rec = c.set('0505')
rec2 = c.current()
assert rec == rec2, (repr(rec),repr(rec2))
assert rec[0] == b'0505'
assert rec[1] == self.makeData(b'0505')
assert c.get_current_size() == len(rec[1])
self.assertEqual(rec, rec2)
self.assertEqual(rec[0], '0505')
self.assertEqual(rec[1], self.makeData('0505'))
self.assertEqual(c.get_current_size(), len(rec[1]))
# make sure we get empty values properly
rec = c.set(b'empty value')
assert rec[1] == b''
assert c.get_current_size() == 0
rec = c.set('empty value')
self.assertEqual(rec[1], '')
self.assertEqual(c.get_current_size(), 0)
try:
n = c.set(b'bad key')
n = c.set('bad key')
except db.DBNotFoundError as val:
assert val.args[0] == db.DB_NOTFOUND
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print(val)
else:
if set_raises_error:
self.fail("expected exception")
if n is not None:
if n != None:
self.fail("expected None: %r" % (n,))
rec = c.get_both(b'0404', self.makeData(b'0404'))
assert rec == (b'0404', self.makeData(b'0404'))
rec = c.get_both('0404', self.makeData('0404'))
self.assertEqual(rec, ('0404', self.makeData('0404')))
try:
n = c.get_both(b'0404', b'bad data')
n = c.get_both('0404', 'bad data')
except db.DBNotFoundError as val:
assert val.args[0] == db.DB_NOTFOUND
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print(val)
else:
if get_raises_error:
self.fail("expected exception")
if n is not None:
if n != None:
self.fail("expected None: %r" % (n,))
if self.d.get_type() == db.DB_BTREE:
rec = c.set_range(b'011')
rec = c.set_range('011')
if verbose:
print("searched for '011', found: ", rec)
rec = c.set_range(b'011',dlen=0,doff=0)
rec = c.set_range('011',dlen=0,doff=0)
if verbose:
print("searched (partial) for '011', found: ", rec)
if rec[1] != b'': self.fail('expected empty data portion')
if rec[1] != '': self.fail('expected empty data portion')
ev = c.set_range(b'empty value')
ev = c.set_range('empty value')
if verbose:
print("search for 'empty value' returned", ev)
if ev[1] != b'': self.fail('empty value lookup failed')
if ev[1] != '': self.fail('empty value lookup failed')
c.set(b'0499')
c.set('0499')
c.delete()
try:
rec = c.current()
except db.DBKeyEmptyError as val:
if get_raises_error:
assert val.args[0] == db.DB_KEYEMPTY
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.DB_KEYEMPTY)
else :
self.assertEqual(val.args[0], db.DB_KEYEMPTY)
if verbose: print(val)
else:
self.fail("unexpected DBKeyEmptyError")
@ -412,16 +416,16 @@ class BasicTestCase(unittest.TestCase):
if get_raises_error:
self.fail('DBKeyEmptyError exception expected')
c.next()
next(c)
c2 = c.dup(db.DB_POSITION)
assert c.current() == c2.current()
self.assertEqual(c.current(), c2.current())
c2.put(b'', b'a new value', db.DB_CURRENT)
assert c.current() == c2.current()
assert c.current()[1] == b'a new value'
c2.put('', 'a new value', db.DB_CURRENT)
self.assertEqual(c.current(), c2.current())
self.assertEqual(c.current()[1], 'a new value')
c2.put(b'', b'er', db.DB_CURRENT, dlen=0, doff=5)
assert c2.current()[1] == b'a newer value'
c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
self.assertEqual(c2.current()[1], 'a newer value')
c.close()
c2.close()
@ -441,7 +445,7 @@ class BasicTestCase(unittest.TestCase):
'put':('', 'spam', db.DB_CURRENT),
'set': ("0505",),
}
for method, args in methods_to_test.items():
for method, args in list(methods_to_test.items()):
try:
if verbose:
print("attempting to use a closed cursor's %s method" % \
@ -449,7 +453,11 @@ class BasicTestCase(unittest.TestCase):
# a bug may cause a NULL pointer dereference...
getattr(c, method)(*args)
except db.DBError as val:
assert val.args[0] == 0
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], 0)
else :
self.assertEqual(val.args[0], 0)
if verbose: print(val)
else:
self.fail("no exception raised when using a buggy cursor's"
@ -474,7 +482,7 @@ class BasicTestCase(unittest.TestCase):
self.__class__.__name__)
old = self.d.set_get_returns_none(0)
assert old == 2
self.assertEqual(old, 2)
self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)
def test03b_SimpleCursorWithGetReturnsNone1(self):
@ -496,9 +504,9 @@ class BasicTestCase(unittest.TestCase):
self.__class__.__name__)
old = self.d.set_get_returns_none(1)
assert old == 2
self.assertEqual(old, 2)
old = self.d.set_get_returns_none(2)
assert old == 1
self.assertEqual(old, 1)
self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)
#----------------------------------------
@ -510,26 +518,27 @@ class BasicTestCase(unittest.TestCase):
print("Running %s.test04_PartialGetAndPut..." % \
self.__class__.__name__)
key = b"partialTest"
data = b"1" * 1000 + b"2" * 1000
key = "partialTest"
data = "1" * 1000 + "2" * 1000
d.put(key, data)
assert d.get(key) == data
assert d.get(key, dlen=20, doff=990) == (b"1" * 10) + (b"2" * 10)
self.assertEqual(d.get(key), data)
self.assertEqual(d.get(key, dlen=20, doff=990),
("1" * 10) + ("2" * 10))
d.put(b"partialtest2", (b"1" * 30000) + b"robin" )
assert d.get(b"partialtest2", dlen=5, doff=30000) == b"robin"
d.put("partialtest2", ("1" * 30000) + "robin" )
self.assertEqual(d.get("partialtest2", dlen=5, doff=30000), "robin")
# There seems to be a bug in DB here... Commented out the test for
# now.
##assert d.get("partialtest2", dlen=5, doff=30010) == ""
##self.assertEqual(d.get("partialtest2", dlen=5, doff=30010), "")
if self.dbsetflags != db.DB_DUP:
# Partial put with duplicate records requires a cursor
d.put(key, b"0000", dlen=2000, doff=0)
assert d.get(key) == b"0000"
d.put(key, "0000", dlen=2000, doff=0)
self.assertEqual(d.get(key), "0000")
d.put(key, b"1111", dlen=1, doff=2)
assert d.get(key) == b"0011110"
d.put(key, "1111", dlen=1, doff=2)
self.assertEqual(d.get(key), "0011110")
#----------------------------------------
@ -540,30 +549,27 @@ class BasicTestCase(unittest.TestCase):
print("Running %s.test05_GetSize..." % self.__class__.__name__)
for i in range(1, 50000, 500):
key = ("size%s" % i).encode("utf-8")
key = "size%s" % i
#print "before ", i,
d.put(key, b"1" * i)
d.put(key, "1" * i)
#print "after",
assert d.get_size(key) == i
self.assertEqual(d.get_size(key), i)
#print "done"
#----------------------------------------
def test06_Truncate(self):
if db.version() < (3,3):
# truncate is a feature of BerkeleyDB 3.3 and above
return
d = self.d
if verbose:
print('\n', '-=' * 30)
print("Running %s.test99_Truncate..." % self.__class__.__name__)
d.put(b"abcde", b"ABCDE");
d.put("abcde", "ABCDE");
num = d.truncate()
assert num >= 1, "truncate returned <= 0 on non-empty database"
self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate()
assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
#----------------------------------------
@ -628,6 +634,11 @@ class BasicHashWithEnvTestCase(BasicWithEnvTestCase):
#----------------------------------------------------------------------
class BasicTransactionTestCase(BasicTestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
useEnv = 1
envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
@ -653,19 +664,21 @@ class BasicTransactionTestCase(BasicTestCase):
print('\n', '-=' * 30)
print("Running %s.test06_Transactions..." % self.__class__.__name__)
assert d.get(b'new rec', txn=self.txn) == None
d.put(b'new rec', b'this is a new record', self.txn)
assert d.get(b'new rec', txn=self.txn) == b'this is a new record'
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.abort()
assert d.get(b'new rec') == None
self.assertEqual(d.get('new rec'), None)
self.txn = self.env.txn_begin()
assert d.get(b'new rec', txn=self.txn) == None
d.put(b'new rec', b'this is a new record', self.txn)
assert d.get(b'new rec', txn=self.txn) == b'this is a new record'
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.commit()
assert d.get(b'new rec') == b'this is a new record'
self.assertEqual(d.get('new rec'), 'this is a new record')
self.txn = self.env.txn_begin()
c = d.cursor(self.txn)
@ -675,8 +688,8 @@ class BasicTransactionTestCase(BasicTestCase):
count = count + 1
if verbose and count % 100 == 0:
print(rec)
rec = c.next()
assert count == self._numKeys+1
rec = next(c)
self.assertEqual(count, self._numKeys+1)
c.close() # Cursors *MUST* be closed before commit!
self.txn.commit()
@ -687,43 +700,39 @@ class BasicTransactionTestCase(BasicTestCase):
except db.DBIncompleteError:
pass
if db.version() >= (4,0):
statDict = self.env.log_stat(0);
assert 'magic' in statDict
assert 'version' in statDict
assert 'cur_file' in statDict
assert 'region_nowait' in statDict
statDict = self.env.log_stat(0);
self.assert_('magic' in statDict)
self.assert_('version' in statDict)
self.assert_('cur_file' in statDict)
self.assert_('region_nowait' in statDict)
# must have at least one log file present:
logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
assert logs != None
self.assertNotEqual(logs, None)
for log in logs:
if verbose:
print('log file: ' + log)
if db.version() >= (4,2):
logs = self.env.log_archive(db.DB_ARCH_REMOVE)
assert not logs
self.assertTrue(not logs)
self.txn = self.env.txn_begin()
#----------------------------------------
def test07_TxnTruncate(self):
if db.version() < (3,3):
# truncate is a feature of BerkeleyDB 3.3 and above
return
d = self.d
if verbose:
print('\n', '-=' * 30)
print("Running %s.test07_TxnTruncate..." % self.__class__.__name__)
d.put(b"abcde", b"ABCDE");
d.put("abcde", "ABCDE");
txn = self.env.txn_begin()
num = d.truncate(txn)
assert num >= 1, "truncate returned <= 0 on non-empty database"
self.assert_(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate(txn)
assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
txn.commit()
#----------------------------------------
@ -769,20 +778,20 @@ class BTreeRecnoTestCase(BasicTestCase):
print("Running %s.test07_RecnoInBTree..." % self.__class__.__name__)
rec = d.get(200)
assert type(rec) == type(())
assert len(rec) == 2
self.assertEqual(type(rec), type(()))
self.assertEqual(len(rec), 2)
if verbose:
print("Record #200 is ", rec)
c = d.cursor()
c.set(b'0200')
c.set('0200')
num = c.get_recno()
assert type(num) == type(1)
self.assertEqual(type(num), type(1))
if verbose:
print("recno of d['0200'] is ", num)
rec = c.current()
assert c.set_recno(num) == rec
self.assertEqual(c.set_recno(num), rec)
c.close()
@ -803,40 +812,39 @@ class BasicDUPTestCase(BasicTestCase):
print("Running %s.test08_DuplicateKeys..." % \
self.__class__.__name__)
d.put(b"dup0", b"before")
d.put("dup0", "before")
for x in "The quick brown fox jumped over the lazy dog.".split():
x = x.encode("ascii")
d.put(b"dup1", x)
d.put(b"dup2", b"after")
d.put("dup1", x)
d.put("dup2", "after")
data = d.get(b"dup1")
assert data == b"The"
data = d.get("dup1")
self.assertEqual(data, "The")
if verbose:
print(data)
c = d.cursor()
rec = c.set(b"dup1")
assert rec == (b'dup1', b'The')
rec = c.set("dup1")
self.assertEqual(rec, ('dup1', 'The'))
next = c.next()
assert next == (b'dup1', b'quick')
next_reg = next(c)
self.assertEqual(next_reg, ('dup1', 'quick'))
rec = c.set(b"dup1")
rec = c.set("dup1")
count = c.count()
assert count == 9
self.assertEqual(count, 9)
next_dup = c.next_dup()
assert next_dup == (b'dup1', b'quick')
self.assertEqual(next_dup, ('dup1', 'quick'))
rec = c.set(b'dup1')
rec = c.set('dup1')
while rec is not None:
if verbose:
print(rec)
rec = c.next_dup()
c.set(b'dup1')
c.set('dup1')
rec = c.next_nodup()
assert rec[0] != b'dup1'
self.assertNotEqual(rec[0], 'dup1')
if verbose:
print(rec)
@ -884,11 +892,9 @@ class BasicMultiDBTestCase(BasicTestCase):
self.dbopenflags|db.DB_CREATE)
for x in "The quick brown fox jumped over the lazy dog".split():
x = x.encode("ascii")
d2.put(x, self.makeData(x))
for x in letters:
x = x.encode("ascii")
for x in string.letters:
d3.put(x, x*70)
d1.sync()
@ -917,8 +923,8 @@ class BasicMultiDBTestCase(BasicTestCase):
count = count + 1
if verbose and (count % 50) == 0:
print(rec)
rec = c1.next()
assert count == self._numKeys
rec = next(c1)
self.assertEqual(count, self._numKeys)
count = 0
rec = c2.first()
@ -926,8 +932,8 @@ class BasicMultiDBTestCase(BasicTestCase):
count = count + 1
if verbose:
print(rec)
rec = c2.next()
assert count == 9
rec = next(c2)
self.assertEqual(count, 9)
count = 0
rec = c3.first()
@ -935,15 +941,14 @@ class BasicMultiDBTestCase(BasicTestCase):
count = count + 1
if verbose:
print(rec)
rec = c3.next()
assert count == 52
rec = next(c3)
self.assertEqual(count, len(string.letters))
c1.close()
c2.close()
c3.close()
d1.close()
d2.close()
d3.close()
@ -965,6 +970,55 @@ class HashMultiDBTestCase(BasicMultiDBTestCase):
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
class PrivateObject(unittest.TestCase) :
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def tearDown(self) :
del self.obj
def test01_DefaultIsNone(self) :
self.assertEqual(self.obj.get_private(), None)
def test02_assignment(self) :
a = "example of private object"
self.obj.set_private(a)
b = self.obj.get_private()
self.assertTrue(a is b) # Object identity
def test03_leak_assignment(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.assertEqual(refcount+1, sys.getrefcount(a))
self.obj.set_private(None)
self.assertEqual(refcount, sys.getrefcount(a))
def test04_leak_GC(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.obj = None
self.assertEqual(refcount, sys.getrefcount(a))
class DBEnvPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DBEnv()
class DBPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DB()
class CrashAndBurn(unittest.TestCase) :
def test01_OpenCrash(self) :
# See http://bugs.python.org/issue3307
self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
@ -988,6 +1042,9 @@ def test_suite():
suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
suite.addTest(unittest.makeSuite(HashMultiDBTestCase))
suite.addTest(unittest.makeSuite(DBEnvPrivateObject))
suite.addTest(unittest.makeSuite(DBPrivateObject))
#suite.addTest(unittest.makeSuite(CrashAndBurn))
return suite
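A short sketch of the transactional pattern test06_Transactions drives, assuming a fully transactional environment; paths and keys are illustrative:
import tempfile
from bsddb import db

env = db.DBEnv()
env.open(tempfile.mkdtemp(), db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL |
         db.DB_INIT_LOCK | db.DB_INIT_LOG | db.DB_INIT_TXN)

d = db.DB(env)
d.open('txn-demo.db', db.DB_BTREE, db.DB_CREATE | db.DB_AUTO_COMMIT)

txn = env.txn_begin()
d.put('new rec', 'this is a new record', txn)
txn.abort()                                   # rolled back
assert d.get('new rec') is None

txn = env.txn_begin()
d.put('new rec', 'this is a new record', txn)
txn.commit()                                  # durable from here on
assert d.get('new rec') == 'this is a new record'
d.close(); env.close()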

View File

@ -2,55 +2,51 @@
TestCases for python DB Btree key comparison function.
"""
import shutil
import sys, os, re
from io import StringIO
import tempfile
from . import test_all
from io import StringIO
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve
except ImportError:
# For Python 2.3
from bsddb import db, dbshelve
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbshelve, test_support, \
get_new_environment_path, get_new_database_path
lexical_cmp = cmp
def lowercase_cmp(left, right):
return cmp (str(left, encoding='ascii').lower(),
str(right, encoding='ascii').lower())
return cmp (left.lower(), right.lower())
def make_reverse_comparator (cmp):
def reverse (left, right, delegate=cmp):
return - delegate (left, right)
return reverse
_expected_lexical_test_data = [s.encode('ascii') for s in
('', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf')]
_expected_lowercase_test_data = [s.encode('ascii') for s in
('', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP')]
def CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
_expected_lexical_test_data = ['', 'CCCP', 'a', 'aaa', 'b', 'c', 'cccce', 'ccccf']
_expected_lowercase_test_data = ['', 'a', 'aaa', 'b', 'c', 'CC', 'cccce', 'ccccf', 'CCCP']
class ComparatorTests (unittest.TestCase):
def comparator_test_helper (self, comparator, expected_data):
data = expected_data[:]
data.sort (key=CmpToKey(comparator))
import sys
if sys.version_info[0] < 3 :
if sys.version_info[:3] < (2, 4, 0):
data.sort(comparator)
else :
data.sort(cmp=comparator)
else : # Insertion Sort. Please, improve
data2 = []
for i in data :
for j, k in enumerate(data2) :
r = comparator(k, i)
if r == 1 :
data2.insert(j, i)
break
else :
data2.append(i)
data = data2
self.failUnless (data == expected_data,
"comparator `%s' is not right: %s vs. %s"
% (comparator, expected_data, data))
@ -71,30 +67,24 @@ class AbstractBtreeKeyCompareTestCase (unittest.TestCase):
def setUp (self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join (tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try:
os.mkdir (homeDir)
except os.error:
pass
env = db.DBEnv ()
self.homeDir = get_new_environment_path()
env = db.DBEnv()
env.open (self.homeDir,
db.DB_CREATE | db.DB_INIT_MPOOL
| db.DB_INIT_LOCK | db.DB_THREAD)
self.env = env
def tearDown (self):
self.closeDB ()
self.closeDB()
if self.env is not None:
self.env.close ()
self.env.close()
self.env = None
test_support.rmtree(self.homeDir)
def addDataToDB (self, data):
i = 0
for item in data:
self.db.put (item, str(i).encode("ascii"))
self.db.put (item, str (i))
i = i + 1
def createDB (self, key_comparator):
@ -128,10 +118,10 @@ class AbstractBtreeKeyCompareTestCase (unittest.TestCase):
self.failUnless (index < len (expected),
"too many values returned from cursor")
self.failUnless (expected[index] == key,
"expected value %r at %d but got %r"
"expected value `%s' at %d but got `%s'"
% (expected[index], index, key))
index = index + 1
rec = curs.next ()
rec = next(curs)
self.failUnless (index == len (expected),
"not enough values returned from cursor")
finally:
@ -158,10 +148,10 @@ class BtreeKeyCompareTestCase (AbstractBtreeKeyCompareTestCase):
def socialist_comparator (l, r):
return 0
self.createDB (socialist_comparator)
self.addDataToDB ([b'b', b'a', b'd'])
self.addDataToDB (['b', 'a', 'd'])
# all things being equal the first key will be the only key
# in the database... (with the last key's value fwiw)
self.finishTest ([b'b'])
self.finishTest (['b'])
class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
@ -200,9 +190,9 @@ class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
finally:
temp = sys.stderr
sys.stderr = stdErr
errorOut = temp.getvalue()
if not successRe.search(errorOut):
self.fail("unexpected stderr output: %r" % errorOut)
errorOut = temp.getvalue()
if not successRe.search(errorOut):
self.fail("unexpected stderr output:\n"+errorOut)
def _test_compare_function_exception (self):
self.startTest ()
@ -213,7 +203,7 @@ class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
raise RuntimeError("i'm a naughty comparison function")
self.createDB (bad_comparator)
#print "\n*** test should print 2 uncatchable tracebacks ***"
self.addDataToDB ([b'a', b'b', b'c']) # this should raise, but...
self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
self.finishTest ()
def test_compare_function_exception(self):
@ -231,7 +221,7 @@ class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
return l
self.createDB (bad_comparator)
#print "\n*** test should print 2 errors about returning an int ***"
self.addDataToDB ([b'a', b'b', b'c']) # this should raise, but...
self.addDataToDB (['a', 'b', 'c']) # this should raise, but...
self.finishTest ()
def test_compare_function_bad_return(self):
@ -250,7 +240,7 @@ class BtreeExceptionsTestCase (AbstractBtreeKeyCompareTestCase):
self.createDB (my_compare)
try:
self.db.set_bt_compare (my_compare)
assert False, "this set should fail"
self.assert_(0, "this set should fail")
except RuntimeError as msg:
pass
@ -259,10 +249,9 @@ def test_suite ():
res = unittest.TestSuite ()
res.addTest (unittest.makeSuite (ComparatorTests))
if db.version () >= (3, 3, 11):
res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
return res
if __name__ == '__main__':
unittest.main (defaultTest = 'test_suite')
unittest.main (defaultTest = 'suite')
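A compact sketch of the feature under test here: set_bt_compare() installs a Python comparison function, before open(), that orders the btree keys. Written in Python 2.x style with cmp(), matching the unified codebase; the path is illustrative.
import os, tempfile
from bsddb import db

def reverse_lexical(left, right):
    # Berkeley DB expects a C-style result: negative, zero or positive.
    return cmp(right, left)

btree = db.DB()
btree.set_bt_compare(reverse_lexical)       # must be installed before open()
btree.open(os.path.join(tempfile.mkdtemp(), 'reverse.db'),
           db.DB_BTREE, db.DB_CREATE)
for key in ('a', 'b', 'c'):
    btree.put(key, key)
cur = btree.cursor()
print(cur.first())                          # ('c', 'c') under the reversed order
cur.close()
btree.close()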

View File

@ -3,18 +3,16 @@ Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os
import os, string
import unittest
import tempfile
from bsddb.test.test_all import verbose
from bsddb import db, hashopen, btopen, rnopen
from .test_all import db, hashopen, btopen, rnopen, verbose, \
get_new_database_path
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.filename = get_new_database_path()
def tearDown(self):
try:
@ -36,31 +34,31 @@ class CompatibilityTestCase(unittest.TestCase):
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x].encode("ascii")
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print('%s %s %s' % getTest)
assert getTest[1] == b'quick', 'data mismatch!'
self.assertEqual(getTest[1], 'quick', 'data mismatch!')
rv = f.set_location(3)
if rv != (3, b'brown'):
if rv != (3, 'brown'):
self.fail('recno database set_location failed: '+repr(rv))
f[25] = b'twenty-five'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = b'twenty'
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[b'a string']
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
@ -70,7 +68,7 @@ class CompatibilityTestCase(unittest.TestCase):
if verbose:
print(rec)
try:
rec = f.next()
rec = next(f)
except KeyError:
break
@ -96,42 +94,42 @@ class CompatibilityTestCase(unittest.TestCase):
else:
if verbose: print("truth test: false")
f[b'0'] = b''
f[b'a'] = b'Guido'
f[b'b'] = b'van'
f[b'c'] = b'Rossum'
f[b'd'] = b'invented'
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
# 'e' intentionally left out
f[b'f'] = b'Python'
f['f'] = 'Python'
if verbose:
print('%s %s %s' % (f['a'], f['b'], f['c']))
if verbose:
print('key ordering...')
start = f.set_location(f.first()[0])
if start != (b'0', b''):
if start != ('0', ''):
self.fail("incorrect first() result: "+repr(start))
while 1:
try:
rec = f.next()
rec = next(f)
except KeyError:
assert rec == f.last(), 'Error, last != last!'
self.assertEqual(rec, f.last(), 'Error, last <> last!')
f.previous()
break
if verbose:
print(rec)
assert f.has_key(b'f'), 'Error, missing key!'
self.assert_('f' in f, 'Error, missing key!')
# test that set_location() returns the next nearest key, value
# on btree databases and raises KeyError on others.
if factory == btopen:
e = f.set_location(b'e')
if e != (b'f', b'Python'):
e = f.set_location('e')
if e != ('f', 'Python'):
self.fail('wrong key,value returned: '+repr(e))
else:
try:
e = f.set_location(b'e')
e = f.set_location('e')
except KeyError:
pass
else:
@ -155,17 +153,17 @@ class CompatibilityTestCase(unittest.TestCase):
if verbose:
print('modification...')
f = factory(self.filename, 'w')
f[b'd'] = b'discovered'
f['d'] = 'discovered'
if verbose:
print('access...')
for key in f.keys():
for key in list(f.keys()):
word = f[key]
if verbose:
print(word)
def noRec(f):
rec = f[b'no such key']
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):

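A minimal sketch of the legacy compatibility interface exercised above (hashopen/btopen/rnopen return dictionary-like objects); the file location and keys are illustrative:
import os, tempfile
from bsddb import btopen

f = btopen(os.path.join(tempfile.mkdtemp(), 'compat.db'), 'c')   # 'c': create
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
print(f.first())               # ('a', 'Guido'); keys come back in btree order
print(f.set_location('b'))     # ('b', 'van'); on btree files this seeks to >= key
f.close()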
View File

@ -1,16 +1,8 @@
import unittest
import tempfile
import sys, os, glob
import shutil
import tempfile
from bsddb import db
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
import os, glob
from .test_all import db, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
@ -19,11 +11,7 @@ class pget_bugTestCase(unittest.TestCase):
db_name = 'test-cursor_pget.db'
def setUp(self):
self.homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
try:
os.mkdir(self.homeDir)
except os.error:
pass
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.primary_db = db.DB(self.env)
@ -32,9 +20,9 @@ class pget_bugTestCase(unittest.TestCase):
self.secondary_db.set_flags(db.DB_DUP)
self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE, db.DB_CREATE)
self.primary_db.associate(self.secondary_db, lambda key, data: data)
self.primary_db.put(b'salad', b'eggs')
self.primary_db.put(b'spam', b'ham')
self.primary_db.put(b'omelet', b'eggs')
self.primary_db.put('salad', 'eggs')
self.primary_db.put('spam', 'ham')
self.primary_db.put('omelet', 'eggs')
def tearDown(self):
@ -49,11 +37,11 @@ class pget_bugTestCase(unittest.TestCase):
def test_pget(self):
cursor = self.secondary_db.cursor()
self.assertEquals((b'eggs', b'salad', b'eggs'), cursor.pget(key=b'eggs', flags=db.DB_SET))
self.assertEquals((b'eggs', b'omelet', b'eggs'), cursor.pget(db.DB_NEXT_DUP))
self.assertEquals(('eggs', 'salad', 'eggs'), cursor.pget(key='eggs', flags=db.DB_SET))
self.assertEquals(('eggs', 'omelet', 'eggs'), cursor.pget(db.DB_NEXT_DUP))
self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP))
self.assertEquals((b'ham', b'spam', b'ham'), cursor.pget(b'ham', b'spam', flags=db.DB_SET))
self.assertEquals(('ham', 'spam', 'ham'), cursor.pget('ham', 'spam', flags=db.DB_SET))
self.assertEquals(None, cursor.pget(db.DB_NEXT_DUP))
cursor.close()

View File

@ -1,21 +1,9 @@
import shutil
import sys, os
import os, string
import unittest
import tempfile
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbobj
except ImportError:
# For Python 2.3
from bsddb import db, dbobj
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbobj, test_support, get_new_environment_path, \
get_new_database_path
#----------------------------------------------------------------------
@ -24,10 +12,7 @@ class dbobjTestCase(unittest.TestCase):
db_name = 'test-dbobj.db'
def setUp(self):
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
self.homeDir = get_new_environment_path()
def tearDown(self):
if hasattr(self, 'db'):
@ -40,18 +25,18 @@ class dbobjTestCase(unittest.TestCase):
class TestDBEnv(dbobj.DBEnv): pass
class TestDB(dbobj.DB):
def put(self, key, *args, **kwargs):
key = key.decode("ascii").upper().encode("ascii")
key = key.upper()
# call our parent classes put method with an upper case key
return dbobj.DB.put(self, key, *args, **kwargs)
return dbobj.DB.put(*(self, key) + args, **kwargs)
self.env = TestDBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = TestDB(self.env)
self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
self.db.put(b'spam', b'eggs')
assert self.db.get(b'spam') == None, \
"overridden dbobj.DB.put() method failed [1]"
assert self.db.get(b'SPAM') == b'eggs', \
"overridden dbobj.DB.put() method failed [2]"
self.db.put('spam', 'eggs')
self.assertEqual(self.db.get('spam'), None,
"overridden dbobj.DB.put() method failed [1]")
self.assertEqual(self.db.get('SPAM'), 'eggs',
"overridden dbobj.DB.put() method failed [2]")
self.db.close()
self.env.close()
@ -61,14 +46,14 @@ class dbobjTestCase(unittest.TestCase):
self.db = dbobj.DB(self.env)
self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE)
# __setitem__
self.db[b'spam'] = b'eggs'
self.db['spam'] = 'eggs'
# __len__
assert len(self.db) == 1
self.assertEqual(len(self.db), 1)
# __getitem__
assert self.db[b'spam'] == b'eggs'
self.assertEqual(self.db['spam'], 'eggs')
# __del__
del self.db[b'spam']
assert self.db.get(b'spam') == None, "dbobj __del__ failed"
del self.db['spam']
self.assertEqual(self.db.get('spam'), None, "dbobj __del__ failed")
self.db.close()
self.env.close()

View File

@ -2,19 +2,14 @@
TestCases for checking dbShelve objects.
"""
import os
import shutil
import tempfile, random
import os, string
import random
import unittest
from bsddb import db, dbshelve
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbshelve, test_support, verbose, \
get_new_environment_path, get_new_database_path
from bsddb.test.test_all import verbose
#----------------------------------------------------------------------
@ -22,43 +17,44 @@ from bsddb.test.test_all import verbose
# We want the objects to be comparable so we can test dbshelve.values
# later on.
class DataClass:
def __init__(self):
self.value = random.random()
def __repr__(self):
return "DataClass(%r)" % self.value
def __repr__(self) : # For Python 3.0 comparison
return "DataClass %f" %self.value
def __eq__(self, other):
value = self.value
if isinstance(other, DataClass):
other = other.value
return value == other
def __cmp__(self, other): # For Python 2.x comparison
return cmp(self.value, other)
def __lt__(self, other):
value = self.value
if isinstance(other, DataClass):
other = other.value
return value < other
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
class DBShelveTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
import sys
if sys.version_info[0] >= 3 :
from .test_all import do_proxy_db_py3k
self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
self.filename = get_new_database_path()
self.do_open()
def tearDown(self):
import sys
if sys.version_info[0] >= 3 :
from .test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.unlink(self.filename)
def mk(self, key):
"""Turn key into an appropriate key type for this db"""
# override in child class for RECNO
return key.encode("ascii")
import sys
if sys.version_info[0] < 3 :
return key
else :
return bytes(key, "iso8859-1") # 8 bits
def populateDB(self, d):
for x in letters:
for x in string.letters:
d[self.mk('S' + x)] = 10 * x # add a string
d[self.mk('I' + x)] = ord(x) # add an integer
d[self.mk('L' + x)] = [x] * 10 # add a list
@ -86,19 +82,13 @@ class DBShelveTestCase(unittest.TestCase):
print("Running %s.test01_basics..." % self.__class__.__name__)
self.populateDB(self.d)
if verbose:
print(1, self.d.keys())
self.d.sync()
if verbose:
print(2, self.d.keys())
self.do_close()
self.do_open()
if verbose:
print(3, self.d.keys())
d = self.d
l = len(d)
k = d.keys()
k = list(d.keys())
s = d.stat()
f = d.fd()
@ -107,30 +97,37 @@ class DBShelveTestCase(unittest.TestCase):
print("keys:", k)
print("stats:", s)
self.assertFalse(d.has_key(self.mk('bad key')))
self.assertTrue(d.has_key(self.mk('IA')), d.keys())
self.assertTrue(d.has_key(self.mk('OA')))
self.assertEqual(0, self.mk('bad key') in d)
self.assertEqual(1, self.mk('IA') in d)
self.assertEqual(1, self.mk('OA') in d)
d.delete(self.mk('IA'))
del d[self.mk('OA')]
self.assertFalse(d.has_key(self.mk('IA')))
self.assertFalse(d.has_key(self.mk('OA')))
self.assertEqual(0, self.mk('IA') in d)
self.assertEqual(0, self.mk('OA') in d)
self.assertEqual(len(d), l-2)
values = []
for key in d.keys():
for key in list(d.keys()):
value = d[key]
values.append(value)
if verbose:
print("%s: %s" % (key, value))
self.checkrec(key, value)
dbvalues = sorted(d.values(), key=lambda x: (str(type(x)), x))
self.assertEqual(len(dbvalues), len(d.keys()))
values.sort(key=lambda x: (str(type(x)), x))
self.assertEqual(values, dbvalues, "%r != %r" % (values, dbvalues))
dbvalues = list(d.values())
self.assertEqual(len(dbvalues), len(list(d.keys())))
import sys
if sys.version_info[0] < 3 :
values.sort()
dbvalues.sort()
self.assertEqual(values, dbvalues)
else : # XXX: Convert all to strings. Please, improve
values.sort(key=lambda x : str(x))
dbvalues.sort(key=lambda x : str(x))
self.assertEqual(repr(values), repr(dbvalues))
items = d.items()
items = list(d.items())
self.assertEqual(len(items), len(values))
for key, value in items:
@ -138,16 +135,16 @@ class DBShelveTestCase(unittest.TestCase):
self.assertEqual(d.get(self.mk('bad key')), None)
self.assertEqual(d.get(self.mk('bad key'), None), None)
self.assertEqual(d.get(self.mk('bad key'), b'a string'), b'a string')
self.assertEqual(d.get(self.mk('bad key'), 'a string'), 'a string')
self.assertEqual(d.get(self.mk('bad key'), [1, 2, 3]), [1, 2, 3])
d.set_get_returns_none(0)
self.assertRaises(db.DBNotFoundError, d.get, self.mk('bad key'))
d.set_get_returns_none(1)
d.put(self.mk('new key'), b'new data')
self.assertEqual(d.get(self.mk('new key')), b'new data')
self.assertEqual(d[self.mk('new key')], b'new data')
d.put(self.mk('new key'), 'new data')
self.assertEqual(d.get(self.mk('new key')), 'new data')
self.assertEqual(d[self.mk('new key')], 'new data')
@ -165,10 +162,11 @@ class DBShelveTestCase(unittest.TestCase):
while rec is not None:
count = count + 1
if verbose:
print(repr(rec))
print(rec)
key, value = rec
self.checkrec(key, value)
rec = c.next()
# Hack to avoid conversion by 2to3 tool
rec = getattr(c, "next")()
del c
self.assertEqual(count, len(d))
@ -191,6 +189,7 @@ class DBShelveTestCase(unittest.TestCase):
self.checkrec(key, value)
del c
def test03_append(self):
# NOTE: this is overridden in RECNO subclass, don't change its name.
if verbose:
@ -198,31 +197,44 @@ class DBShelveTestCase(unittest.TestCase):
print("Running %s.test03_append..." % self.__class__.__name__)
self.assertRaises(dbshelve.DBShelveError,
self.d.append, b'unit test was here')
self.d.append, 'unit test was here')
def checkrec(self, key, value):
# override this in a subclass if the key type is different
x = key[1:]
if key[0:1] == b'S':
self.assertEquals(type(value), str)
self.assertEquals(value, 10 * x.decode("ascii"))
elif key[0:1] == b'I':
self.assertEquals(type(value), int)
self.assertEquals(value, ord(x))
import sys
if sys.version_info[0] >= 3 :
if isinstance(key, bytes) :
key = key.decode("iso8859-1") # 8 bits
elif key[0:1] == b'L':
self.assertEquals(type(value), list)
self.assertEquals(value, [x.decode("ascii")] * 10)
x = key[1]
if key[0] == 'S':
self.assertEqual(type(value), str)
self.assertEqual(value, 10 * x)
elif key[0:1] == b'O':
self.assertEquals(value.S, 10 * x.decode("ascii"))
self.assertEquals(value.I, ord(x))
self.assertEquals(value.L, [x.decode("ascii")] * 10)
elif key[0] == 'I':
self.assertEqual(type(value), int)
self.assertEqual(value, ord(x))
elif key[0] == 'L':
self.assertEqual(type(value), list)
self.assertEqual(value, [x] * 10)
elif key[0] == 'O':
import sys
if sys.version_info[0] < 3 :
from types import InstanceType
self.assertEqual(type(value), InstanceType)
else :
self.assertEqual(type(value), DataClass)
self.assertEqual(value.S, 10 * x)
self.assertEqual(value.I, ord(x))
self.assertEqual(value.L, [x] * 10)
else:
self.fail('Unknown key type, fix the test')
self.assert_(0, 'Unknown key type, fix the test')
#----------------------------------------------------------------------
@ -258,19 +270,12 @@ class ThreadHashShelveTestCase(BasicShelveTestCase):
#----------------------------------------------------------------------
class BasicEnvShelveTestCase(DBShelveTestCase):
def setUp(self):
self.homeDir = tempfile.mkdtemp()
self.filename = 'dbshelve_db_file.db'
self.do_open()
def do_open(self):
self.homeDir = homeDir = os.path.join(
tempfile.gettempdir(), 'db_home%d'%os.getpid())
try: os.mkdir(homeDir)
except os.error: pass
self.env = db.DBEnv()
self.env.open(self.homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
self.env.open(self.homeDir,
self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)
self.filename = os.path.split(self.filename)[1]
self.d = dbshelve.DBShelf(self.env)
self.d.open(self.filename, self.dbtype, self.dbflags)
@ -280,7 +285,15 @@ class BasicEnvShelveTestCase(DBShelveTestCase):
self.env.close()
def setUp(self) :
self.homeDir = get_new_environment_path()
DBShelveTestCase.setUp(self)
def tearDown(self):
import sys
if sys.version_info[0] >= 3 :
from .test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
self.do_close()
test_support.rmtree(self.homeDir)
@ -327,7 +340,7 @@ class RecNoShelveTestCase(BasicShelveTestCase):
def mk(self, key):
if key not in self.key_map:
self.key_map[key] = self.key_pool.pop(0)
self.intkey_map[self.key_map[key]] = key.encode('ascii')
self.intkey_map[self.key_map[key]] = key
return self.key_map[key]
def checkrec(self, intkey, value):
@ -339,14 +352,14 @@ class RecNoShelveTestCase(BasicShelveTestCase):
print('\n', '-=' * 30)
print("Running %s.test03_append..." % self.__class__.__name__)
self.d[1] = b'spam'
self.d[5] = b'eggs'
self.assertEqual(6, self.d.append(b'spam'))
self.assertEqual(7, self.d.append(b'baked beans'))
self.assertEqual(b'spam', self.d.get(6))
self.assertEqual(b'spam', self.d.get(1))
self.assertEqual(b'baked beans', self.d.get(7))
self.assertEqual(b'eggs', self.d.get(5))
self.d[1] = 'spam'
self.d[5] = 'eggs'
self.assertEqual(6, self.d.append('spam'))
self.assertEqual(7, self.d.append('baked beans'))
self.assertEqual('spam', self.d.get(6))
self.assertEqual('spam', self.d.get(1))
self.assertEqual('baked beans', self.d.get(7))
self.assertEqual('eggs', self.d.get(5))
#----------------------------------------------------------------------
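A minimal dbshelve sketch of what these tests cover: arbitrary picklable Python objects stored under string keys in a Berkeley DB file. The path is illustrative; Python 2.x string keys are used, as in the unified codebase.
import os, tempfile
from bsddb import dbshelve

shelf = dbshelve.open(os.path.join(tempfile.mkdtemp(), 'shelf.db'))
shelf['Sa'] = 'aaaaaaaaaa'      # a plain string
shelf['Ia'] = ord('a')          # an integer, pickled transparently
shelf['La'] = ['a'] * 10        # a list
print(shelf['La'])              # ['a', 'a', ..., 'a']
shelf.close()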

View File

@ -20,25 +20,16 @@
#
# $Id$
import sys, os, re
import pickle
import tempfile
import os, re
try:
import pickle
pickle = pickle
except ImportError:
import pickle
import unittest
from bsddb.test.test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbtables
except ImportError:
# For Python 2.3
from bsddb import db, dbtables
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbtables, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
@ -46,16 +37,21 @@ class TableDBTestCase(unittest.TestCase):
db_name = 'test-table.db'
def setUp(self):
homeDir = tempfile.mkdtemp()
self.testHomeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
import sys
if sys.version_info[0] >= 3 :
from .test_all import do_proxy_db_py3k
self._flag_proxy_db_py3k = do_proxy_db_py3k(False)
self.testHomeDir = get_new_environment_path()
self.tdb = dbtables.bsdTableDB(
filename='tabletest.db', dbhome=homeDir, create=1)
filename='tabletest.db', dbhome=self.testHomeDir, create=1)
def tearDown(self):
self.tdb.close()
import sys
if sys.version_info[0] >= 3 :
from .test_all import do_proxy_db_py3k
do_proxy_db_py3k(self._flag_proxy_db_py3k)
test_support.rmtree(self.testHomeDir)
def test01(self):
@ -66,21 +62,26 @@ class TableDBTestCase(unittest.TestCase):
except dbtables.TableDBError:
pass
self.tdb.CreateTable(tabname, [colname])
try:
import sys
if sys.version_info[0] < 3 :
self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
except Exception:
import traceback
traceback.print_exc()
else :
self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
1).decode("iso8859-1")}) # 8 bits
if verbose:
self.tdb._db_print()
values = self.tdb.Select(
tabname, [colname], conditions={colname: None})
values = list(values)
colval = pickle.loads(values[0][colname])
self.assertTrue(colval > 3.141 and colval < 3.142)
import sys
if sys.version_info[0] < 3 :
colval = pickle.loads(values[0][colname])
else :
colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
self.assert_(colval > 3.141)
self.assert_(colval < 3.142)
def test02(self):
@ -88,11 +89,23 @@ class TableDBTestCase(unittest.TestCase):
col0 = 'coolness factor'
col1 = 'but can it fly?'
col2 = 'Species'
testinfo = [
{col0: pickle.dumps(8, 1), col1: b'no', col2: b'Penguin'},
{col0: pickle.dumps(-1, 1), col1: b'no', col2: b'Turkey'},
{col0: pickle.dumps(9, 1), col1: b'yes', col2: b'SR-71A Blackbird'}
]
import sys
if sys.version_info[0] < 3 :
testinfo = [
{col0: pickle.dumps(8, 1), col1: 'no', col2: 'Penguin'},
{col0: pickle.dumps(-1, 1), col1: 'no', col2: 'Turkey'},
{col0: pickle.dumps(9, 1), col1: 'yes', col2: 'SR-71A Blackbird'}
]
else :
testinfo = [
{col0: pickle.dumps(8, 1).decode("iso8859-1"),
col1: 'no', col2: 'Penguin'},
{col0: pickle.dumps(-1, 1).decode("iso8859-1"),
col1: 'no', col2: 'Turkey'},
{col0: pickle.dumps(9, 1).decode("iso8859-1"),
col1: 'yes', col2: 'SR-71A Blackbird'}
]
try:
self.tdb.Drop(tabname)
@ -102,19 +115,24 @@ class TableDBTestCase(unittest.TestCase):
for row in testinfo :
self.tdb.Insert(tabname, row)
values = self.tdb.Select(tabname, [col2],
conditions={col0: lambda x: pickle.loads(x) >= 8})
values = list(values)
import sys
if sys.version_info[0] < 3 :
values = self.tdb.Select(tabname, [col2],
conditions={col0: lambda x: pickle.loads(x) >= 8})
else :
values = self.tdb.Select(tabname, [col2],
conditions={col0: lambda x:
pickle.loads(bytes(x, "iso8859-1")) >= 8})
self.assertEquals(len(values), 2)
if values[0]['Species'] == b'Penguin' :
self.assertEquals(values[1]['Species'], b'SR-71A Blackbird')
elif values[0]['Species'] == b'SR-71A Blackbird' :
self.assertEquals(values[1]['Species'], b'Penguin')
self.assertEqual(len(values), 2)
if values[0]['Species'] == 'Penguin' :
self.assertEqual(values[1]['Species'], 'SR-71A Blackbird')
elif values[0]['Species'] == 'SR-71A Blackbird' :
self.assertEqual(values[1]['Species'], 'Penguin')
else :
if verbose:
print("values= %r" % (values,))
self.fail("Wrong values returned!")
raise RuntimeError("Wrong values returned!")
def test03(self):
tabname = "test03"
@ -140,57 +158,55 @@ class TableDBTestCase(unittest.TestCase):
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': "Zero"})
self.fail("exception not raised")
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
try:
self.tdb.Select(tabname, [], conditions={'foo': '123'})
self.fail("exception not raised")
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname,
{'a': b'42',
'b': b'bad',
'c': b'meep',
'e': b'Fuzzy wuzzy was a bear'})
{'a': '42',
'b': "bad",
'c': "meep",
'e': 'Fuzzy wuzzy was a bear'})
self.tdb.Insert(tabname,
{'a': b'581750',
'b': b'good',
'd': b'bla',
'c': b'black',
'e': b'fuzzy was here'})
{'a': '581750',
'b': "good",
'd': "bla",
'c': "black",
'e': 'fuzzy was here'})
self.tdb.Insert(tabname,
{'a': b'800000',
'b': b'good',
'd': b'bla',
'c': b'black',
'e': b'Fuzzy wuzzy is a bear'})
{'a': '800000',
'b': "good",
'd': "bla",
'c': "black",
'e': 'Fuzzy wuzzy is a bear'})
if verbose:
self.tdb._db_print()
# this should return two rows
values = self.tdb.Select(tabname, ['b', 'a', 'd'],
conditions={'e': re.compile(b'wuzzy').search,
'a': re.compile(b'^[0-9]+$').match})
self.assertEquals(len(values), 2)
conditions={'e': re.compile('wuzzy').search,
'a': re.compile('^[0-9]+$').match})
self.assertEqual(len(values), 2)
# now lets delete one of them and try again
self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
values = self.tdb.Select(
tabname, ['a', 'd', 'b'],
conditions={'e': dbtables.PrefixCond('Fuzzy')})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['d'], None)
self.assertEqual(len(values), 1)
self.assertEqual(values[0]['d'], None)
values = self.tdb.Select(tabname, ['b'],
conditions={'c': lambda c: c.decode("ascii") == 'meep'})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['b'], b"bad")
conditions={'c': lambda c: c == 'meep'})
self.assertEqual(len(values), 1)
self.assertEqual(values[0]['b'], "bad")
def test04_MultiCondSelect(self):
@ -203,19 +219,19 @@ class TableDBTestCase(unittest.TestCase):
try:
self.tdb.Insert(tabname,
{'a': b"",
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': b"Zero"})
self.fail("exception not raised")
'f': "Zero"})
self.fail('Expected an exception')
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname, {'a': b"A", 'b': b"B", 'c': b"C",
'd': b"D", 'e': b"E"})
self.tdb.Insert(tabname, {'a': b"-A", 'b': b"-B", 'c': b"-C",
'd': b"-D", 'e': b"-E"})
self.tdb.Insert(tabname, {'a': b"A-", 'b': b"B-", 'c': b"C-",
'd': b"D-", 'e': b"E-"})
self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
'e': "E"})
self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
'e': "-E"})
self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
'e': "E-"})
if verbose:
self.tdb._db_print()
@ -230,7 +246,7 @@ class TableDBTestCase(unittest.TestCase):
'a': dbtables.ExactCond('A'),
'd': dbtables.PrefixCond('-')
} )
self.assertEquals(len(values), 0, values)
self.assertEqual(len(values), 0, values)
def test_CreateOrExtend(self):
@ -240,9 +256,9 @@ class TableDBTestCase(unittest.TestCase):
tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
try:
self.tdb.Insert(tabname,
{'taste': b'crap',
'filling': b'no',
'is it Guinness?': b'no'})
{'taste': 'crap',
'filling': 'no',
'is it Guinness?': 'no'})
self.fail("Insert should've failed due to bad column name")
except:
pass
@ -250,11 +266,11 @@ class TableDBTestCase(unittest.TestCase):
['name', 'taste', 'is it Guinness?'])
# these should both succeed as the table should contain the union of both sets of columns.
self.tdb.Insert(tabname, {'taste': b'crap', 'filling': b'no',
'is it Guinness?': b'no'})
self.tdb.Insert(tabname, {'taste': b'great', 'filling': b'yes',
'is it Guinness?': b'yes',
'name': b'Guinness'})
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
'is it Guinness?': 'no'})
self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
'is it Guinness?': 'yes',
'name': 'Guinness'})
def test_CondObjs(self):
@ -262,33 +278,31 @@ class TableDBTestCase(unittest.TestCase):
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
self.tdb.Insert(tabname, {'a': b"the letter A",
'b': b"the letter B",
'c': b"is for cookie"})
self.tdb.Insert(tabname, {'a': b"is for aardvark",
'e': b"the letter E",
'c': b"is for cookie",
'd': b"is for dog"})
self.tdb.Insert(tabname, {'a': b"the letter A",
'e': b"the letter E",
'c': b"is for cookie",
'p': b"is for Python"})
self.tdb.Insert(tabname, {'a': "the letter A",
'b': "the letter B",
'c': "is for cookie"})
self.tdb.Insert(tabname, {'a': "is for aardvark",
'e': "the letter E",
'c': "is for cookie",
'd': "is for dog"})
self.tdb.Insert(tabname, {'a': "the letter A",
'e': "the letter E",
'c': "is for cookie",
'p': "is for Python"})
values = self.tdb.Select(
tabname, ['p', 'e'],
conditions={'e': dbtables.PrefixCond('the l')})
values = list(values)
self.assertEquals(len(values), 2)
self.assertEquals(values[0]['e'], values[1]['e'])
self.assertNotEquals(values[0]['p'], values[1]['p'])
self.assertEqual(len(values), 2, values)
self.assertEqual(values[0]['e'], values[1]['e'], values)
self.assertNotEqual(values[0]['p'], values[1]['p'], values)
values = self.tdb.Select(
tabname, ['d', 'a'],
conditions={'a': dbtables.LikeCond('%aardvark%')})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['d'], b"is for dog")
self.assertEquals(values[0]['a'], b"is for aardvark")
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['d'], "is for dog", values)
self.assertEqual(values[0]['a'], "is for aardvark", values)
values = self.tdb.Select(tabname, None,
{'b': dbtables.Cond(),
@ -297,10 +311,9 @@ class TableDBTestCase(unittest.TestCase):
'd':dbtables.ExactCond('is for dog'),
'c':dbtables.PrefixCond('is for'),
'p':lambda s: not s})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['d'], b"is for dog")
self.assertEquals(values[0]['a'], b"is for aardvark")
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['d'], "is for dog", values)
self.assertEqual(values[0]['a'], "is for aardvark", values)
def test_Delete(self):
tabname = "test_Delete"
@ -310,30 +323,30 @@ class TableDBTestCase(unittest.TestCase):
# fail if it encountered any rows that did not have values in
# every column.
# Hunted and Squashed by <Donwulff> (Jukka Santala - donwulff@nic.fi)
self.tdb.Insert(tabname, {'x': b'X1', 'y':b'Y1'})
self.tdb.Insert(tabname, {'x': b'X2', 'y':b'Y2', 'z': b'Z2'})
self.tdb.Insert(tabname, {'x': 'X1', 'y':'Y1'})
self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
values = self.tdb.Select(tabname, ['y'],
conditions={'x': dbtables.PrefixCond('X')})
self.assertEquals(len(values), 0)
self.assertEqual(len(values), 0)
def test_Modify(self):
tabname = "test_Modify"
self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
self.tdb.Insert(tabname, {'Name': b'Index to MP3 files.doc',
'Type': b'Word', 'Access': b'8'})
self.tdb.Insert(tabname, {'Name': b'Nifty.MP3', 'Access': b'1'})
self.tdb.Insert(tabname, {'Type': b'Unknown', 'Access': b'0'})
self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
'Type': 'Word', 'Access': '8'})
self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
def set_type(type):
if type is None:
return b'MP3'
if type == None:
return 'MP3'
return type
def increment_access(count):
return str(int(count)+1).encode('ascii')
return str(int(count)+1)
def remove_value(value):
return None
@ -351,7 +364,7 @@ class TableDBTestCase(unittest.TestCase):
try:
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': b'What is your quest?'})
mappings={'Access': 'What is your quest?'})
except TypeError:
# success, the string value in mappings isn't callable
pass
@ -362,27 +375,24 @@ class TableDBTestCase(unittest.TestCase):
values = self.tdb.Select(
tabname, None,
conditions={'Type': dbtables.ExactCond('Unknown')})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['Name'], None)
self.assertEquals(values[0]['Access'], None)
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Name'], None, values)
self.assertEqual(values[0]['Access'], None, values)
# Modify value by select conditions
values = self.tdb.Select(
tabname, None,
conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['Type'], b"MP3")
self.assertEquals(values[0]['Access'], b"2")
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "MP3", values)
self.assertEqual(values[0]['Access'], "2", values)
# Make sure change applied only to select conditions
values = self.tdb.Select(
tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
values = list(values)
self.assertEquals(len(values), 1)
self.assertEquals(values[0]['Type'], b"Word")
self.assertEquals(values[0]['Access'], b"9")
self.assertEqual(len(values), 1, values)
self.assertEqual(values[0]['Type'], "Word", values)
self.assertEqual(values[0]['Access'], "9", values)
def test_suite():

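For reference, a minimal sketch of the dbtables API exercised by these tests, assuming bsddb3 is importable and Python 2 string semantics; the table name, column names and paths are illustrative, and the dbhome directory must already exist:

from bsddb3 import dbtables

tdb = dbtables.bsdTableDB(filename='tables.db', dbhome='/tmp/dbtables_env',
                          create=1)
tdb.CreateTable('beverages', ['name', 'taste', 'alcohol content'])
tdb.Insert('beverages', {'name': 'Guinness', 'taste': 'great',
                         'alcohol content': '4.2'})
tdb.Insert('beverages', {'name': 'water', 'taste': 'bland'})

# Select() takes per-column conditions: plain callables or the helper
# objects (ExactCond, PrefixCond, LikeCond, Cond) used in the tests above.
rows = tdb.Select('beverages', ['name'],
                  conditions={'taste': dbtables.ExactCond('great')})
print(rows)    # [{'name': 'Guinness'}]

tdb.Delete('beverages', conditions={'name': dbtables.PrefixCond('wat')})
tdb.close()
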
View File

@ -0,0 +1,163 @@
"""TestCases for distributed transactions.
"""
import os
import unittest
from .test_all import db, test_support, get_new_environment_path, \
get_new_database_path
try :
a=set()
except : # Python 2.3
from sets import Set as set
else :
del a
from .test_all import verbose
#----------------------------------------------------------------------
class DBTxn_distributed(unittest.TestCase):
num_txns=1234
nosync=True
must_open_db=False
def _create_env(self, must_open_db) :
self.dbenv = db.DBEnv()
self.dbenv.set_tx_max(self.num_txns)
self.dbenv.set_lk_max_lockers(self.num_txns*2)
self.dbenv.set_lk_max_locks(self.num_txns*2)
self.dbenv.set_lk_max_objects(self.num_txns*2)
if self.nosync :
self.dbenv.set_flags(db.DB_TXN_NOSYNC,True)
self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_THREAD |
db.DB_RECOVER |
db.DB_INIT_TXN | db.DB_INIT_LOG | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK, 0o666)
self.db = db.DB(self.dbenv)
self.db.set_re_len(db.DB_XIDDATASIZE)
if must_open_db :
if db.version() > (4,1) :
txn=self.dbenv.txn_begin()
self.db.open(self.filename,
db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0o666,
txn=txn)
txn.commit()
else :
self.db.open(self.filename,
db.DB_QUEUE, db.DB_CREATE | db.DB_THREAD, 0o666)
def setUp(self) :
self.homeDir = get_new_environment_path()
self.filename = "test"
return self._create_env(must_open_db=True)
def _destroy_env(self):
if self.nosync or (db.version()[:2] == (4,6)): # Known bug
self.dbenv.log_flush()
self.db.close()
self.dbenv.close()
def tearDown(self):
self._destroy_env()
test_support.rmtree(self.homeDir)
def _recreate_env(self,must_open_db) :
self._destroy_env()
self._create_env(must_open_db)
def test01_distributed_transactions(self) :
txns=set()
adapt = lambda x : x
import sys
if sys.version_info[0] >= 3 :
adapt = lambda x : bytes(x, "ascii")
# Create transactions, "prepare" them, and
# let them be garbage collected.
for i in range(self.num_txns) :
txn = self.dbenv.txn_begin()
gid = "%%%dd" %db.DB_XIDDATASIZE
gid = adapt(gid %i)
self.db.put(i, gid, txn=txn, flags=db.DB_APPEND)
txns.add(gid)
txn.prepare(gid)
del txn
self._recreate_env(self.must_open_db)
# Get "to be recovered" transactions but
# let them be garbage collected.
recovered_txns=self.dbenv.txn_recover()
self.assertEquals(self.num_txns,len(recovered_txns))
for gid,txn in recovered_txns :
self.assert_(gid in txns)
del txn
del recovered_txns
self._recreate_env(self.must_open_db)
# Get "to be recovered" transactions. Commit, abort and
# discard them.
recovered_txns=self.dbenv.txn_recover()
self.assertEquals(self.num_txns,len(recovered_txns))
discard_txns=set()
committed_txns=set()
state=0
for gid,txn in recovered_txns :
if state==0 or state==1:
committed_txns.add(gid)
txn.commit()
elif state==2 :
txn.abort()
elif state==3 :
txn.discard()
discard_txns.add(gid)
state=-1
state+=1
del txn
del recovered_txns
self._recreate_env(self.must_open_db)
# Verify the discarded transactions are still
# around, and dispose of them.
recovered_txns=self.dbenv.txn_recover()
self.assertEquals(len(discard_txns),len(recovered_txns))
for gid,txn in recovered_txns :
txn.abort()
del txn
del recovered_txns
self._recreate_env(must_open_db=True)
# Be sure there are no pending transactions.
# Also check the database size.
recovered_txns=self.dbenv.txn_recover()
self.assert_(len(recovered_txns)==0)
self.assertEquals(len(committed_txns),self.db.stat()["nkeys"])
class DBTxn_distributedSYNC(DBTxn_distributed):
nosync=False
class DBTxn_distributed_must_open_db(DBTxn_distributed):
must_open_db=True
class DBTxn_distributedSYNC_must_open_db(DBTxn_distributed):
nosync=False
must_open_db=True
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4,5) :
suite.addTest(unittest.makeSuite(DBTxn_distributed))
suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC))
if db.version() >= (4,6) :
suite.addTest(unittest.makeSuite(DBTxn_distributed_must_open_db))
suite.addTest(unittest.makeSuite(DBTxn_distributedSYNC_must_open_db))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
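
For reference, a minimal sketch of the prepare/recover flow these tests drive, assuming bsddb3, Berkeley DB >= 4.1 and Python 2 string semantics; the paths and the global transaction ID are made-up examples:

from bsddb3 import db

FLAGS = (db.DB_CREATE | db.DB_RECOVER | db.DB_THREAD | db.DB_INIT_TXN |
         db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)

env = db.DBEnv()
env.open("/tmp/xa_home", FLAGS, 0o666)
d = db.DB(env)
txn = env.txn_begin()
d.open("xa_test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
txn.commit()

# Phase one: do the work, then "prepare" it under a global ID that is
# exactly db.DB_XIDDATASIZE bytes long.  The prepared transaction now
# survives the loss of its handle (and a crash).
txn = env.txn_begin()
d.put("key", "value", txn=txn)
gid = "example-gid".ljust(db.DB_XIDDATASIZE)
txn.prepare(gid)
del txn

# Simulate a restart, then resolve whatever was left prepared.
d.close()
env.close()
env = db.DBEnv()
env.open("/tmp/xa_home", FLAGS, 0o666)
for recovered_gid, recovered_txn in env.txn_recover():
    recovered_txn.commit()        # or .abort() / .discard()
env.close()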

View File

@ -0,0 +1,195 @@
"""TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import os
import unittest
from .test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
# We're going to get warnings in this module about trying to close the db when
# its env is already closed. Let's just ignore those.
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings('ignore',
message='DB could not be closed in',
category=RuntimeWarning)
#----------------------------------------------------------------------
class DBEnvClosedEarlyCrash(unittest.TestCase):
def setUp(self):
self.homeDir = get_new_environment_path()
self.filename = "test"
def tearDown(self):
test_support.rmtree(self.homeDir)
def test01_close_dbenv_before_db(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0o666)
d = db.DB(dbenv)
d2 = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
self.assertRaises(db.DBNoSuchFileError, d2.open,
self.filename+"2", db.DB_BTREE, db.DB_THREAD, 0o666)
d.put("test","this is a test")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
dbenv.close() # This "close" should close the child db handle also
self.assertRaises(db.DBError, d.get, "test")
def test02_close_dbenv_before_dbcursor(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0o666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
next(c)
d.close() # This "close" should close the child db handle also
# db.close should close the child cursor
self.assertRaises(db.DBError,c.__next__)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
c=d.cursor()
c.first()
next(c)
dbenv.close()
# The "close" should close the child db handle also, with cursors
self.assertRaises(db.DBError, c.__next__)
def test03_close_db_before_dbcursor_without_env(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
d.put("test","this is a test")
d.put("test2","another test")
d.put("test3","another one")
self.assertEqual(d.get("test"), "this is a test", "put!=get")
c=d.cursor()
c.first()
next(c)
d.close()
# The "close" should close the child db handle also
self.assertRaises(db.DBError, c.__next__)
def test04_close_massive(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0o666)
dbs=[db.DB(dbenv) for i in range(16)]
cursors=[]
for i in dbs :
i.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
dbs[10].put("test","this is a test")
dbs[10].put("test2","another test")
dbs[10].put("test3","another one")
self.assertEqual(dbs[4].get("test"), "this is a test", "put!=get")
for i in dbs :
cursors.extend([i.cursor() for j in range(32)])
for i in dbs[::3] :
i.close()
for i in cursors[::3] :
i.close()
# Check for missing exception in DB! (after DB close)
self.assertRaises(db.DBError, dbs[9].get, "test")
# Check for missing exception in DBCursor! (after DB close)
self.assertRaises(db.DBError, cursors[101].first)
cursors[80].first()
next(cursors[80])
dbenv.close() # This "close" should close the child db handle also
# Check for missing exception! (after DBEnv close)
self.assertRaises(db.DBError, cursors[80].__next__)
def test05_close_dbenv_delete_db_success(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0o666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
dbenv.close() # This "close" should close the child db handle also
del d
try:
import gc
except ImportError:
gc = None
if gc:
# force d.__del__ [DB_dealloc] to be called
gc.collect()
def test06_close_txn_before_dup_cursor(self) :
dbenv = db.DBEnv()
dbenv.open(self.homeDir,db.DB_INIT_TXN | db.DB_INIT_MPOOL |
db.DB_INIT_LOG | db.DB_CREATE)
d = db.DB(dbenv)
txn = dbenv.txn_begin()
if db.version() < (4,1) :
d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE)
else :
d.open(self.filename, dbtype = db.DB_HASH, flags = db.DB_CREATE,
txn=txn)
d.put("XXX", "yyy", txn=txn)
txn.commit()
txn = dbenv.txn_begin()
c1 = d.cursor(txn)
c2 = c1.dup()
self.assertEquals(("XXX", "yyy"), c1.first())
import warnings
# Not interested in warnings about implicit close.
warnings.simplefilter("ignore")
txn.commit()
warnings.resetwarnings()
self.assertRaises(db.DBCursorClosedError, c2.first)
if db.version() > (4,3,0) :
def test07_close_db_before_sequence(self):
import os.path
path=os.path.join(self.homeDir,self.filename)
d = db.DB()
d.open(path, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0o666)
dbs=db.DBSequence(d)
d.close() # This "close" should close the child DBSequence also
dbs.close() # If not closed, core dump (in Berkeley DB 4.6.*)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')

View File

@ -2,22 +2,17 @@
TestCases for checking set_get_returns_none.
"""
import sys, os, string
import tempfile
from pprint import pprint
import os, string
import unittest
from bsddb import db
from .test_all import db, verbose, get_new_database_path
from bsddb.test.test_all import verbose
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#----------------------------------------------------------------------
class GetReturnsNoneTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.filename = get_new_database_path()
def tearDown(self):
try:
@ -31,25 +26,24 @@ class GetReturnsNoneTestCase(unittest.TestCase):
d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
d.set_get_returns_none(1)
for x in letters:
x = x.encode("ascii")
for x in string.letters:
d.put(x, x * 40)
data = d.get(b'bad key')
assert data == None
data = d.get('bad key')
self.assertEqual(data, None)
data = d.get(b'a')
assert data == b'a'*40
data = d.get(string.letters[0])
self.assertEqual(data, string.letters[0]*40)
count = 0
c = d.cursor()
rec = c.first()
while rec:
count = count + 1
rec = c.next()
rec = next(c)
assert rec == None
assert count == 52
self.assertEqual(rec, None)
self.assertEqual(count, len(string.letters))
c.close()
d.close()
@ -60,15 +54,14 @@ class GetReturnsNoneTestCase(unittest.TestCase):
d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
d.set_get_returns_none(0)
for x in letters:
x = x.encode("ascii")
for x in string.letters:
d.put(x, x * 40)
self.assertRaises(db.DBNotFoundError, d.get, b'bad key')
self.assertRaises(KeyError, d.get, b'bad key')
self.assertRaises(db.DBNotFoundError, d.get, 'bad key')
self.assertRaises(KeyError, d.get, 'bad key')
data = d.get(b'a')
assert data == b'a'*40
data = d.get(string.letters[0])
self.assertEqual(data, string.letters[0]*40)
count = 0
exceptionHappened = 0
@ -77,14 +70,14 @@ class GetReturnsNoneTestCase(unittest.TestCase):
while rec:
count = count + 1
try:
rec = c.next()
rec = next(c)
except db.DBNotFoundError: # end of the records
exceptionHappened = 1
break
assert rec != None
assert exceptionHappened
assert count == 52
self.assertNotEqual(rec, None)
self.assert_(exceptionHappened)
self.assertEqual(count, len(string.letters))
c.close()
d.close()
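
For reference, a short sketch of the set_get_returns_none() switch covered by these tests, assuming bsddb3 and Python 2 string semantics; "tmp.db" is an illustrative filename:

from bsddb3 import db

d = db.DB()
d.open("tmp.db", db.DB_BTREE, db.DB_CREATE)
d.put("present", "value")

d.set_get_returns_none(1)          # missing keys: get() returns None
assert d.get("missing") is None

d.set_get_returns_none(0)          # missing keys: get() raises
try:
    d.get("missing")
except db.DBNotFoundError:         # also catchable as KeyError
    pass

d.close()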

View File

@ -1,27 +1,12 @@
"""TestCases for using the DB.join and DBCursor.join_item methods.
"""
import shutil
import sys, os
import tempfile
import time
from pprint import pprint
try:
from threading import Thread, current_thread
have_threads = 1
except ImportError:
have_threads = 0
import os
import unittest
from bsddb.test.test_all import verbose
from bsddb import db, dbshelve, StringKeys
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbshelve, test_support, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
@ -44,18 +29,12 @@ ColorIndex = [
('black', "shotgun"),
]
def ASCII(s):
return s.encode("ascii")
class JoinTestCase(unittest.TestCase):
keytype = ''
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK )
@ -72,13 +51,13 @@ class JoinTestCase(unittest.TestCase):
# create and populate primary index
priDB = db.DB(self.env)
priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
[priDB.put(ASCII(k),ASCII(v)) for k,v in ProductIndex]
list(map(lambda t, priDB=priDB: priDB.put(*t), ProductIndex))
# create and populate secondary index
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
[secDB.put(ASCII(k),ASCII(v)) for k,v in ColorIndex]
list(map(lambda t, secDB=secDB: secDB.put(*t), ColorIndex))
sCursor = None
jCursor = None
@ -87,19 +66,19 @@ class JoinTestCase(unittest.TestCase):
sCursor = secDB.cursor()
# Don't do the .set() in an assert, or you can get a bogus failure
# when running python -O
tmp = sCursor.set(b'red')
assert tmp
tmp = sCursor.set('red')
self.assert_(tmp)
# FIXME: jCursor doesn't properly hold a reference to its
# cursors; if they are closed before jCursor is used it
# can cause a crash.
jCursor = priDB.join([sCursor])
if jCursor.get(0) != (b'apple', b"Convenience Store"):
if jCursor.get(0) != ('apple', "Convenience Store"):
self.fail("join cursor positioned wrong")
if jCursor.join_item() != b'chainsaw':
if jCursor.join_item() != 'chainsaw':
self.fail("DBCursor.join_item returned wrong item")
if jCursor.get(0)[0] != b'strawberry':
if jCursor.get(0)[0] != 'strawberry':
self.fail("join cursor returned wrong thing")
if jCursor.get(0): # there were only three red items to return
self.fail("join cursor returned too many items")

View File

@ -2,39 +2,31 @@
TestCases for testing the locking sub-system.
"""
import sys
import tempfile
import time
try:
from threading import Thread, current_thread
have_threads = 1
except ImportError:
have_threads = 0
import unittest
from bsddb.test.test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
class LockingTestCase(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
self.homeDir = tempfile.mkdtemp('.test_lock')
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.env.open(self.homeDir, db.DB_THREAD | db.DB_INIT_MPOOL |
db.DB_INIT_LOCK | db.DB_CREATE)
@ -53,15 +45,13 @@ class LockingTestCase(unittest.TestCase):
anID = self.env.lock_id()
if verbose:
print("locker ID: %s" % anID)
lock = self.env.lock_get(anID, b"some locked thing", db.DB_LOCK_WRITE)
lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
if verbose:
print("Aquired lock: %s" % lock)
time.sleep(1)
self.env.lock_put(lock)
if verbose:
print("Released lock: %s" % lock)
if db.version() >= (4,0):
self.env.lock_id_free(anID)
self.env.lock_id_free(anID)
def test02_threaded(self):
@ -71,34 +61,35 @@ class LockingTestCase(unittest.TestCase):
threads = []
threads.append(Thread(target = self.theThread,
args=(5, db.DB_LOCK_WRITE)))
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
args=(db.DB_LOCK_READ,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
args=(db.DB_LOCK_WRITE,)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
args=(db.DB_LOCK_WRITE,)))
for t in threads:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
t.join()
def _DISABLED_test03_lock_timeout(self):
# Disabled as this test crashes the python interpreter built in
# debug mode with:
# Fatal Python error: UNREF invalid object
# the error occurs as marked below.
def test03_lock_timeout(self):
self.env.set_timeout(0, db.DB_SET_LOCK_TIMEOUT)
self.env.set_timeout(0, db.DB_SET_TXN_TIMEOUT)
self.env.set_timeout(123456, db.DB_SET_LOCK_TIMEOUT)
@ -117,7 +108,11 @@ class LockingTestCase(unittest.TestCase):
deadlock_detection.end=False
deadlock_detection.count=0
t=Thread(target=deadlock_detection)
t.daemon = True
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.env.set_timeout(100000, db.DB_SET_LOCK_TIMEOUT)
anID = self.env.lock_id()
@ -125,8 +120,6 @@ class LockingTestCase(unittest.TestCase):
self.assertNotEqual(anID, anID2)
lock = self.env.lock_get(anID, "shared lock", db.DB_LOCK_WRITE)
start_time=time.time()
# FIXME: I see the UNREF crash as the interpreter tries to exit
# from this call to lock_get.
self.assertRaises(db.DBLockNotGrantedError,
self.env.lock_get,anID2, "shared lock", db.DB_LOCK_READ)
end_time=time.time()
@ -135,15 +128,19 @@ class LockingTestCase(unittest.TestCase):
self.env.lock_put(lock)
t.join()
if db.version() >= (4,0):
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
self.env.lock_id_free(anID)
self.env.lock_id_free(anID2)
if db.version() >= (4,6):
self.assertTrue(deadlock_detection.count>0)
def theThread(self, sleepTime, lockType):
name = current_thread().name
def theThread(self, lockType):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if lockType == db.DB_LOCK_WRITE:
lt = "write"
else:
@ -153,17 +150,16 @@ class LockingTestCase(unittest.TestCase):
if verbose:
print("%s: locker ID: %s" % (name, anID))
lock = self.env.lock_get(anID, b"some locked thing", lockType)
if verbose:
print("%s: Aquired %s lock: %s" % (name, lt, lock))
for i in range(1000) :
lock = self.env.lock_get(anID, "some locked thing", lockType)
if verbose:
print("%s: Aquired %s lock: %s" % (name, lt, lock))
time.sleep(sleepTime)
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
self.env.lock_put(lock)
if verbose:
print("%s: Released %s lock: %s" % (name, lt, lock))
if db.version() >= (4,0):
self.env.lock_id_free(anID)
self.env.lock_id_free(anID)
#----------------------------------------------------------------------
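
For reference, the basic lock_get()/lock_put() cycle these tests drive, assuming bsddb3 and Python 2 string semantics; the environment path is illustrative:

from bsddb3 import db

env = db.DBEnv()
env.open("/tmp/lock_env", db.DB_THREAD | db.DB_INIT_MPOOL |
         db.DB_INIT_LOCK | db.DB_CREATE)

locker = env.lock_id()                     # allocate a locker ID
lock = env.lock_get(locker, "some locked thing", db.DB_LOCK_WRITE)
# ... the protected work happens here ...
env.lock_put(lock)                         # release the lock
env.lock_id_free(locker)                   # free the ID (Berkeley DB >= 4.0)
env.close()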

View File

@ -2,34 +2,16 @@
"""
import os
import shutil
import sys
import unittest
import tempfile
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbshelve, hashopen
except ImportError:
# For the bundled bsddb
from bsddb import db, dbshelve, hashopen
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, dbshelve, hashopen, test_support, get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class MiscTestCase(unittest.TestCase):
def setUp(self):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except OSError:
pass
self.homeDir = get_new_environment_path()
def tearDown(self):
test_support.unlink(self.filename)
@ -38,14 +20,18 @@ class MiscTestCase(unittest.TestCase):
def test01_badpointer(self):
dbs = dbshelve.open(self.filename)
dbs.close()
self.assertRaises(db.DBError, dbs.get, b"foo")
self.assertRaises(db.DBError, dbs.get, "foo")
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
assert env.db_home is None
self.assert_(env.db_home is None)
env.open(self.homeDir, db.DB_CREATE)
assert self.homeDir == env.db_home
import sys
if sys.version_info[0] < 3 :
self.assertEqual(self.homeDir, env.db_home)
else :
self.assertEqual(bytes(self.homeDir, "ascii"), env.db_home)
def test03_repr_closed_db(self):
db = hashopen(self.filename)
@ -53,6 +39,18 @@ class MiscTestCase(unittest.TestCase):
rp = repr(db)
self.assertEquals(rp, "{}")
def test04_repr_db(self) :
db = hashopen(self.filename)
d = {}
for i in range(100) :
db[repr(i)] = repr(100*i)
d[repr(i)] = repr(100*i)
db.close()
db = hashopen(self.filename)
rp = repr(db)
self.assertEquals(rp, repr(d))
db.close()
# http://sourceforge.net/tracker/index.php?func=detail&aid=1708868&group_id=13900&atid=313900
#
# See the bug report for details.
@ -60,65 +58,65 @@ class MiscTestCase(unittest.TestCase):
# The problem was that make_key_dbt() was not allocating a copy of
# string keys but FREE_DBT() was always being told to free it when the
# database was opened with DB_THREAD.
def test04_double_free_make_key_dbt(self):
def test05_double_free_make_key_dbt(self):
try:
db1 = db.DB()
db1.open(self.filename, None, db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
curs = db1.cursor()
t = curs.get(b"/foo", db.DB_SET)
t = curs.get("/foo", db.DB_SET)
# double free happened during exit from DBC_get
finally:
db1.close()
os.unlink(self.filename)
test_support.unlink(self.filename)
def test05_key_with_null_bytes(self):
def test06_key_with_null_bytes(self):
try:
db1 = db.DB()
db1.open(self.filename, None, db.DB_HASH, db.DB_CREATE)
db1[b'a'] = b'eh?'
db1[b'a\x00'] = b'eh zed.'
db1[b'a\x00a'] = b'eh zed eh?'
db1[b'aaa'] = b'eh eh eh!'
keys = db1.keys()
db1['a'] = 'eh?'
db1['a\x00'] = 'eh zed.'
db1['a\x00a'] = 'eh zed eh?'
db1['aaa'] = 'eh eh eh!'
keys = list(db1.keys())
keys.sort()
self.assertEqual([b'a', b'a\x00', b'a\x00a', b'aaa'], keys)
self.assertEqual(db1[b'a'], b'eh?')
self.assertEqual(db1[b'a\x00'], b'eh zed.')
self.assertEqual(db1[b'a\x00a'], b'eh zed eh?')
self.assertEqual(db1[b'aaa'], b'eh eh eh!')
self.assertEqual(['a', 'a\x00', 'a\x00a', 'aaa'], keys)
self.assertEqual(db1['a'], 'eh?')
self.assertEqual(db1['a\x00'], 'eh zed.')
self.assertEqual(db1['a\x00a'], 'eh zed eh?')
self.assertEqual(db1['aaa'], 'eh eh eh!')
finally:
db1.close()
os.unlink(self.filename)
test_support.unlink(self.filename)
def test_DB_set_flags_persists(self):
def test07_DB_set_flags_persists(self):
if db.version() < (4,2):
# The get_flags API required for this to work is only available
# in BerkeleyDB >= 4.2
# in Berkeley DB >= 4.2
return
try:
db1 = db.DB()
db1.set_flags(db.DB_DUPSORT)
db1.open(self.filename, db.DB_HASH, db.DB_CREATE)
db1[b'a'] = b'eh'
db1[b'a'] = b'A'
self.assertEqual([(b'a', b'A')], db1.items())
db1.put(b'a', b'Aa')
self.assertEqual([(b'a', b'A'), (b'a', b'Aa')], db1.items())
db1['a'] = 'eh'
db1['a'] = 'A'
self.assertEqual([('a', 'A')], list(db1.items()))
db1.put('a', 'Aa')
self.assertEqual([('a', 'A'), ('a', 'Aa')], list(db1.items()))
db1.close()
db1 = db.DB()
# no set_flags call, we're testing that it reads and obeys
# the flags on open.
db1.open(self.filename, db.DB_HASH)
self.assertEqual([(b'a', b'A'), (b'a', b'Aa')], db1.items())
self.assertEqual([('a', 'A'), ('a', 'Aa')], list(db1.items()))
# if it read the flags right this will replace all values
# for key b'a' instead of adding a new one. (as a dict should)
db1[b'a'] = b'new A'
self.assertEqual([(b'a', b'new A')], db1.items())
# for key 'a' instead of adding a new one. (as a dict should)
db1['a'] = 'new A'
self.assertEqual([('a', 'new A')], list(db1.items()))
finally:
db1.close()
os.unlink(self.filename)
test_support.unlink(self.filename)
#----------------------------------------------------------------------

View File

@ -1,23 +1,13 @@
import shutil
import sys, os
import os
import pickle
import tempfile
import unittest
import tempfile
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError as e:
# For Python 2.3
from bsddb import db
try:
from bsddb3 import test_support
import pickle
except ImportError:
from test import support as test_support
pickle = None
import unittest
from .test_all import db, test_support, get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
@ -26,10 +16,7 @@ class pickleTestCase(unittest.TestCase):
db_name = 'test-dbobj.db'
def setUp(self):
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
self.homeDir = get_new_environment_path()
def tearDown(self):
if hasattr(self, 'db'):
@ -43,10 +30,10 @@ class pickleTestCase(unittest.TestCase):
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = db.DB(self.env)
self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
self.db.put(b'spam', b'eggs')
self.assertEqual(self.db[b'spam'], b'eggs')
self.db.put('spam', 'eggs')
self.assertEqual(self.db['spam'], 'eggs')
try:
self.db.put(b'spam', b'ham', flags=db.DB_NOOVERWRITE)
self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
except db.DBError as egg:
pickledEgg = pickle.dumps(egg)
#print repr(pickledEgg)
@ -54,7 +41,7 @@ class pickleTestCase(unittest.TestCase):
if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
raise Exception(rottenEgg, '!=', egg)
else:
self.fail("where's my DBError exception?!?")
raise Exception("where's my DBError exception?!?")
self.db.close()
self.env.close()
@ -62,6 +49,10 @@ class pickleTestCase(unittest.TestCase):
def test01_pickle_DBError(self):
self._base_test_pickle_DBError(pickle=pickle)
if pickle:
def test02_cPickle_DBError(self):
self._base_test_pickle_DBError(pickle=pickle)
#----------------------------------------------------------------------
def test_suite():

View File

@ -2,27 +2,17 @@
TestCases for exercising a Queue DB.
"""
import sys, os, string
import tempfile
import os, string
from pprint import pprint
import unittest
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
from bsddb.test.test_all import verbose
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
from .test_all import db, verbose, get_new_database_path
#----------------------------------------------------------------------
class SimpleQueueTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
self.filename = get_new_database_path()
def tearDown(self):
try:
@ -46,17 +36,17 @@ class SimpleQueueTestCase(unittest.TestCase):
print("before appends" + '-' * 30)
pprint(d.stat())
for x in letters:
d.append(x.encode('ascii') * 40)
for x in string.letters:
d.append(x * 40)
assert len(d) == 52
self.assertEqual(len(d), len(string.letters))
d.put(100, b"some more data")
d.put(101, b"and some more ")
d.put(75, b"out of order")
d.put(1, b"replacement data")
d.put(100, "some more data")
d.put(101, "and some more ")
d.put(75, "out of order")
d.put(1, "replacement data")
assert len(d) == 55
self.assertEqual(len(d), len(string.letters)+3)
if verbose:
print("before close" + '-' * 30)
@ -71,7 +61,11 @@ class SimpleQueueTestCase(unittest.TestCase):
print("after open" + '-' * 30)
pprint(d.stat())
d.append(b"one more")
# Test "txn" as a positional parameter
d.append("one more", None)
# Test "txn" as a keyword parameter
d.append("another one", txn=None)
c = d.cursor()
if verbose:
@ -89,9 +83,9 @@ class SimpleQueueTestCase(unittest.TestCase):
print("after consume loop" + '-' * 30)
pprint(d.stat())
assert len(d) == 0, \
self.assertEqual(len(d), 0, \
"if you see this message then you need to rebuild " \
"BerkeleyDB 3.1.17 with the patch in patches/qam_stat.diff"
"Berkeley DB 3.1.17 with the patch in patches/qam_stat.diff")
d.close()
@ -118,17 +112,17 @@ class SimpleQueueTestCase(unittest.TestCase):
print("before appends" + '-' * 30)
pprint(d.stat())
for x in letters:
d.append(x.encode('ascii') * 40)
for x in string.letters:
d.append(x * 40)
assert len(d) == 52
self.assertEqual(len(d), len(string.letters))
d.put(100, b"some more data")
d.put(101, b"and some more ")
d.put(75, b"out of order")
d.put(1, b"replacement data")
d.put(100, "some more data")
d.put(101, "and some more ")
d.put(75, "out of order")
d.put(1, "replacement data")
assert len(d) == 55
self.assertEqual(len(d), len(string.letters)+3)
if verbose:
print("before close" + '-' * 30)
@ -144,7 +138,7 @@ class SimpleQueueTestCase(unittest.TestCase):
print("after open" + '-' * 30)
pprint(d.stat())
d.append(b"one more")
d.append("one more")
if verbose:
print("after append" + '-' * 30)

View File

@ -2,26 +2,11 @@
"""
import os
import shutil
import sys
import errno
import tempfile
from pprint import pprint
import unittest
from bsddb.test.test_all import verbose
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
# For Python 2.3
from bsddb import db
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, test_support, verbose, get_new_environment_path, get_new_database_path
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
@ -29,8 +14,13 @@ letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#----------------------------------------------------------------------
class SimpleRecnoTestCase(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertFalse(self, expr, msg=None):
self.failIf(expr,msg=msg)
def setUp(self):
self.filename = tempfile.mktemp()
self.filename = get_new_database_path()
self.homeDir = None
def tearDown(self):
@ -47,9 +37,9 @@ class SimpleRecnoTestCase(unittest.TestCase):
d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
for x in letters:
recno = d.append(x.encode('ascii') * 60)
assert type(recno) == type(0)
assert recno >= 1
recno = d.append(x * 60)
self.assertEqual(type(recno), type(0))
self.assert_(recno >= 1)
if verbose:
print(recno, end=' ')
@ -64,20 +54,24 @@ class SimpleRecnoTestCase(unittest.TestCase):
if verbose:
print(data)
assert type(data) == bytes
assert data == d.get(recno)
self.assertEqual(type(data), type(""))
self.assertEqual(data, d.get(recno))
try:
data = d[0] # This should raise a KeyError!?!?!
except db.DBInvalidArgError as val:
assert val.args[0] == db.EINVAL
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.EINVAL)
else :
self.assertEqual(val.args[0], db.EINVAL)
if verbose: print(val)
else:
self.fail("expected exception")
# test that has_key raises DB exceptions (fixed in pybsddb 4.3.2)
try:
d.has_key(0)
0 in d
except db.DBError as val:
pass
else:
@ -96,35 +90,35 @@ class SimpleRecnoTestCase(unittest.TestCase):
if get_returns_none:
self.fail("unexpected exception")
else:
assert data == None
self.assertEqual(data, None)
keys = d.keys()
keys = list(d.keys())
if verbose:
print(keys)
assert type(keys) == type([])
assert type(keys[0]) == type(123)
assert len(keys) == len(d)
self.assertEqual(type(keys), type([]))
self.assertEqual(type(keys[0]), type(123))
self.assertEqual(len(keys), len(d))
items = d.items()
items = list(d.items())
if verbose:
pprint(items)
assert type(items) == type([])
assert type(items[0]) == type(())
assert len(items[0]) == 2
assert type(items[0][0]) == type(123)
assert type(items[0][1]) == bytes
assert len(items) == len(d)
self.assertEqual(type(items), type([]))
self.assertEqual(type(items[0]), type(()))
self.assertEqual(len(items[0]), 2)
self.assertEqual(type(items[0][0]), type(123))
self.assertEqual(type(items[0][1]), type(""))
self.assertEqual(len(items), len(d))
assert d.has_key(25)
self.assert_(25 in d)
del d[25]
assert not d.has_key(25)
self.assertFalse(25 in d)
d.delete(13)
assert not d.has_key(13)
self.assertFalse(13 in d)
data = d.get_both(26, b"z" * 60)
assert data == b"z" * 60, 'was %r' % data
data = d.get_both(26, "z" * 60)
self.assertEqual(data, "z" * 60, 'was %r' % data)
if verbose:
print(data)
@ -137,18 +131,18 @@ class SimpleRecnoTestCase(unittest.TestCase):
while rec:
if verbose:
print(rec)
rec = c.next()
rec = next(c)
c.set(50)
rec = c.current()
if verbose:
print(rec)
c.put(-1, b"a replacement record", db.DB_CURRENT)
c.put(-1, "a replacement record", db.DB_CURRENT)
c.set(50)
rec = c.current()
assert rec == (50, b"a replacement record")
self.assertEqual(rec, (50, "a replacement record"))
if verbose:
print(rec)
@ -159,7 +153,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
# test that non-existent key lookups work (and that
# DBC_set_range doesn't have a memleak under valgrind)
rec = c.set_range(999999)
assert rec == None
self.assertEqual(rec, None)
if verbose:
print(rec)
@ -171,8 +165,8 @@ class SimpleRecnoTestCase(unittest.TestCase):
c = d.cursor()
# put a record beyond the consecutive end of the recno's
d[100] = b"way out there"
assert d[100] == b"way out there"
d[100] = "way out there"
self.assertEqual(d[100], "way out there")
try:
data = d[99]
@ -187,7 +181,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
if get_returns_none:
self.fail("unexpected DBKeyEmptyError exception")
else:
assert val.args[0] == db.DB_KEYEMPTY
self.assertEqual(val[0], db.DB_KEYEMPTY)
if verbose: print(val)
else:
if not get_returns_none:
@ -197,7 +191,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
while rec:
if verbose:
print(rec)
rec = c.next()
rec = next(c)
c.close()
d.close()
@ -209,7 +203,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
just a line in the file, but you can set a different record delimiter
if needed.
"""
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
homeDir = get_new_environment_path()
self.homeDir = homeDir
source = os.path.join(homeDir, 'test_recno.txt')
if not os.path.isdir(homeDir):
@ -226,7 +220,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
data = "The quick brown fox jumped over the lazy dog".split()
for datum in data:
d.append(datum.encode('ascii'))
d.append(datum)
d.sync()
d.close()
@ -238,15 +232,15 @@ class SimpleRecnoTestCase(unittest.TestCase):
print(data)
print(text.split('\n'))
assert text.split('\n') == data
self.assertEqual(text.split('\n'), data)
# open as a DB again
d = db.DB()
d.set_re_source(source)
d.open(self.filename, db.DB_RECNO)
d[3] = b'reddish-brown'
d[8] = b'comatose'
d[3] = 'reddish-brown'
d[8] = 'comatose'
d.sync()
d.close()
@ -257,8 +251,8 @@ class SimpleRecnoTestCase(unittest.TestCase):
print(text)
print(text.split('\n'))
assert text.split('\n') == \
"The quick reddish-brown fox jumped over the comatose dog".split()
self.assertEqual(text.split('\n'),
"The quick reddish-brown fox jumped over the comatose dog".split())
def test03_FixedLength(self):
d = db.DB()
@ -268,14 +262,18 @@ class SimpleRecnoTestCase(unittest.TestCase):
d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
for x in letters:
d.append(x.encode('ascii') * 35) # These will be padded
d.append(x * 35) # These will be padded
d.append(b'.' * 40) # this one will be exact
d.append('.' * 40) # this one will be exact
try: # this one will fail
d.append(b'bad' * 20)
d.append('bad' * 20)
except db.DBInvalidArgError as val:
assert val.args[0] == db.EINVAL
import sys
if sys.version_info[0] < 3 :
self.assertEqual(val[0], db.EINVAL)
else :
self.assertEqual(val.args[0], db.EINVAL)
if verbose: print(val)
else:
self.fail("expected exception")
@ -285,7 +283,7 @@ class SimpleRecnoTestCase(unittest.TestCase):
while rec:
if verbose:
print(rec)
rec = c.next()
rec = next(c)
c.close()
d.close()
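
For reference, a short sketch of the text-file-backed RECNO database that the backing-source test above drives, assuming bsddb3 and Python 2 string semantics; the filenames are illustrative:

from bsddb3 import db

f = open("records.txt", "w")
f.write("alpha\nbeta\ngamma\n")
f.close()

d = db.DB()
d.set_re_source("records.txt")     # each line of the file is one record
d.open("recno.db", db.DB_RECNO, db.DB_CREATE)

print(d[2])                        # 'beta' -- record numbers start at 1
d[2] = "BETA"                      # written back to the source on sync/close
d.sync()
d.close()

print(open("records.txt").read())  # alpha / BETA / gamma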

View File

@ -0,0 +1,444 @@
"""TestCases for distributed transactions.
"""
import os
import time
import unittest
from .test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplicationManager(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
client_port = test_support.find_unused_port()
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEquals(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEquals(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
self.assertEquals(self.dbenvMaster.rep_get_nsites(),2)
self.assertEquals(self.dbenvClient.rep_get_nsites(),2)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
d = self.dbenvMaster.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], client_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEquals(len(d), 1)
self.assertEquals(d[0][0], "127.0.0.1")
self.assertEquals(d[0][1], master_port)
self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
(d[0][2]==db.DB_REPMGR_DISCONNECTED))
if db.version() >= (4,6) :
d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0o666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
class DBBaseReplication(DBReplicationManager):
def setUp(self) :
DBReplicationManager.setUp(self)
def confirmed_master(a,b,c) :
if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
self.confirmed_master = True
def client_startupdone(a,b,c) :
if b == db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone = True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
import queue
self.m2c = queue.Queue()
self.c2m = queue.Queue()
# There are only two nodes, so we don't need to
# make any routing decisions
def m2c(dbenv, control, rec, lsnp, envid, flags) :
self.m2c.put((control, rec))
def c2m(dbenv, control, rec, lsnp, envid, flags) :
self.c2m.put((control, rec))
self.dbenvMaster.rep_set_transport(13,m2c)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_transport(3,c2m)
self.dbenvClient.rep_set_priority(0)
self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
self.assertEquals(self.dbenvClient.rep_get_priority(),0)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
def thread_master() :
return self.thread_do(self.dbenvMaster, self.c2m, 3,
self.master_doing_election, True)
def thread_client() :
return self.thread_do(self.dbenvClient, self.m2c, 13,
self.client_doing_election, False)
from threading import Thread
t_m=Thread(target=thread_master)
t_c=Thread(target=thread_client)
import sys
if sys.version_info[0] < 3 :
t_m.setDaemon(True)
t_c.setDaemon(True)
else :
t_m.daemon = True
t_c.daemon = True
self.t_m = t_m
self.t_c = t_c
self.dbMaster = self.dbClient = None
self.master_doing_election=[False]
self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0o666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v==None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v!=None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v==None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEquals(None, v)
if db.version() >= (4,7) :
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
if db.version() >= (4,6) :
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v == None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
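# rep_process_message() returns a (status, info) pair; a status of
# DB_REP_HOLDELECTION means this site should call for an election.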
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4, 6) :
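# The Replication Manager API can be unavailable even on 4.6+ builds (for
# example, when Berkeley DB was compiled without replication support), so
# probe for it at run time instead of trusting the version number alone.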
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')


@@ -1,33 +1,19 @@
import unittest
import os
import shutil
import sys
import tempfile
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db
except ImportError:
from bsddb import db
from bsddb.test.test_all import verbose
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
from .test_all import db, test_support, get_new_environment_path, get_new_database_path
class DBSequenceTest(unittest.TestCase):
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
self.int_32_max = 0x100000000
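# 0x100000000 is 2**32; despite the attribute name, it is used to build
# sequence values beyond the 32-bit range.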
self.homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
try:
os.mkdir(self.homeDir)
except os.error:
pass
tempfile.tempdir = self.homeDir
self.filename = os.path.split(tempfile.mktemp())[1]
tempfile.tempdir = None
self.homeDir = get_new_environment_path()
self.filename = "test"
self.dbenv = db.DBEnv()
self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0o666)
@@ -52,39 +38,39 @@ class DBSequenceTest(unittest.TestCase):
start_value = 10 * self.int_32_max
self.assertEqual(0xA00000000, start_value)
self.assertEquals(None, self.seq.init_value(start_value))
self.assertEquals(None, self.seq.open(key=b'id', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
self.assertEquals(start_value, self.seq.get(5))
self.assertEquals(start_value + 5, self.seq.get())
def test_remove(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.remove(txn=None, flags=0))
del self.seq
def test_get_key(self):
self.seq = db.DBSequence(self.d, flags=0)
key = b'foo'
key = 'foo'
self.assertEquals(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
self.assertEquals(key, self.seq.get_key())
def test_get_dbp(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(self.d, self.seq.get_dbp())
def test_cachesize(self):
self.seq = db.DBSequence(self.d, flags=0)
cache_size = 10
self.assertEquals(None, self.seq.set_cachesize(cache_size))
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(cache_size, self.seq.get_cachesize())
def test_flags(self):
self.seq = db.DBSequence(self.d, flags=0)
flag = db.DB_SEQ_WRAP;
self.assertEquals(None, self.seq.set_flags(flag))
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(flag, self.seq.get_flags() & flag)
def test_range(self):
@@ -92,17 +78,59 @@ class DBSequenceTest(unittest.TestCase):
seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
self.assertEquals(None, self.seq.set_range(seq_range))
self.seq.init_value(seq_range[0])
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(seq_range, self.seq.get_range())
def test_stat(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEquals(None, self.seq.open(key=b'foo', txn=None, flags=db.DB_CREATE))
self.assertEquals(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
stat = self.seq.stat()
for param in ('nowait', 'min', 'max', 'value', 'current',
'flags', 'cache_size', 'last_value', 'wait'):
self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
if db.version() >= (4,7) :
# This code checks a crash fixed in Berkeley DB 4.7
def test_stat_crash(self) :
d=db.DB()
d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE) # In RAM
seq = db.DBSequence(d, flags=0)
self.assertRaises(db.DBNotFoundError, seq.open,
key='id', txn=None, flags=0)
self.assertRaises(db.DBInvalidArgError, seq.stat)
d.close()
def test_64bits(self) :
# We don't use both extremes because they are problematic
value_plus=(1<<63)-2
self.assertEquals(9223372036854775806,value_plus)
value_minus=(-1<<63)+1 # Two's complement
self.assertEquals(-9223372036854775807,value_minus)
self.seq = db.DBSequence(self.d, flags=0)
self.assertEquals(None, self.seq.init_value(value_plus-1))
self.assertEquals(None, self.seq.open(key='id', txn=None,
flags=db.DB_CREATE))
self.assertEquals(value_plus-1, self.seq.get(1))
self.assertEquals(value_plus, self.seq.get(1))
self.seq.remove(txn=None, flags=0)
self.seq = db.DBSequence(self.d, flags=0)
self.assertEquals(None, self.seq.init_value(value_minus))
self.assertEquals(None, self.seq.open(key='id', txn=None,
flags=db.DB_CREATE))
self.assertEquals(value_minus, self.seq.get(1))
self.assertEquals(value_minus+1, self.seq.get(1))
def test_multiple_close(self):
self.seq = db.DBSequence(self.d)
self.seq.close() # You can close a Sequence multiple times
self.seq.close()
self.seq.close()
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (4,3):


@@ -5,17 +5,9 @@ import os
import sys
import time
import errno
import tempfile
from pprint import pprint
from random import random
DASH = b'-'
try:
from threading import Thread, current_thread
have_threads = True
except ImportError:
have_threads = False
DASH = '-'
try:
WindowsError
@@ -24,19 +16,16 @@ except NameError:
pass
import unittest
from bsddb.test.test_all import verbose
from .test_all import db, dbutils, test_support, verbose, have_threads, \
get_new_environment_path, get_new_database_path
try:
# For Pythons w/distutils pybsddb
from bsddb3 import db, dbutils
except ImportError:
# For Python 2.3
from bsddb import db, dbutils
try:
from bsddb3 import test_support
except ImportError:
from test import support as test_support
if have_threads :
from threading import Thread
import sys
if sys.version_info[0] < 3 :
from threading import currentThread
else :
from threading import current_thread as currentThread
#----------------------------------------------------------------------
@@ -47,16 +36,16 @@ class BaseThreadedTestCase(unittest.TestCase):
dbsetflags = 0
envflags = 0
import sys
if sys.version_info[:3] < (2, 4, 0):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def setUp(self):
if verbose:
dbutils._deadlock_VerboseFile = sys.stdout
homeDir = os.path.join(tempfile.gettempdir(), 'db_home%d'%os.getpid())
self.homeDir = homeDir
try:
os.mkdir(homeDir)
except OSError as e:
if e.errno != errno.EEXIST: raise
self.homeDir = get_new_environment_path()
self.env = db.DBEnv()
self.setEnvOpts()
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
@@ -78,33 +67,6 @@ class BaseThreadedTestCase(unittest.TestCase):
def makeData(self, key):
return DASH.join([key] * 5)
def _writerThread(self, *args, **kwargs):
raise RuntimeError("must override this in a subclass")
def _readerThread(self, *args, **kwargs):
raise RuntimeError("must override this in a subclass")
def writerThread(self, *args, **kwargs):
try:
self._writerThread(*args, **kwargs)
except db.DBLockDeadlockError as e:
if verbose:
print(current_thread().name, 'died from', e)
else:
if verbose:
print(current_thread().name, "finished.")
def readerThread(self, *args, **kwargs):
try:
self._readerThread(*args, **kwargs)
except db.DBLockDeadlockError as e:
if verbose:
print(current_thread().name, 'died from', e)
else:
if verbose:
print(current_thread().name, "finished.")
#----------------------------------------------------------------------
@@ -121,60 +83,91 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
print('\n', '-=' * 30)
print("Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__)
print('Using:', self.homeDir, self.filename)
threads = []
wt = Thread(target = self.writerThread,
args = (self.d, self.records),
name = 'the writer',
)#verbose = verbose)
threads.append(wt)
keys=list(range(self.records))
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
for t in threads:
writers=[]
for x in range(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
for t in writers:
t.join()
for t in readers:
t.join()
def _writerThread(self, d, howMany):
name = current_thread().name
start = 0
stop = howMany
if verbose:
print(name+": creating records", start, "-", stop)
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for x in range(start, stop):
key = ('%04d' % x).encode("ascii")
d.put(key, self.makeData(key))
if verbose and x > start and x % 50 == 0:
print(name+": records", start, "-", x, "finished")
if verbose:
print("%s: creating records %d - %d" % (name, start, stop))
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print("%s: records %d - %d finished" % (name, start, x))
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print("%s: finished creating records" % name)
## # Each write-cursor will be exclusive, the only one that can update the DB...
## if verbose: print "%s: deleting a few records" % name
## c = d.cursor(flags = db.DB_WRITECURSOR)
## for x in range(10):
## key = int(random() * howMany) + start
## key = '%04d' % key
## if d.has_key(key):
## c.set(key)
## c.delete()
if verbose:
print("%s: thread finished" % name)
## c.close()
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
def _readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum)
name = current_thread().name
for loop in range(5):
for i in range(5) :
c = d.cursor()
count = 0
rec = c.first()
@@ -182,24 +175,26 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
rec = next(c)
if verbose:
print(name+": found", count, "records")
print("%s: found %d records" % (name, count))
c.close()
time.sleep(0.05)
if verbose:
print("%s: thread finished" % name)
class BTreeConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_BTREE
writers = 1
writers = 2
readers = 10
records = 1000
class HashConcurrentDataStore(ConcurrentDataStoreBase):
dbtype = db.DB_HASH
writers = 1
readers = 0
writers = 2
readers = 10
records = 1000
@@ -208,8 +203,8 @@ class HashConcurrentDataStore(ConcurrentDataStoreBase):
class SimpleThreadedBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
readers = 5
writers = 3
readers = 10
writers = 2
records = 1000
def setEnvOpts(self):
@@ -220,87 +215,98 @@ class SimpleThreadedBase(BaseThreadedTestCase):
print('\n', '-=' * 30)
print("Running %s.test02_SimpleLocks..." % self.__class__.__name__)
threads = []
for x in range(self.writers):
wt = Thread(target = self.writerThread,
args = (self.d, self.records, x),
name = 'writer %d' % x,
)#verbose = verbose)
threads.append(wt)
keys=list(range(self.records))
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers = []
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
for t in threads:
writers = []
for x in range(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
a.sort() # Generate conflicts
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
for t in writers:
t.join()
for t in readers:
t.join()
def _writerThread(self, d, howMany, writerNum):
name = current_thread().name
start = howMany * writerNum
stop = howMany * (writerNum + 1) - 1
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
if verbose:
print("%s: creating records %d - %d" % (name, start, stop))
# create a bunch of records
for x in range(start, stop):
key = ('%04d' % x).encode("ascii")
count=len(keys)//len(readers)
count2=count
for x in keys :
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=20)
max_retries=12)
if verbose and x % 100 == 0:
print("%s: records %d - %d finished" % (name, start, x))
# do a bit of reading too
if random() <= 0.05:
for y in range(start, x):
key = ('%04d' % y).encode("ascii")
data = dbutils.DeadlockWrap(d.get, key, max_retries=20)
self.assertEqual(data, self.makeData(key))
# flush them
try:
dbutils.DeadlockWrap(d.sync, max_retries=20)
except db.DBIncompleteError as val:
if verbose:
print("could not complete sync()...")
# read them back, deleting a few
for x in range(start, stop):
key = ('%04d' % x).encode("ascii")
data = dbutils.DeadlockWrap(d.get, key, max_retries=20)
if verbose and x % 100 == 0:
print("%s: fetched record (%s, %s)" % (name, key, data))
self.assertEqual(data, self.makeData(key))
if random() <= 0.10:
dbutils.DeadlockWrap(d.delete, key, max_retries=20)
if verbose:
print("%s: deleted record %s" % (name, key))
count2-=1
if not count2 :
readers.pop().start()
count2=count
if verbose:
print("%s: thread finished" % name)
def _readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum)
name = current_thread().name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for loop in range(5):
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=20)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.next, max_retries=20)
if verbose:
print("%s: found %d records" % (name, count))
c.close()
time.sleep(0.05)
c = d.cursor()
count = 0
rec = dbutils.DeadlockWrap(c.first, max_retries=10)
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = dbutils.DeadlockWrap(c.__next__, max_retries=10)
if verbose:
print("%s: found %d records" % (name, count))
c.close()
if verbose:
print("%s: thread finished" % name)
@@ -340,120 +346,118 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
print("Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__)
threads = []
for x in range(self.writers):
wt = Thread(target = self.writerThread,
args = (self.d, self.records, x),
name = 'writer %d' % x,
)#verbose = verbose)
threads.append(wt)
keys=list(range(self.records))
import random
random.shuffle(keys)
records_per_writer=self.records//self.writers
readers_per_writer=self.readers//self.writers
self.assertEqual(self.records,self.writers*records_per_writer)
self.assertEqual(self.readers,self.writers*readers_per_writer)
self.assertTrue((records_per_writer%readers_per_writer)==0)
readers=[]
for x in range(self.readers):
rt = Thread(target = self.readerThread,
args = (self.d, x),
name = 'reader %d' % x,
)#verbose = verbose)
threads.append(rt)
import sys
if sys.version_info[0] < 3 :
rt.setDaemon(True)
else :
rt.daemon = True
readers.append(rt)
writers = []
for x in range(self.writers):
a=keys[records_per_writer*x:records_per_writer*(x+1)]
b=readers[readers_per_writer*x:readers_per_writer*(x+1)]
wt = Thread(target = self.writerThread,
args = (self.d, a, b),
name = 'writer %d' % x,
)#verbose = verbose)
writers.append(wt)
dt = Thread(target = self.deadlockThread)
import sys
if sys.version_info[0] < 3 :
dt.setDaemon(True)
else :
dt.daemon = True
dt.start()
for t in threads:
for t in writers:
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
for t in threads:
for t in writers:
t.join()
for t in readers:
t.join()
self.doLockDetect = False
dt.join()
def doWrite(self, d, name, start, stop):
finished = False
while not finished:
def writerThread(self, d, keys, readers):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
count=len(keys)//len(readers)
while len(keys):
try:
txn = self.env.txn_begin(None, self.txnFlag)
for x in range(start, stop):
key = ('%04d' % x).encode("ascii")
keys2=keys[:count]
for x in keys2 :
key = '%04d' % x
d.put(key, self.makeData(key), txn)
if verbose and x % 100 == 0:
print("%s: records %d - %d finished" % (name, start, x))
txn.commit()
finished = True
keys=keys[count:]
readers.pop().start()
except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
if verbose:
print("%s: Aborting transaction (%s)" % (name, val))
print("%s: Aborting transaction (%s)" % (name, val[1]))
txn.abort()
time.sleep(0.05)
def _writerThread(self, d, howMany, writerNum):
name = current_thread().name
start = howMany * writerNum
stop = howMany * (writerNum + 1) - 1
if verbose:
print("%s: creating records %d - %d" % (name, start, stop))
step = 100
for x in range(start, stop, step):
self.doWrite(d, name, x, min(stop, x+step))
if verbose:
print("%s: finished creating records" % name)
if verbose:
print("%s: deleting a few records" % name)
finished = False
while not finished:
try:
recs = []
txn = self.env.txn_begin(None, self.txnFlag)
for x in range(10):
key = int(random() * howMany) + start
key = ('%04d' % key).encode("ascii")
data = d.get(key, None, txn, db.DB_RMW)
if data is not None:
d.delete(key, txn)
recs.append(key)
txn.commit()
finished = True
if verbose:
print("%s: deleted records %s" % (name, recs))
except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
if verbose:
print("%s: Aborting transaction (%s)" % (name, val))
txn.abort()
time.sleep(0.05)
if verbose:
print("%s: thread finished" % name)
def _readerThread(self, d, readerNum):
time.sleep(0.01 * readerNum + 0.05)
name = current_thread().name
def readerThread(self, d, readerNum):
import sys
if sys.version_info[0] < 3 :
name = currentThread().getName()
else :
name = currentThread().name
for loop in range(5):
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = c.next()
if verbose: print("%s: found %d records" % (name, count))
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
if verbose:
print("%s: Aborting transaction (%s)" % (name, val))
c.close()
txn.abort()
time.sleep(0.05)
time.sleep(0.05)
finished = False
while not finished:
try:
txn = self.env.txn_begin(None, self.txnFlag)
c = d.cursor(txn)
count = 0
rec = c.first()
while rec:
count += 1
key, data = rec
self.assertEqual(self.makeData(key), data)
rec = next(c)
if verbose: print("%s: found %d records" % (name, count))
c.close()
txn.commit()
finished = True
except (db.DBLockDeadlockError, db.DBLockNotGrantedError) as val:
if verbose:
print("%s: Aborting transaction (%s)" % (name, val[1]))
c.close()
txn.abort()
if verbose:
print("%s: thread finished" % name)
@@ -461,7 +465,7 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
def deadlockThread(self):
self.doLockDetect = True
while self.doLockDetect:
time.sleep(0.5)
time.sleep(0.05)
try:
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
@@ -474,28 +478,28 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
class BTreeThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 3
readers = 5
records = 2000
writers = 2
readers = 10
records = 1000
class HashThreadedTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 1
readers = 5
records = 2000
writers = 2
readers = 10
records = 1000
class BTreeThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_BTREE
writers = 3
readers = 5
records = 2000
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT
class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
dbtype = db.DB_HASH
writers = 1
readers = 5
records = 2000
writers = 2
readers = 10
records = 1000
txnFlag = db.DB_TXN_NOWAIT


@@ -63,6 +63,12 @@ Extension Modules
- Issue #3643: Added a few more checks to _testcapi to prevent segfaults by
exploitation of poor argument checking.
- bsddb code updated to version 4.7.3pre2. This code is the same as the
  Python 2.6 one, since the intention is to keep a unified 2.x/3.x codebase.
  The Python code is automatically translated using "2to3". Please do not
  update this code in Python 3.0 by hand; update the 2.6 version and then
  run "2to3".
Tools/Demos
-----------

File diff suppressed because it is too large


@@ -36,7 +36,7 @@
/*
* Handwritten code to wrap version 3.x of the Berkeley DB library,
* written to replace a SWIG-generated file. It has since been updated
* to compile with BerkeleyDB versions 3.2 through 4.2.
* to compile with Berkeley DB versions 3.2 through 4.2.
*
* This module was started by Andrew Kuchling to remove the dependency
* on SWIG in a package by Gregory P. Smith who based his work on a
@@ -105,7 +105,7 @@
#error "eek! DBVER can't handle minor versions > 9"
#endif
#define PY_BSDDB_VERSION "4.6.0"
#define PY_BSDDB_VERSION "4.7.3pre2"
/* Python object definitions */
@@ -119,17 +119,27 @@ struct behaviourFlags {
};
struct DBObject; /* Forward declaration */
struct DBCursorObject; /* Forward declaration */
struct DBTxnObject; /* Forward declaration */
struct DBSequenceObject; /* Forward declaration */
typedef struct {
PyObject_HEAD
DB_ENV* db_env;
u_int32_t flags; /* saved flags from open() */
int closed;
struct behaviourFlags moduleFlags;
PyObject* event_notifyCallback;
struct DBObject *children_dbs;
struct DBTxnObject *children_txns;
PyObject *private_obj;
PyObject *rep_transport;
PyObject *in_weakreflist; /* List of weak references */
} DBEnvObject;
typedef struct {
typedef struct DBObject {
PyObject_HEAD
DB* db;
DBEnvObject* myenvobj; /* PyObject containing the DB_ENV */
@@ -137,27 +147,48 @@ typedef struct {
u_int32_t setflags; /* saved flags from set_flags() */
int haveStat;
struct behaviourFlags moduleFlags;
#if (DBVER >= 33)
struct DBTxnObject *txn;
struct DBCursorObject *children_cursors;
#if (DBVER >=43)
struct DBSequenceObject *children_sequences;
#endif
struct DBObject **sibling_prev_p;
struct DBObject *sibling_next;
struct DBObject **sibling_prev_p_txn;
struct DBObject *sibling_next_txn;
PyObject* associateCallback;
PyObject* btCompareCallback;
int primaryDBType;
#endif
PyObject *private_obj;
PyObject *in_weakreflist; /* List of weak references */
} DBObject;
typedef struct {
typedef struct DBCursorObject {
PyObject_HEAD
DBC* dbc;
struct DBCursorObject **sibling_prev_p;
struct DBCursorObject *sibling_next;
struct DBCursorObject **sibling_prev_p_txn;
struct DBCursorObject *sibling_next_txn;
DBObject* mydb;
struct DBTxnObject *txn;
PyObject *in_weakreflist; /* List of weak references */
} DBCursorObject;
typedef struct {
typedef struct DBTxnObject {
PyObject_HEAD
DB_TXN* txn;
PyObject *env;
DBEnvObject* env;
int flag_prepare;
struct DBTxnObject *parent_txn;
struct DBTxnObject **sibling_prev_p;
struct DBTxnObject *sibling_next;
struct DBTxnObject *children_txns;
struct DBObject *children_dbs;
struct DBSequenceObject *children_sequences;
struct DBCursorObject *children_cursors;
PyObject *in_weakreflist; /* List of weak references */
} DBTxnObject;
@@ -170,13 +201,17 @@ typedef struct {
#if (DBVER >= 43)
typedef struct {
typedef struct DBSequenceObject {
PyObject_HEAD
DB_SEQUENCE* sequence;
DBObject* mydb;
struct DBTxnObject *txn;
struct DBSequenceObject **sibling_prev_p;
struct DBSequenceObject *sibling_next;
struct DBSequenceObject **sibling_prev_p_txn;
struct DBSequenceObject *sibling_next_txn;
PyObject *in_weakreflist; /* List of weak references */
} DBSequenceObject;
static PyTypeObject DBSequence_Type;
#endif


@@ -673,12 +673,8 @@ class PyBuildExt(build_ext):
# a release. Most open source OSes come with one or more
# versions of BerkeleyDB already installed.
max_db_ver = (4, 5) # XXX(gregory.p.smith): 4.6 "works" but seems to
# have issues on many platforms. I've temporarily
# disabled 4.6 to see what the odd platform
# buildbots say.
max_db_ver = (4, 7) # XXX(matthias.klose): test with 4.7 on some buildds
min_db_ver = (3, 3)
max_db_ver = (4, 7)
min_db_ver = (4, 0)
db_setup_debug = False # verbose debug prints from this script?
# construct a list of paths to look for the header file in on