diff --git a/dashi/datasets/hdf_datasets.py b/dashi/datasets/hdf_datasets.py
index 2859252..cdd14a6 100644
--- a/dashi/datasets/hdf_datasets.py
+++ b/dashi/datasets/hdf_datasets.py
@@ -41,12 +41,12 @@ def hdf_toc(h5file):
     """
     toc = []
 
-    for node in h5file.walkNodes(classname="Table"):
+    for node in h5file.walk_nodes(classname="Table"):
         for varname in node.description._v_names:
             toc.append( "%s:%s" % (node._v_pathname, varname) )
 
     for arraytype in ["Array", "EArray"]:
-        for node in h5file.walkNodes(classname=arraytype):
+        for node in h5file.walk_nodes(classname=arraytype):
             toc.append( node._v_pathname )
 
     return sorted(toc)
@@ -66,7 +66,7 @@ def store_earray1d(h5file, path, array, complevel=6, **kwargs):
     if complevel > 0:
         filters = tables.Filters(complevel=complevel, complib="zlib")
 
-    earr = h5file.createEArray(parent, arrname,
+    earr = h5file.create_earray(parent, arrname,
                                tables.Atom.from_dtype(array.dtype),
                                (0,),
                                filters=filters,
@@ -101,12 +101,12 @@ def _ds_toc(self):
     if not self.isopen:
         return toc
 
-    for node in self.walkNodes(classname="Table"):
+    for node in self.walk_nodes(classname="Table"):
         toc.append( node._v_pathname )
         for varname in node.description._v_names:
             toc.append( "%s:%s" % (node._v_pathname, varname) )
 
-    for node in self.walkNodes(classname="Array"):
+    for node in self.walk_nodes(classname="Array"):
         toc.append( node._v_pathname )
 
     self._ds_toc_cache = sorted(toc)
@@ -121,18 +121,18 @@ def _ds_write_variable(self, path, array):
     arrname = os.path.basename(path)
     self._ds_toc_cache = None
     if array.dtype.names is None:
-        earr = self.createEArray(parent, arrname,
+        earr = self.create_earray(parent, arrname,
                                  tables.Atom.from_dtype(array.dtype), (0,),
                                  filters=self.filters, createparents=True)
        earr.append(array)
        earr.flush()
    else:
-        tab = self.createTable(parent, arrname, array, createparents=True, filters=self.filters)
+        tab = self.create_table(parent, arrname, array, createparents=True, filters=self.filters)
        tab.flush()
 
 def _ds_remove_variable(self, path):
     try:
-        self.removeNode(path)
+        self.remove_node(path)
         self.flush()
     except tables.NoSuchNodeError as exc:
         raise ValueError("removing path %s raised a NoSuchNodeError" % path)
diff --git a/dashi/fitting.py b/dashi/fitting.py
index 1b1bb8e..54c6364 100644
--- a/dashi/fitting.py
+++ b/dashi/fitting.py
@@ -26,7 +26,7 @@ def __init__(self, callable, integral_callable=None):
             :params integral_callable: a function P(X < x)
         """
         self.dfunc = callable
-        argspec = inspect.getargspec(callable)
+        argspec = inspect.getfullargspec(callable)
         self.func = integral_callable
 
         # initialize params with 1 which is a better starting value than zero for factors
diff --git a/dashi/objbundleutils.py b/dashi/objbundleutils.py
index 8dbd963..aaeaa84 100644
--- a/dashi/objbundleutils.py
+++ b/dashi/objbundleutils.py
@@ -68,7 +68,7 @@ def read_hdf(h5file, selection):
                 readorder.insert(0, (varname, [] ))
                 #print "insert", varname
             elif callable(cfg):
-                args = inspect.getargspec(cfg).args
+                args = inspect.getfullargspec(cfg).args
                 if args == ["file"]:
                     # pass h5file, calculation doesn't depend on anything
                     readorder.insert(0, (varname, []) )
@@ -99,7 +99,7 @@ def read_hdf(h5file, selection):
             else:
                 arrays[varname] = h5file.get_node(cfg).read()
         elif callable(cfg):
-            args = inspect.getargspec(cfg).args
+            args = inspect.getfullargspec(cfg).args
             if args == ["file"]:
                 arrays[varname] = cfg(h5file)
             else:
diff --git a/dashi/odict.py b/dashi/odict.py
index 5d9874f..24762bd 100644
--- a/dashi/odict.py
+++ b/dashi/odict.py
@@ -1,4 +1,2 @@
-
 # Python 3 dictionaries remember insertion order
 OrderedDict = dict
-
diff --git a/dashi/tests/datasets_test.py b/dashi/tests/datasets_test.py
index ea7c831..21fda7a 100644
--- a/dashi/tests/datasets_test.py
+++ b/dashi/tests/datasets_test.py
@@ -24,8 +24,8 @@
         arr["y"] = n.arange(100, 0, -1, dtype=int)
 
         fname = "%s_%d.h5" % (name,j)
-        f = tables.openFile(fname, "a")
-        f.createTable("/", "test", arr)
+        f = tables.open_file(fname, "a")
+        f.create_table("/", "test", arr)
         f.close()
 
         datafiles[name].append(fname)