Skip to content

Commit

Permalink
Merge pull request #177 from aykuznetsova/add-python2.6-compatibility
Browse files Browse the repository at this point in the history
Add python2.6 compatibility
  • Loading branch information
shoyer committed Jul 1, 2014
2 parents 7e0e7b1 + 16bfa99 commit 837ff50
Show file tree
Hide file tree
Showing 18 changed files with 68 additions and 57 deletions.
5 changes: 5 additions & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# Based on https://github.com/Jorge-C/ordination/blob/master/.travis.yml
language: python
python:
- "2.6"
- "2.7"
- "3.3"
notifications:
Expand Down Expand Up @@ -28,11 +29,15 @@ matrix:
env: UPDATE_PYENV='pip install pydap'
- python: "2.7"
env: UPDATE_PYENV=''
- python: "2.6"
env: UPDATE_PYENV=''

# Install packages
install:
- conda create --yes -n test_env python=$TRAVIS_PYTHON_VERSION pip nose mock numpy pandas scipy netCDF4
- source activate test_env
# install unittest2 ONLY if running 2.6
- if [ ${TRAVIS_PYTHON_VERSION:0:3} == "2.6" ]; then pip install unittest2; fi
- echo $UPDATE_PYENV; $UPDATE_PYENV
- python setup.py install
# Run test
Expand Down
3 changes: 3 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
Expand All @@ -37,6 +38,8 @@
INSTALL_REQUIRES = ['numpy >= 1.7', 'pandas >= 0.13.1']
TESTS_REQUIRE = ['nose >= 1.0']

if sys.version_info[:2] < (2, 7):
TESTS_REQUIRE += ["unittest2 == 0.5.1"]

DESCRIPTION = "Extended arrays for working with scientific datasets in Python"
LONG_DESCRIPTION = """
Expand Down
7 changes: 5 additions & 2 deletions test/__init__.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
import unittest

import numpy as np
from numpy.testing import assert_array_equal

from xray import utils, DataArray
from xray.variable import as_variable
from xray.pycompat import PY3

try:
import unittest2 as unittest
except ImportError:
import unittest

try:
import scipy
has_scipy = True
Expand Down
4 changes: 2 additions & 2 deletions test/test_backends.py
Original file line number Diff line number Diff line change
Expand Up @@ -239,8 +239,8 @@ def test_open_encodings(self):
actual = open_dataset(tmp_file)

self.assertVariableEqual(actual['time'], expected['time'])
actual_encoding = {k: v for k, v in iteritems(actual['time'].encoding)
if k in expected['time'].encoding}
actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding)
if k in expected['time'].encoding)
self.assertDictEqual(actual_encoding, expected['time'].encoding)

def test_open_group(self):
Expand Down
3 changes: 1 addition & 2 deletions test/test_data_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,9 @@
import pandas as pd
from copy import deepcopy
from textwrap import dedent
from collections import OrderedDict

from xray import Dataset, DataArray, Index, Variable, align
from xray.pycompat import iteritems
from xray.pycompat import iteritems, OrderedDict
from . import TestCase, ReturnItem, source_ndarray


Expand Down
23 changes: 11 additions & 12 deletions test/test_dataset.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from collections import OrderedDict
from copy import copy, deepcopy
from textwrap import dedent
try:
Expand All @@ -11,7 +10,7 @@

from xray import (Dataset, DataArray, Index, Variable,
backends, utils, align, indexing)
from xray.pycompat import iteritems
from xray.pycompat import iteritems, OrderedDict

from . import TestCase

Expand Down Expand Up @@ -164,7 +163,7 @@ def test_indexes_properties(self):

self.assertEquals(2, len(data.indexes))

self.assertEquals({'x', 'y'}, set(data.indexes))
self.assertEquals(set(['x', 'y']), set(data.indexes))

self.assertVariableIdentical(data.indexes['x'], data['x'].variable)
self.assertVariableIdentical(data.indexes['y'], data['y'].variable)
Expand Down Expand Up @@ -367,11 +366,11 @@ def test_drop_vars(self):

self.assertEqual(data, data.drop_vars())

expected = Dataset({k: data[k] for k in data if k != 'time'})
expected = Dataset(dict((k, data[k]) for k in data if k != 'time'))
actual = data.drop_vars('time')
self.assertEqual(expected, actual)

expected = Dataset({k: data[k] for k in ['dim2', 'dim3', 'time']})
expected = Dataset(dict((k, data[k]) for k in ['dim2', 'dim3', 'time']))
actual = data.drop_vars('dim1')
self.assertEqual(expected, actual)

Expand Down Expand Up @@ -510,20 +509,20 @@ def test_setitem(self):

def test_delitem(self):
data = create_test_data()
all_items = {'time', 'dim1', 'dim2', 'dim3', 'var1', 'var2', 'var3'}
all_items = set(['time', 'dim1', 'dim2', 'dim3', 'var1', 'var2', 'var3'])
self.assertItemsEqual(data, all_items)
del data['var1']
self.assertItemsEqual(data, all_items - {'var1'})
self.assertItemsEqual(data, all_items - set(['var1']))
del data['dim1']
self.assertItemsEqual(data, {'time', 'dim2', 'dim3'})
self.assertItemsEqual(data, set(['time', 'dim2', 'dim3']))

def test_squeeze(self):
data = Dataset({'foo': (['x', 'y', 'z'], [[[1], [2]]])})
for args in [[], [['x']], [['x', 'z']]]:
def get_args(v):
return [set(args[0]) & set(v.dimensions)] if args else []
expected = Dataset({k: v.squeeze(*get_args(v))
for k, v in iteritems(data.variables)})
expected = Dataset(dict((k, v.squeeze(*get_args(v)))
for k, v in iteritems(data.variables)))
self.assertDatasetIdentical(expected, data.squeeze(*args))
# invalid squeeze
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
Expand Down Expand Up @@ -595,8 +594,8 @@ def test_concat(self):
def rectify_dim_order(dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset({k: v.transpose(*data[k].dimensions)
for k, v in iteritems(dataset.variables)},
return Dataset(dict((k, v.transpose(*data[k].dimensions))
for k, v in iteritems(dataset.variables)),
dataset.attrs)

for dim in ['dim1', 'dim2', 'dim3']:
Expand Down
2 changes: 1 addition & 1 deletion test/test_utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from collections import OrderedDict
import numpy as np
import pandas as pd

from xray import utils
from xray.pycompat import OrderedDict
from . import TestCase


Expand Down
4 changes: 2 additions & 2 deletions test/test_variable.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from collections import namedtuple, OrderedDict
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime
from textwrap import dedent
Expand All @@ -9,7 +9,7 @@
from xray import Variable, Dataset, DataArray, indexing
from xray.variable import (Index, as_variable, NumpyArrayAdapter,
PandasIndexAdapter, _as_compatible_data)
from xray.pycompat import PY3
from xray.pycompat import PY3, OrderedDict

from . import TestCase, source_ndarray

Expand Down
2 changes: 1 addition & 1 deletion xray/backends/memory.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from collections import OrderedDict
from xray.pycompat import OrderedDict

from .common import AbstractWritableDataStore

Expand Down
3 changes: 1 addition & 2 deletions xray/backends/netCDF4_.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from collections import OrderedDict
import warnings

import numpy as np
Expand All @@ -9,7 +8,7 @@
from xray.conventions import encode_cf_variable
from xray.utils import FrozenOrderedDict, NDArrayMixin
from xray import indexing
from xray.pycompat import iteritems, basestring, bytes_type
from xray.pycompat import iteritems, basestring, bytes_type, OrderedDict


class NetCDF4ArrayWrapper(NDArrayMixin):
Expand Down
3 changes: 1 addition & 2 deletions xray/backends/scipy_.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from collections import OrderedDict
try: # Python 2
from cStringIO import StringIO as BytesIO
except ImportError: # Python 3
Expand All @@ -9,7 +8,7 @@
import xray
from xray.backends.common import AbstractWritableDataStore
from xray.utils import Frozen
from xray.pycompat import iteritems, basestring, unicode_type
from xray.pycompat import iteritems, basestring, unicode_type, OrderedDict

from .. import conventions
from .netcdf3 import is_valid_nc3_name, coerce_nc3_dtype, encode_nc3_variable
Expand Down
6 changes: 3 additions & 3 deletions xray/conventions.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
import numpy as np
import pandas as pd
import warnings
from collections import defaultdict, OrderedDict
from collections import defaultdict
from datetime import datetime

from . import indexing
from . import utils
from .pycompat import iteritems, bytes_type, unicode_type
from .pycompat import iteritems, bytes_type, unicode_type, OrderedDict
import xray

# standard calendars recognized by netcdftime
_STANDARD_CALENDARS = {'standard', 'gregorian', 'proleptic_gregorian'}
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])


def mask_and_scale(array, fill_value=None, scale_factor=None, add_offset=None,
Expand Down
16 changes: 9 additions & 7 deletions xray/data_array.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import functools
import operator
import warnings
from collections import defaultdict, OrderedDict
from collections import defaultdict

import numpy as np
import pandas as pd
Expand All @@ -14,7 +14,7 @@
from . import variable
from .common import AbstractArray, AbstractIndexes
from .utils import multi_index_from_product
from .pycompat import iteritems, basestring
from .pycompat import iteritems, basestring, OrderedDict


def _is_dict_like(value):
Expand Down Expand Up @@ -639,8 +639,8 @@ def reduce(self, func, dimension=None, axis=None, keep_attrs=False,
# For now, take an aggressive strategy of removing all variables
# associated with any dropped dimensions
# TODO: save some summary (mean? bounds?) of dropped variables
drop |= {k for k, v in iteritems(self.dataset.variables)
if any(dim in drop for dim in v.dimensions)}
drop |= set(k for k, v in iteritems(self.dataset.variables)
if any(dim in drop for dim in v.dimensions))
ds = self.dataset.drop_vars(*drop)
ds[self.name] = var

Expand Down Expand Up @@ -702,7 +702,9 @@ def concat(cls, arrays, dimension='concat_dimension', indexers=None,
datasets.append(arr.dataset)
if concat_over is None:
concat_over = set()
concat_over = set(concat_over) | {name}
elif isinstance(concat_over, basestring):
concat_over = set([concat_over])
concat_over = set(concat_over) | set([name])
ds = xray.Dataset.concat(datasets, dimension, indexers,
concat_over=concat_over)
return ds[name]
Expand Down Expand Up @@ -897,7 +899,7 @@ def align(*objects, **kwargs):

# Exclude dimensions with all equal indices to avoid unnecessary reindexing
# work.
joined_indexes = {k: join_indices(v) for k, v in iteritems(all_indexes)
if any(not v[0].equals(idx) for idx in v[1:])}
joined_indexes = dict((k, join_indices(v)) for k, v in iteritems(all_indexes)
if any(not v[0].equals(idx) for idx in v[1:]))

return tuple(obj.reindex(copy=copy, **joined_indexes) for obj in objects)
25 changes: 12 additions & 13 deletions xray/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
from cStringIO import StringIO as BytesIO
except ImportError: # Python 3
from io import BytesIO
from collections import OrderedDict, Mapping
from collections import Mapping

from . import backends
from . import conventions
Expand All @@ -18,7 +18,7 @@
from . import ops
from .utils import (FrozenOrderedDict, Frozen, SortedKeysDict, ChainMap,
multi_index_from_product)
from .pycompat import iteritems, itervalues, basestring
from .pycompat import iteritems, itervalues, basestring, OrderedDict


def open_dataset(nc, decode_cf=True, mask_and_scale=True, decode_times=True,
Expand Down Expand Up @@ -623,13 +623,12 @@ def isel(self, **indexers):
raise ValueError("dimensions %r do not exist" % invalid)

# all indexers should be int, slice or np.ndarrays
indexers = {k: np.asarray(v) if not isinstance(v, (int, np.integer, slice)) else v
for k, v in iteritems(indexers)}
indexers = dict((k, np.asarray(v) if not isinstance(v, (int, np.integer, slice)) else v)
for k, v in iteritems(indexers))

variables = OrderedDict()
for name, var in iteritems(self.variables):
var_indexers = {k: v for k, v in iteritems(indexers)
if k in var.dimensions}
var_indexers = dict((k, v) for k, v in iteritems(indexers) if k in var.dimensions)
variables[name] = var.isel(**var_indexers)
return type(self)(variables, self.attrs)

Expand Down Expand Up @@ -923,11 +922,11 @@ def merge(self, other, inplace=False, overwrite_vars=set(),
potential_conflicts = self.variables
else:
if isinstance(overwrite_vars, basestring):
overwrite_vars = {overwrite_vars}
overwrite_vars = set([overwrite_vars])
else:
overwrite_vars = set(overwrite_vars)
potential_conflicts = {k: v for k, v in iteritems(self.variables)
if k not in overwrite_vars}
potential_conflicts = dict((k, v) for k, v in iteritems(self.variables)
if k not in overwrite_vars)

# update variables
new_variables = _expand_variables(other_variables, potential_conflicts,
Expand Down Expand Up @@ -975,8 +974,8 @@ def drop_vars(self, *names):
raise ValueError('One or more of the specified variable '
'names does not exist in this dataset')
drop = set(names)
drop |= {k for k, v in iteritems(self.variables)
if any(name in v.dimensions for name in names)}
drop |= set(k for k, v in iteritems(self.variables)
if any(name in v.dimensions for name in names))
variables = OrderedDict((k, v) for k, v in iteritems(self.variables)
if k not in drop)
return type(self)(variables, self.attrs)
Expand Down Expand Up @@ -1150,7 +1149,7 @@ def concat(cls, datasets, dimension='concat_dimension', indexers=None,
if concat_over is None:
concat_over = set()
elif isinstance(concat_over, basestring):
concat_over = {concat_over}
concat_over = set([concat_over])
else:
concat_over = set(concat_over)

Expand Down Expand Up @@ -1180,7 +1179,7 @@ def differs(vname, v):
% (concat_over, datasets[0]))

# automatically concatenate over variables along the dimension
auto_concat_dims = {dim_name}
auto_concat_dims = set([dim_name])
if hasattr(dimension, 'dimensions'):
auto_concat_dims |= set(dimension.dimensions)
for k, v in iteritems(datasets[0]):
Expand Down
5 changes: 2 additions & 3 deletions xray/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,8 @@ def remap_label_indexers(data_obj, indexers):
"""Given an xray data object and label based indexers, return a mapping
of equivalent location based indexers.
"""
return {dim: convert_label_indexer(data_obj.indexes[dim], label, dim)
for dim, label in iteritems(indexers)}

return dict((dim, convert_label_indexer(data_obj.indexes[dim], label, dim))
for dim, label in iteritems(indexers))

def _expand_slice(slice_, size):
return np.arange(*slice_.indices(size))
Expand Down
5 changes: 5 additions & 0 deletions xray/pycompat.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ def iteritems(d):
def itervalues(d):
return iter(d.values())
xrange = range
from collections import OrderedDict
else:
# Python 2
basestring = basestring
Expand All @@ -21,3 +22,7 @@ def iteritems(d):
def itervalues(d):
return d.itervalues()
xrange = xrange
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
Loading

0 comments on commit 837ff50

Please sign in to comment.