Skip to content

COMPAT: fix numpy 1.9-dev deprecation warnings in test suite #6960

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Apr 27, 2014
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions ci/script.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,13 @@ fi
"$TRAVIS_BUILD_DIR"/ci/build_docs.sh 2>&1 > /tmp/doc.log &
# doc build log will be shown after tests

# export the testing mode
if [ -n "$NUMPY_BUILD" ]; then

export PANDAS_TESTING_MODE="numpy_deprecate"

fi

echo nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml
nosetests --exe -w /tmp -A "$NOSE_ARGS" pandas --with-xunit --xunit-file=/tmp/nosetests.xml

Expand Down
3 changes: 3 additions & 0 deletions doc/source/release.rst
Original file line number Diff line number Diff line change
Expand Up @@ -240,6 +240,9 @@ Prior Version Deprecations/Changes
- Remove ``time_rule`` from several rolling-moment statistical functions, such
as :func:`rolling_sum` (:issue:`1042`)

- Removed the ``-`` (neg) boolean operation on numpy arrays in favor of ``~`` (inv), as ``-`` on
  boolean arrays will be deprecated in numpy 1.9 (:issue:`6960`)

Experimental Features
~~~~~~~~~~~~~~~~~~~~~

Expand Down
2 changes: 2 additions & 0 deletions doc/source/v0.14.0.txt
Original file line number Diff line number Diff line change
Expand Up @@ -368,6 +368,8 @@ There are prior version deprecations that are taking effect as of 0.14.0.
( `commit 3136390 <https://github.com/pydata/pandas/commit/3136390>`__ )
- Remove ``time_rule`` from several rolling-moment statistical functions, such
as :func:`rolling_sum` (:issue:`1042`)
- Removed the ``-`` (neg) boolean operation on numpy arrays in favor of ``~`` (inv), as ``-`` on
  boolean arrays will be deprecated in numpy 1.9 (:issue:`6960`)

.. _whatsnew_0140.deprecations:

Expand Down
2 changes: 1 addition & 1 deletion pandas/algos.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -510,7 +510,7 @@ def rank_1d_generic(object in_arr, bint retry=1, ties_method='average',
if not retry:
raise

valid_locs = (-mask).nonzero()[0]
valid_locs = (~mask).nonzero()[0]
ranks.put(valid_locs, rank_1d_generic(values.take(valid_locs), 0,
ties_method=ties_method,
ascending=ascending))
Expand Down
4 changes: 1 addition & 3 deletions pandas/computation/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,7 @@ class Term(StringMixin):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = super(Term, klass).__new__
if PY3:
return supr_new(klass)
return supr_new(klass, name, env, side=side, encoding=encoding)
return supr_new(klass)

def __init__(self, name, env, side=None, encoding=None):
self._name = name
Expand Down
4 changes: 1 addition & 3 deletions pandas/computation/pytables.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,7 @@ class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
if PY3:
return supr_new(klass)
return supr_new(klass, name, env, side=side, encoding=encoding)
return supr_new(klass)

def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -329,7 +329,7 @@ def quantile(x, q, interpolation_method='fraction'):
x = np.asarray(x)
mask = com.isnull(x)

x = x[-mask]
x = x[~mask]

values = np.sort(x)

Expand All @@ -339,7 +339,7 @@ def _get_score(at):

idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[idx]
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
Expand Down
18 changes: 14 additions & 4 deletions pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ def _isnull_ndarraylike_old(obj):
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = -np.isfinite(values)
result = ~np.isfinite(values)

# box
if isinstance(obj, ABCSeries):
Expand Down Expand Up @@ -280,12 +280,22 @@ def notnull(obj):
res = isnull(obj)
if np.isscalar(res):
return not res
return -res
return ~res

def _is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
return (np.isscalar(other) and (isnull(other) or other == tslib.iNaT)) or other is pd.NaT or other is None
if other is pd.NaT or other is None:
return True
elif np.isscalar(other):

# a timedelta
if hasattr(other,'dtype'):
return other.view('i8') == tslib.iNaT
elif is_integer(other) and other == tslib.iNaT:
return True
return isnull(other)
return False

def array_equivalent(left, right):
"""
Expand Down Expand Up @@ -363,7 +373,7 @@ def mask_missing(arr, values_to_mask):
values_to_mask = np.array(values_to_mask, dtype=object)

na_mask = isnull(values_to_mask)
nonna = values_to_mask[-na_mask]
nonna = values_to_mask[~na_mask]

mask = None
for x in nonna:
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/format.py
Original file line number Diff line number Diff line change
Expand Up @@ -1861,7 +1861,7 @@ def _get_format_timedelta64(values):
def impl(x):
if x is None or lib.checknull(x):
return 'NaT'
elif format_short and x == 0:
elif format_short and com.is_integer(x) and x.view('int64') == 0:
return "0 days" if even_days else "00:00:00"
else:
return lib.repr_timedelta64(x, format=format)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/frame.py
Original file line number Diff line number Diff line change
Expand Up @@ -3041,7 +3041,7 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
this = self[col].values
that = other[col].values
if filter_func is not None:
mask = -filter_func(this) | isnull(that)
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
Expand Down
10 changes: 7 additions & 3 deletions pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -606,7 +606,11 @@ def _indexed_same(self, other):
for a in self._AXIS_ORDERS])

def __neg__(self):
arr = operator.neg(_values_from_object(self))
values = _values_from_object(self)
if values.dtype == np.bool_:
arr = operator.inv(values)
else:
arr = operator.neg(values)
return self._wrap_array(arr, self.axes, copy=False)

def __invert__(self):
Expand Down Expand Up @@ -1459,10 +1463,10 @@ def drop(self, labels, axis=0, level=None, inplace=False, **kwargs):
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = -lib.ismember(axis.get_level_values(level),
indexer = ~lib.ismember(axis.get_level_values(level),
set(labels))
else:
indexer = -axis.isin(labels)
indexer = ~axis.isin(labels)

slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -1698,7 +1698,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,

labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[-mask] = -1
labels[~mask] = -1

if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
Expand Down
58 changes: 29 additions & 29 deletions pandas/core/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -480,10 +480,10 @@ def to_int():
if is_integer(key):
return key
elif is_float(key):
if not self.is_floating():
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return to_int()
key = to_int()
warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
type(self).__name__),FutureWarning)
return key
return self._convert_indexer_error(key, 'label')

if is_float(key):
Expand All @@ -498,17 +498,9 @@ def _validate_slicer(self, key, f):
""" validate and raise if needed on a slice indexers according to the
passed in function """

if not f(key.start):
self._convert_indexer_error(key.start, 'slice start value')
if not f(key.stop):
self._convert_indexer_error(key.stop, 'slice stop value')
if not f(key.step):
self._convert_indexer_error(key.step, 'slice step value')

def _convert_slice_indexer_iloc(self, key):
""" convert a slice indexer for iloc only """
self._validate_slicer(key, lambda v: v is None or is_integer(v))
return key
for c in ['start','stop','step']:
if not f(getattr(key,c)):
self._convert_indexer_error(key.start, 'slice {0} value'.format(c))

def _convert_slice_indexer_getitem(self, key, is_index_slice=False):
""" called from the getitem slicers, determine how to treat the key
Expand All @@ -520,6 +512,25 @@ def _convert_slice_indexer_getitem(self, key, is_index_slice=False):
def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer. disallow floats in the start/stop/step """

# validate iloc
if typ == 'iloc':

# need to coerce to_int if needed
def f(c):
v = getattr(key,c)
if v is None or is_integer(v):
return v

# warn if it's a convertible float
if v == int(v):
warnings.warn("slice indexers when using iloc should be integers "
"and not floating point",FutureWarning)
return int(v)

self._convert_indexer_error(v, 'slice {0} value'.format(c))

return slice(*[ f(c) for c in ['start','stop','step']])

# validate slicers
def validate(v):
if v is None or is_integer(v):
Expand All @@ -530,7 +541,6 @@ def validate(v):
return False

return True

self._validate_slicer(key, validate)

# figure out if this is a positional indexer
Expand All @@ -543,9 +553,7 @@ def is_int(v):
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()

if typ == 'iloc':
return self._convert_slice_indexer_iloc(key)
elif typ == 'getitem':
if typ == 'getitem':
return self._convert_slice_indexer_getitem(
key, is_index_slice=is_index_slice)

Expand Down Expand Up @@ -1980,7 +1988,7 @@ def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer, by definition these are labels
unless we are iloc """
if typ == 'iloc':
return self._convert_slice_indexer_iloc(key)
return super(Float64Index, self)._convert_slice_indexer(key, typ=typ)

# allow floats here
self._validate_slicer(
Expand Down Expand Up @@ -2386,14 +2394,6 @@ def __unicode__(self):
def __len__(self):
return len(self.labels[0])

def _convert_slice_indexer(self, key, typ=None):
""" convert a slice indexer. disallow floats in the start/stop/step """

if typ == 'iloc':
return self._convert_slice_indexer_iloc(key)

return super(MultiIndex, self)._convert_slice_indexer(key, typ=typ)

def _get_names(self):
return FrozenList(level.name for level in self.levels)

Expand Down Expand Up @@ -2997,7 +2997,7 @@ def _drop_from_level(self, labels, level):
index = self.levels[i]
values = index.get_indexer(labels)

mask = -lib.ismember(self.labels[i], set(values))
mask = ~lib.ismember(self.labels[i], set(values))

return self[mask]

Expand Down
13 changes: 11 additions & 2 deletions pandas/core/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import pandas.core.common as com
from pandas.core.common import (_is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
ABCSeries, ABCDataFrame, ABCPanel)
ABCSeries, ABCDataFrame, ABCPanel, is_float)
import pandas.lib as lib

import numpy as np
Expand Down Expand Up @@ -1319,6 +1319,7 @@ def _get_slice_axis(self, slice_obj, axis=0):
if not _need_slice(slice_obj):
return obj

slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, typ='iloc')
else:
Expand Down Expand Up @@ -1363,7 +1364,15 @@ def _getitem_axis(self, key, axis=0, validate_iterable=False):

def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
if self._has_valid_type(obj, axis):

# may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)

elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)

elif self._has_valid_type(obj, axis):
return obj

raise ValueError("Can only index by location with a [%s]" %
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/internals.py
Original file line number Diff line number Diff line change
Expand Up @@ -1049,7 +1049,7 @@ def to_native_types(self, slicer=None, na_rep='', float_format=None,
mask = isnull(values)
values[mask] = na_rep
if float_format:
imask = (-mask).ravel()
imask = (~mask).ravel()
values.flat[imask] = np.array(
[float_format % val for val in values.ravel()[imask]])
return values.tolist()
Expand Down Expand Up @@ -1181,7 +1181,7 @@ def to_native_types(self, slicer=None, na_rep=None, **kwargs):
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (-mask).ravel()
imask = (~mask).ravel()
rvalues.flat[imask] = np.array([lib.repr_timedelta64(val)
for val in values.ravel()[imask]],
dtype=object)
Expand Down Expand Up @@ -1531,7 +1531,7 @@ def to_native_types(self, slicer=None, na_rep=None, date_format=None,
if na_rep is None:
na_rep = 'NaT'
rvalues[mask] = na_rep
imask = (-mask).ravel()
imask = (~mask).ravel()

if date_format is None:
date_formatter = lambda x: Timestamp(x)._repr_base
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/nanops.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,9 +190,9 @@ def _isfinite(values):
if issubclass(values.dtype.type, (np.timedelta64, np.datetime64)):
return isnull(values)
elif isinstance(values.dtype, object):
return -np.isfinite(values.astype('float64'))
return ~np.isfinite(values.astype('float64'))

return -np.isfinite(values)
return ~np.isfinite(values)


def _na_ok_dtype(dtype):
Expand Down
Loading