
CLN/BUG/TST: Fix and test multiple places using undefined names. #5045


Closed
wants to merge 9 commits into from
5 changes: 3 additions & 2 deletions pandas/computation/align.py
@@ -10,6 +10,7 @@
import pandas as pd
from pandas import compat
import pandas.core.common as com
import pandas.computation.ops as ops


def _align_core_single_unary_op(term):
@@ -170,10 +171,10 @@ def _align_core(terms):

return typ, _zip_axes_from_type(typ, axes)


# TODO: Add tests that cover this function!
Contributor Author:
@cpcloud this function is never used in any tests, is it necessary?

Member:
probably not ... i'll add it to the unused codepaths issue

def _filter_terms(flat):
# numeric literals
literals = frozenset(filter(lambda x: isinstance(x, Constant), flat))
literals = frozenset(filter(lambda x: isinstance(x, ops.Constant), flat))

# these are strings which are variable names
names = frozenset(flat) - literals
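
For context on why this class of bug survives review: a reference to an undefined name such as the unqualified `Constant` above only raises `NameError` when the enclosing function actually runs, so a code path with no test coverage can hide it indefinitely. The standalone sketch below illustrates that pattern; every name in it is invented for the example and none of it is pandas code.

```python
class Constant:
    """Stand-in for pandas.computation.ops.Constant (illustrative only)."""
    def __init__(self, value):
        self.value = value


def filter_terms(flat):
    """Split terms into numeric literals and variable names (the fixed shape)."""
    literals = frozenset(term for term in flat if isinstance(term, Constant))
    names = frozenset(flat) - literals  # whatever is left is a variable name
    return literals, names


def filter_terms_broken(flat):
    """The pre-fix shape: `Konstant` is never defined, but Python only
    notices when this body executes, which no test ever forced."""
    return frozenset(term for term in flat if isinstance(term, Konstant))


if __name__ == "__main__":
    literals, names = filter_terms([Constant(1), "x", "y"])
    print(sorted(names))          # ['x', 'y']
    try:
        filter_terms_broken(["x"])
    except NameError as err:
        print("fails only when called:", err)
```
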
5 changes: 0 additions & 5 deletions pandas/computation/eval.py
@@ -2,12 +2,7 @@

"""Top level ``eval`` module.
"""

import numbers
import numpy as np

from pandas.core import common as com
from pandas.compat import string_types
from pandas.computation.expr import Expr, _parsers, _ensure_scope
from pandas.computation.engines import _engines

19 changes: 0 additions & 19 deletions pandas/core/algorithms.py
@@ -57,15 +57,6 @@ def unique(values):
return _hashtable_algo(f, values.dtype)


# def count(values, uniques=None):
# f = lambda htype, caster: _count_generic(values, htype, caster)

# if uniques is not None:
# raise NotImplementedError
# else:
# return _hashtable_algo(f, values.dtype)


def _hashtable_algo(f, dtype):
"""
f(HashTable, type_caster) -> result
@@ -78,16 +69,6 @@ def _hashtable_algo(f, dtype):
return f(htable.PyObjectHashTable, com._ensure_object)


def _count_generic(values, table_type, type_caster):
from pandas.core.series import Series

values = type_caster(values)
table = table_type(min(len(values), 1000000))
uniques, labels = table.factorize(values)

return Series(counts, index=uniques)


def _match_generic(values, index, table_type, type_caster):
values = type_caster(values)
index = type_caster(index)
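
The deleted `_count_generic` is a good illustration of why these paths were removed rather than patched: it returned `Series(counts, index=uniques)` but never defined `counts`, so it could only ever raise `NameError`. If an occurrence counter were actually needed, a working version could be built from public APIs roughly as below (a sketch of the apparent intent, not the pandas implementation; in practice `Series.value_counts()` already covers this).

```python
import numpy as np
from pandas import Series


def count_values(values):
    """Count occurrences of each distinct value.

    Returns a Series indexed by the unique values, which appears to be what
    the removed _count_generic was reaching for before it was abandoned.
    """
    values = np.asarray(values)
    uniques, inverse = np.unique(values, return_inverse=True)
    counts = np.bincount(inverse)
    return Series(counts, index=uniques)


# count_values(["a", "b", "a"])  ->  a: 2, b: 1
```
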
3 changes: 1 addition & 2 deletions pandas/core/groupby.py
@@ -1,3 +1,4 @@
import sys
import types
from functools import wraps
import numpy as np
@@ -2123,8 +2124,6 @@ def filter(self, func, dropna=True, *args, **kwargs):
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
from pandas.tools.merge import concat

indexers = []

obj = self._obj_with_exclusions
5 changes: 3 additions & 2 deletions pandas/core/index.py
@@ -501,10 +501,11 @@ def is_int(v):
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# check that start and stop are valid
if start is not None:
i = self.get_loc(start)
self.get_loc(start)
if stop is not None:
j = self.get_loc(stop)
self.get_loc(stop)
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
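
In the index.py hunk the `get_loc` calls are kept while the `i`/`j` bindings go away: the calls exist purely for their side effect of raising `KeyError` when a label is missing, which the surrounding `try`/`except KeyError` relies on, so the results were never meant to be used. A generic sketch of that validate-by-calling pattern (names here are illustrative, not pandas internals):

```python
def validate_slice_labels(index, start, stop):
    """Raise KeyError if either slice endpoint is not a label of `index`.

    The get_loc return values are deliberately discarded; the calls are made
    only so that a missing label raises, mirroring the hunk above.
    """
    if start is not None:
        index.get_loc(start)   # raises KeyError if absent
    if stop is not None:
        index.get_loc(stop)    # raises KeyError if absent


# Usage sketch:
#   import pandas as pd
#   idx = pd.Index(['a', 'b', 'c'])
#   validate_slice_labels(idx, 'a', 'c')   # passes silently
#   validate_slice_labels(idx, 'a', 'z')   # raises KeyError
```
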
10 changes: 4 additions & 6 deletions pandas/core/indexing.py
@@ -540,8 +540,6 @@ def _align_frame(self, indexer, df):
raise ValueError('Incompatible indexer with DataFrame')

def _align_panel(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
raise NotImplementedError("cannot set using an indexer with a Panel yet!")

def _getitem_tuple(self, tup):
@@ -581,11 +579,9 @@ def _multi_take_opportunity(self, tup):
return False

# just too complicated
for indexer, ax in zip(tup,self.obj._data.axes):
for ax in self.obj._data.axes:
if isinstance(ax, MultiIndex):
return False
elif com._is_bool_indexer(indexer):
return False

return True

@@ -637,6 +633,7 @@ def _getitem_lowerdim(self, tup):
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
try:
# Check for valid axis
loc = ax0.get_loc(tup[0])
except KeyError:
raise e1
@@ -933,6 +930,7 @@ class _IXIndexer(_NDFrameIndexer):
""" A primarily location based indexer, with integer fallback """

def _has_valid_type(self, key, axis):
# check for valid axis (raises if invalid)
ax = self.obj._get_axis(axis)

if isinstance(key, slice):
@@ -945,7 +943,7 @@ def _has_valid_type(self, key, axis):
return True

else:

# check for valid key/axis combo (raises if invalid)
self._convert_scalar_indexer(key, axis)

return True
6 changes: 3 additions & 3 deletions pandas/core/internals.py
@@ -1,7 +1,6 @@
import itertools
import re
from datetime import datetime, timedelta
import copy
from collections import defaultdict

import numpy as np
@@ -589,9 +588,9 @@ def setitem(self, indexer, value):
values = self._try_coerce_result(values)
values = self._try_cast_result(values, dtype)
return [make_block(transf(values), self.items, self.ref_items, ndim=self.ndim, fastpath=True)]
except (ValueError, TypeError) as detail:
except (ValueError, TypeError):
raise
except (Exception) as detail:
except Exception:
pass

return [ self ]
@@ -3681,6 +3680,7 @@ def _lcd_dtype(l):
have_complex = len(counts[ComplexBlock]) > 0
have_dt64 = len(counts[DatetimeBlock]) > 0
have_td64 = len(counts[TimeDeltaBlock]) > 0
# TODO: Use this.
have_sparse = len(counts[SparseBlock]) > 0
have_numeric = have_float or have_complex or have_int

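
The setitem hunk keeps the control flow unchanged (coercion errors propagate, anything else falls back silently) and only drops the `as detail` bindings, since the exception objects were never inspected. The shape of that handler in isolation, as a sketch with invented names:

```python
def try_inplace_set(do_set, fallback):
    """Attempt an in-place set; re-raise coercion errors, otherwise fall back.

    The exception object itself is never examined, so it is not bound to a
    name, which is the same cleanup the hunk above applies.
    """
    try:
        return do_set()
    except (ValueError, TypeError):
        # Coercion problems are the caller's business: propagate them as-is.
        raise
    except Exception:
        # Anything else just means "cannot set in place"; fall back silently.
        pass
    return fallback()


if __name__ == "__main__":
    def failing_set():
        raise KeyError("cannot set in place")
    print(try_inplace_set(failing_set, lambda: "fell back"))   # fell back
```
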
9 changes: 3 additions & 6 deletions pandas/core/panel.py
@@ -3,13 +3,10 @@
"""
# pylint: disable=E1103,W0231,W0212,W0621

from pandas.compat import map, zip, range, lrange, lmap, u, OrderedDict, OrderedDefaultdict
from pandas import compat
import sys
import numpy as np
from pandas.core.common import (PandasError,
_try_sort, _default_index, _infer_dtype_from_scalar,
notnull)
from pandas.core.common import (PandasError, _try_sort, _default_index,
_infer_dtype_from_scalar)
from pandas.core.categorical import Categorical
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
@@ -20,10 +17,10 @@
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas import compat
from pandas.compat import zip, range, lrange, u, OrderedDict, OrderedDefaultdict
from pandas.util.decorators import deprecate, Appender, Substitution
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.nanops as nanops
import pandas.computation.expressions as expressions


1 change: 0 additions & 1 deletion pandas/io/ga.py
@@ -394,7 +394,6 @@ def _get_match(obj_store, name, id, **kwargs):
id_ok = lambda item: id is not None and item.get('id') == id
key_ok = lambda item: key is not None and item.get(key) == val

match = None
if obj_store.get('items'):
# TODO look up gapi for faster lookup
for item in obj_store.get('items'):
5 changes: 3 additions & 2 deletions pandas/io/parsers.py
@@ -720,6 +720,9 @@ def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_
ic = [ ic ]
sic = set(ic)

# TODO: Decide if this is necessary...
orig_header = list(header)

# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
@@ -2033,8 +2036,6 @@ def _stringify_na_values(na_values):
def _get_na_values(col, na_values, na_fvalues):
if isinstance(na_values, dict):
if col in na_values:
values = na_values[col]
fvalues = na_fvalues[col]
return na_values[col], na_fvalues[col]
else:
return _NA_VALUES, set()
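
The `_get_na_values` change drops two locals that were assigned and then ignored in favour of returning the dictionary lookups directly. Functionally the helper resolves per-column NA markers; a simplified standalone version of that logic is sketched below (the default set is a placeholder rather than pandas' `_NA_VALUES`, and the non-dict branch is assumed for completeness).

```python
DEFAULT_NA_VALUES = {"", "NA", "NaN", "null"}   # placeholder defaults


def get_na_values(col, na_values, na_fvalues):
    """Return the (string values, float values) NA sets to use for `col`."""
    if isinstance(na_values, dict):
        if col in na_values:
            # Per-column configuration: return the lookups directly,
            # without binding them to throwaway locals first.
            return na_values[col], na_fvalues[col]
        # Column not configured: fall back to the defaults.
        return DEFAULT_NA_VALUES, set()
    # A single global configuration applies to every column.
    return na_values, na_fvalues
```
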
4 changes: 2 additions & 2 deletions pandas/io/pytables.py
@@ -29,10 +29,10 @@
import pandas.core.common as com
from pandas.tools.merge import concat
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange
from pandas.compat import u_safe as u, PY3, range, lrange, lmap
from pandas.io.common import PerformanceWarning
from pandas.core.config import get_option
from pandas.computation.pytables import Expr, maybe_expression
from pandas.computation.pytables import Expr, maybe_expression, TermValue

import pandas.lib as lib
import pandas.algos as algos
5 changes: 5 additions & 0 deletions pandas/stats/misc.py
@@ -5,6 +5,7 @@
from pandas.core.api import Series, DataFrame, isnull, notnull
from pandas.core.series import remove_na
from pandas.compat import zip
import pandas.core.common as com


def zscore(series):
@@ -157,6 +158,7 @@ def bucketcat(series, cats):
cats = np.asarray(cats)

unique_labels = np.unique(cats)
# TODO: Add test case that reaches this code.
unique_labels = unique_labels[com.notnull(unique_labels)]

# group by
@@ -217,6 +219,7 @@ def _bucketpanel_by(series, xby, yby, xbins, ybins):

labels = _uniquify(xlabels, ylabels, xbins, ybins)

# TODO: Add a test that reaches this part of the code.
mask = com.isnull(labels)
labels[mask] = -1

@@ -232,6 +235,7 @@ def relabel(key):
xlab = xlabels[pos]
ylab = ylabels[pos]

# TODO: Add a test that reaches this part of the code.
return '%sx%s' % (int(xlab) if com.notnull(xlab) else 'NULL',
int(ylab) if com.notnull(ylab) else 'NULL')

@@ -251,6 +255,7 @@ def _bucketpanel_cat(series, xcat, ycat):
sorted_ylabels = ylabels.take(sorter)

unique_labels = np.unique(labels)
# TODO: Add a test that reaches this part of the code.
unique_labels = unique_labels[com.notnull(unique_labels)]

locs = sorted_labels.searchsorted(unique_labels)
1 change: 0 additions & 1 deletion pandas/tools/rplot.py
@@ -553,7 +553,6 @@ def work(self, fig=None, ax=None):
ax = fig.gca()
x = self.data[self.aes['x']]
y = self.data[self.aes['y']]
rvs = np.array([x, y])
x_min = x.min()
x_max = x.max()
y_min = y.min()
4 changes: 4 additions & 0 deletions pandas/tseries/converter.py
@@ -296,12 +296,14 @@ def __call__(self):
try:
start = dmin - delta
except ValueError:
# TODO: Never used.
start = _from_ordinal(1.0)
Contributor Author:
@changhiskhan it looks like neither start nor stop is actually used for anything in this function (according to pyflakes and searching). Can these calculations be removed? (Instead, st and ed, where you'd think they'd be used, just go back to using dmin and dmax.) Otherwise, we could remove these try/except statements.

Contributor Author:
(I'm asking you because it looks like you added this part in)


try:
stop = dmax + delta
except ValueError:
# The magic number!
# TODO: Never used.
stop = _from_ordinal(3652059.9999999)

nmax, nmin = dates.date2num((dmax, dmin))
@@ -357,12 +359,14 @@ def autoscale(self):
try:
start = dmin - delta
except ValueError:
# TODO: Never used.
start = _from_ordinal(1.0)

try:
stop = dmax + delta
except ValueError:
# The magic number!
# TODO: Never used.
stop = _from_ordinal(3652059.9999999)

dmin, dmax = self.datalim_to_dt()
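
To make the question in the review comments concrete: in both `__call__` and `autoscale`, `start` and `stop` are computed with fallbacks but nothing afterwards reads them, so if that reading is right the two `try`/`except` blocks could be dropped without changing behaviour. A stripped-down sketch of the two shapes (all names and fallbacks here are placeholders, not the pandas/matplotlib objects):

```python
def bounds_current(dmin, dmax, delta, lower_fallback, upper_fallback):
    """Mirrors the current shape: start/stop are assigned but never read."""
    try:
        start = dmin - delta
    except ValueError:
        start = lower_fallback
    try:
        stop = dmax + delta
    except ValueError:
        stop = upper_fallback
    # ... the rest of the function only ever touches dmin and dmax, which is
    # the kind of assigned-but-unused binding pyflakes flags.
    return dmin, dmax


def bounds_simplified(dmin, dmax):
    """What the function reduces to if the dead assignments are removed."""
    return dmin, dmax
```
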
1 change: 0 additions & 1 deletion pandas/tseries/plotting.py
@@ -75,7 +75,6 @@ def tsplot(series, plotf, **kwargs):
args.append(style)

lines = plotf(ax, *args, **kwargs)
label = kwargs.get('label', None)

# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)