diff --git a/pandas/core/common.py b/pandas/core/common.py
index a31c92caf4343..e86db79b90350 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -580,7 +580,7 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
             mask_info = (row_mask, col_mask), (row_needs, col_needs)
         if row_needs or col_needs:
             if out is not None and out.dtype != dtype:
-                raise Exception('Incompatible type for fill_value')
+                raise TypeError('Incompatible type for fill_value')
             else:
                 # if not, then depromote, set fill_value to dummy
                 # (it won't be used but we don't want the cython code
@@ -814,7 +814,7 @@ def changeit():
         # if we are trying to do something unsafe
         # like put a bigger dtype in a smaller one, use the smaller one
         if change.dtype.itemsize < r.dtype.itemsize:
-            raise Exception("cannot change dtype of input to smaller size")
+            raise TypeError("cannot change dtype of input to smaller size")
         change.dtype = r.dtype
         change[:] = r

@@ -1259,7 +1259,7 @@ def ensure_float(arr):

 def _mut_exclusive(arg1, arg2):
     if arg1 is not None and arg2 is not None:
-        raise Exception('mutually exclusive arguments')
+        raise TypeError('mutually exclusive arguments')
     elif arg1 is not None:
         return arg1
     else:
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 40d80e91f0264..f760e6226b454 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -825,7 +825,7 @@ def __init__(self, obj, path_or_buf, sep=",", na_rep='', float_format=None,
         # validate mi options
         if self.has_mi_columns:
             if cols is not None:
-                raise Exception("cannot specify cols with a multi_index on the columns")
+                raise TypeError("cannot specify cols with a multi_index on the columns")

         if cols is not None:
             if isinstance(cols,Index):
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f0145364363ac..158543021c4e7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -933,7 +933,7 @@ def dot(self, other):
             lvals = self.values
             rvals = np.asarray(other)
             if lvals.shape[1] != rvals.shape[0]:
-                raise Exception('Dot product shape mismatch, %s vs %s' %
+                raise ValueError('Dot product shape mismatch, %s vs %s' %
                                 (lvals.shape, rvals.shape))

         if isinstance(other, DataFrame):
@@ -2844,7 +2844,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False,

         if verify_integrity and not index.is_unique:
             duplicates = index.get_duplicates()
-            raise Exception('Index has duplicate keys: %s' % duplicates)
+            raise ValueError('Index has duplicate keys: %s' % duplicates)

         for c in to_remove:
             del frame[c]
@@ -3315,7 +3315,7 @@ def sortlevel(self, level=0, axis=0, ascending=True, inplace=False):
         axis = self._get_axis_number(axis)
         the_axis = self._get_axis(axis)
         if not isinstance(the_axis, MultiIndex):
-            raise Exception('can only sort by level with a hierarchical index')
+            raise TypeError('can only sort by level with a hierarchical index')

         new_axis, indexer = the_axis.sortlevel(level, ascending=ascending)

@@ -3377,7 +3377,7 @@ def reorder_levels(self, order, axis=0):

         axis = self._get_axis_number(axis)
         if not isinstance(self._get_axis(axis), MultiIndex):  # pragma: no cover
-            raise Exception('Can only reorder levels on a hierarchical axis.')
+            raise TypeError('Can only reorder levels on a hierarchical axis.')

         result = self.copy()

@@ -3789,7 +3789,7 @@ def rename(self, index=None, columns=None, copy=True, inplace=False):
         from pandas.core.series import _get_rename_function

         if index is None and columns is None:
-            raise Exception('must pass either index or columns')
+            raise TypeError('must pass either index or columns')

         index_f = _get_rename_function(index)
         columns_f = _get_rename_function(columns)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index a5880b9f18670..a9ccda3778780 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -361,7 +361,7 @@ def __hash__(self):
         return hash(self.view(np.ndarray))

     def __setitem__(self, key, value):
-        raise Exception(str(self.__class__) + ' object is immutable')
+        raise TypeError(str(self.__class__) + ' does not support item assignment')

     def __getitem__(self, key):
         """Override numpy.ndarray's __getitem__ method to work as desired"""
@@ -547,7 +547,7 @@ def order(self, return_indexer=False, ascending=True):
             return sorted_index

     def sort(self, *args, **kwargs):
-        raise Exception('Cannot sort an Index object')
+        raise TypeError('Cannot sort an %r object' % self.__class__.__name__)

     def shift(self, periods=1, freq=None):
         """
@@ -606,7 +606,7 @@ def union(self, other):
         union : Index
         """
         if not hasattr(other, '__iter__'):
-            raise Exception('Input must be iterable!')
+            raise TypeError('Input must be iterable!')

         if len(other) == 0 or self.equals(other):
             return self
@@ -671,7 +671,7 @@ def intersection(self, other):
         intersection : Index
         """
         if not hasattr(other, '__iter__'):
-            raise Exception('Input must be iterable!')
+            raise TypeError('Input must be iterable!')

         self._assert_can_do_setop(other)

@@ -713,7 +713,7 @@ def diff(self, other):

         """
         if not hasattr(other, '__iter__'):
-            raise Exception('Input must be iterable!')
+            raise TypeError('Input must be iterable!')

         if self.equals(other):
             return Index([], name=self.name)
@@ -1080,7 +1080,7 @@ def _join_level(self, other, level, how='left', return_indexers=False):
         the MultiIndex will not be changed (currently)
         """
         if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
-            raise Exception('Join on level between two MultiIndex objects '
+            raise TypeError('Join on level between two MultiIndex objects '
                             'is ambiguous')

         left, right = self, other
@@ -2298,7 +2298,7 @@ def _partial_tup_index(self, tup, side='left'):

             if lab not in lev:
                 if not lev.is_type_compatible(lib.infer_dtype([lab])):
-                    raise Exception('Level type mismatch: %s' % lab)
+                    raise TypeError('Level type mismatch: %s' % lab)

                 # short circuit
                 loc = lev.searchsorted(lab, side=side)
@@ -2738,7 +2738,7 @@ def _ensure_index(index_like):

 def _validate_join_method(method):
     if method not in ['left', 'right', 'inner', 'outer']:
-        raise Exception('do not recognize join method %s' % method)
+        raise ValueError('do not recognize join method %s' % method)


 # TODO: handle index names!
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 01e976e397111..0b6de7213ec1f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -1571,7 +1571,7 @@ def xs(self, key, axis=1, copy=True):
         new_blocks = []
         if len(self.blocks) > 1:
             if not copy:
-                raise Exception('cannot get view of mixed-type or '
+                raise TypeError('cannot get view of mixed-type or '
                                 'non-consolidated DataFrame')
             for blk in self.blocks:
                 newb = make_block(blk.values[slicer],
@@ -1604,7 +1604,7 @@ def fast_2d_xs(self, loc, copy=False):
             return result

         if not copy:
-            raise Exception('cannot get view of mixed-type or '
+            raise TypeError('cannot get view of mixed-type or '
                             'non-consolidated DataFrame')

         dtype = _interleaved_dtype(self.blocks)
@@ -2093,7 +2093,7 @@ def _maybe_rename_join(self, other, lsuffix, rsuffix, copydata=True):
         to_rename = self.items.intersection(other.items)
         if len(to_rename) > 0:
             if not lsuffix and not rsuffix:
-                raise Exception('columns overlap: %s' % to_rename)
+                raise ValueError('columns overlap: %s' % to_rename)

             def lrenamer(x):
                 if x in to_rename:
@@ -2377,7 +2377,7 @@ def _shape_compat(x):
     else:
         items = _ensure_index([ n for n in names if n in ref_items ])
         if len(items) != len(stacked):
-            raise Exception("invalid names passed _stack_arrays")
+            raise ValueError("invalid names passed _stack_arrays")

     return items, stacked, placement

diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 0a099661c58f1..455ab96ac08ba 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -137,7 +137,7 @@ def f(self, other):
         if isinstance(other, self._constructor):
             return self._compare_constructor(other, func)
         elif isinstance(other, (self._constructor_sliced, DataFrame, Series)):
-            raise Exception("input needs alignment for this object [%s]" %
+            raise ValueError("input needs alignment for this object [%s]" %
                             self._constructor)
         else:
             return self._combine_const(other, na_op)
diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py
index 08ff3b70dcb13..4928cb565147a 100644
--- a/pandas/core/panelnd.py
+++ b/pandas/core/panelnd.py
@@ -58,7 +58,7 @@ def create_nd_panel_factory(klass_name, axis_orders, axis_slices, slicer, axis_a
     #### define the methods ####
     def __init__(self, *args, **kwargs):
         if not (kwargs.get('data') or len(args)):
-            raise Exception(
+            raise TypeError(
                 "must supply at least a data argument to [%s]" % klass_name)
         if 'copy' not in kwargs:
             kwargs['copy'] = False
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 938cd99dcef8d..605f8188ccf4c 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1962,7 +1962,8 @@ def clip(self, lower=None, upper=None, out=None):
         clipped : Series
         """
         if out is not None:  # pragma: no cover
-            raise Exception('out argument is not supported yet')
+            # TODO: Support out argument?
+            raise NotImplementedError('out argument is not supported yet')

         result = self
         if lower is not None:
@@ -2028,7 +2029,7 @@ def dot(self, other):
             lvals = self.values
             rvals = np.asarray(other)
             if lvals.shape[0] != rvals.shape[0]:
-                raise Exception('Dot product shape mismatch, %s vs %s' %
+                raise ValueError('Dot product shape mismatch, %s vs %s' %
                                 (lvals.shape, rvals.shape))

         if isinstance(other, DataFrame):
@@ -2379,7 +2380,7 @@ def sortlevel(self, level=0, ascending=True):
         sorted : Series
         """
         if not isinstance(self.index, MultiIndex):
-            raise Exception('can only sort by level with a hierarchical index')
+            raise TypeError('can only sort by level with a hierarchical index')

         new_index, indexer = self.index.sortlevel(level, ascending=ascending)
         new_values = self.values.take(indexer)
@@ -2417,7 +2418,7 @@ def reorder_levels(self, order):
         type of caller (new object)
         """
         if not isinstance(self.index, MultiIndex):  # pragma: no cover
-            raise Exception('Can only reorder levels on a hierarchical axis.')
+            raise TypeError('Can only reorder levels on a hierarchical axis.')

         result = self.copy()
         result.index = result.index.reorder_levels(order)
@@ -3156,7 +3157,7 @@ def interpolate(self, method='linear'):
         """
         if method == 'time':
             if not isinstance(self, TimeSeries):
-                raise Exception('time-weighted interpolation only works'
+                raise TypeError('time-weighted interpolation only works'
                                 'on TimeSeries')
             method = 'values'
             # inds = pa.array([d.toordinal() for d in self.index])
@@ -3279,7 +3280,7 @@ def tz_localize(self, tz, copy=True):

         if not isinstance(self.index, DatetimeIndex):
             if len(self.index) > 0:
-                raise Exception('Cannot tz-localize non-time series')
+                raise TypeError('Cannot tz-localize non-time series')

             new_index = DatetimeIndex([], tz=tz)
         else:
@@ -3413,7 +3414,7 @@ def _try_cast(arr, take_fast_path):

     elif subarr.ndim > 1:
         if isinstance(data, pa.Array):
-            raise Exception('Data must be 1-dimensional')
+            raise ValueError('Data must be 1-dimensional')
         else:
             subarr = _asarray_tuplesafe(data, dtype=dtype)

diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index 95702847d9c7f..0a57b13cc10fe 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -368,7 +368,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
         if sheet_name is None:
             sheet_name = self.cur_sheet
         if sheet_name is None:  # pragma: no cover
-            raise Exception('Must pass explicit sheet_name or set '
+            raise TypeError('Must pass explicit sheet_name or set '
                             'cur_sheet property')
         if self.use_xlsx:
             self._writecells_xlsx(cells, sheet_name, startrow, startcol)
diff --git a/pandas/rpy/common.py b/pandas/rpy/common.py
index acc562925c925..4d0becdfc2cef 100644
--- a/pandas/rpy/common.py
+++ b/pandas/rpy/common.py
@@ -195,7 +195,7 @@ def convert_robj(obj, use_pandas=True):
         if isinstance(obj, rpy_type):
             return converter(obj)

-    raise Exception('Do not know what to do with %s object' % type(obj))
+    raise TypeError('Do not know what to do with %s object' % type(obj))


 def convert_to_r_posixct(obj):
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index 035db279064a0..028cad820abf2 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -261,7 +261,7 @@ def _get_val_at(self, loc):
             loc += n

         if loc >= len(self) or loc < 0:
-            raise Exception('Out of bounds access')
+            raise IndexError('Out of bounds access')

         sp_loc = self.sp_index.lookup(loc)
         if sp_loc == -1:
@@ -283,7 +283,7 @@ def take(self, indices, axis=0):

         n = len(self)
         if (indices < 0).any() or (indices >= n).any():
-            raise Exception('out of bounds access')
+            raise IndexError('out of bounds access')

         if self.sp_index.npoints > 0:
             locs = np.array([self.sp_index.lookup(loc) for loc in indices])
@@ -296,10 +296,10 @@ def take(self, indices, axis=0):
         return result

     def __setitem__(self, key, value):
-        raise Exception('SparseArray objects are immutable')
+        raise TypeError('%r object does not support item assignment' % self.__class__.__name__)

     def __setslice__(self, i, j, value):
-        raise Exception('SparseArray objects are immutable')
+        raise TypeError('%r object does not support item assignment' % self.__class__.__name__)

     def to_dense(self):
         """
@@ -313,7 +313,7 @@ def astype(self, dtype=None):
         """
         dtype = np.dtype(dtype)
         if dtype is not None and dtype not in (np.float_, float):
-            raise Exception('Can only support floating point data for now')
+            raise TypeError('Can only support floating point data for now')
         return self.copy()

     def copy(self, deep=True):
diff --git a/pandas/sparse/frame.py b/pandas/sparse/frame.py
index 9694cc005d178..bb1f5555f60b9 100644
--- a/pandas/sparse/frame.py
+++ b/pandas/sparse/frame.py
@@ -195,10 +195,10 @@ def _init_matrix(self, data, index, columns, dtype=None):
            columns = _default_index(K)

         if len(columns) != K:
-            raise Exception('Column length mismatch: %d vs. %d' %
+            raise ValueError('Column length mismatch: %d vs. %d' %
                             (len(columns), K))
         if len(index) != N:
-            raise Exception('Index length mismatch: %d vs. %d' %
+            raise ValueError('Index length mismatch: %d vs. %d' %
                             (len(index), N))

         data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
@@ -585,7 +585,7 @@ def _combine_const(self, other, func):
     def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
                        limit=None):
         if level is not None:
-            raise Exception('Reindex by level not supported for sparse')
+            raise TypeError('Reindex by level not supported for sparse')

         if self.index.equals(index):
             if copy:
@@ -616,7 +616,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan,

     def _reindex_columns(self, columns, copy, level, fill_value, limit=None):
         if level is not None:
-            raise Exception('Reindex by level not supported for sparse')
+            raise TypeError('Reindex by level not supported for sparse')

         if com.notnull(fill_value):
             raise NotImplementedError
@@ -891,7 +891,7 @@ def stack_sparse_frame(frame):
     vals_to_concat = []
     for _, series in frame.iteritems():
         if not np.isnan(series.fill_value):
-            raise Exception('This routine assumes NaN fill value')
+            raise TypeError('This routine assumes NaN fill value')

         int_index = series.sp_index.to_int_index()
         inds_to_concat.append(int_index.indices)
@@ -931,7 +931,7 @@ def homogenize(series_dict):

     for _, series in series_dict.iteritems():
         if not np.isnan(series.fill_value):
-            raise Exception('this method is only valid with NaN fill values')
+            raise TypeError('this method is only valid with NaN fill values')

         if index is None:
             index = series.sp_index
diff --git a/pandas/sparse/panel.py b/pandas/sparse/panel.py
index 0b2842155b299..246e6fa93918f 100644
--- a/pandas/sparse/panel.py
+++ b/pandas/sparse/panel.py
@@ -249,7 +249,7 @@ def to_frame(self, filter_observations=True):
         frame : DataFrame
         """
         if not filter_observations:
-            raise Exception('filter_observations=False not supported for '
+            raise TypeError('filter_observations=False not supported for '
                             'SparsePanel.to_long')

         I, N, K = self.shape
@@ -325,7 +325,7 @@ def reindex(self, major=None, items=None, minor=None, major_axis=None,
                 if item in self._frames:
                     new_frames[item] = self._frames[item]
                 else:
-                    raise Exception('Reindexing with new items not yet '
+                    raise NotImplementedError('Reindexing with new items not yet '
                                     'supported')
         else:
             new_frames = self._frames
@@ -488,7 +488,7 @@ def _stack_sparse_info(frame):
         series = frame[col]

         if not np.isnan(series.fill_value):
-            raise Exception('This routine assumes NaN fill value')
+            raise TypeError('This routine assumes NaN fill value')

         int_index = series.sp_index.to_int_index()
         inds_to_concat.append(int_index.indices)
diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py
index bd01845a295b6..1b8d3541da289 100644
--- a/pandas/sparse/series.py
+++ b/pandas/sparse/series.py
@@ -133,7 +133,7 @@ def __new__(cls, data, index=None, sparse_index=None, kind='block',
                     raise AssertionError()
             else:
                 if index is None:
-                    raise Exception('must pass index!')
+                    raise TypeError('must pass index!')

                 length = len(index)

@@ -388,7 +388,7 @@ def astype(self, dtype=None):

        """
        if dtype is not None and dtype not in (np.float_, float):
-            raise Exception('Can only support floating point data')
+            raise TypeError('Can only support floating point data')

        return self.copy()

diff --git a/pandas/stats/common.py b/pandas/stats/common.py
index c3034dbc390bf..85e8916b30169 100644
--- a/pandas/stats/common.py
+++ b/pandas/stats/common.py
@@ -10,7 +10,7 @@ def _get_cluster_type(cluster_type):
     elif cluster_type_up == 'TIME':
         return 'time'
     else:  # pragma: no cover
-        raise Exception('Unrecognized cluster type: %s' % cluster_type)
+        raise ValueError('Unrecognized cluster type: %s' % cluster_type)

 _CLUSTER_TYPES = {
     0: 'time',
@@ -35,7 +35,7 @@ def _get_window_type(window_type):
     elif window_type_up == 'EXPANDING':
         return 'expanding'
     else:  # pragma: no cover
-        raise Exception('Unrecognized window type: %s' % window_type)
+        raise ValueError('Unrecognized window type: %s' % window_type)


 def banner(text, width=80):
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 75e35b403dd78..d11fa4a20b084 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -1039,7 +1039,7 @@ def _concat_blocks(self, blocks):
         if self.axis > 0:
             # Not safe to remove this check, need to profile
             if not _all_indexes_same([b.items for b in blocks]):
-                raise Exception('dtypes are not consistent throughout '
+                raise TypeError('dtypes are not consistent throughout '
                                 'DataFrames')
             return make_block(concat_values, blocks[0].items, self.new_axes[0])
         else:
@@ -1184,7 +1184,7 @@ def _maybe_check_integrity(self, concat_index):
         if self.verify_integrity:
             if not concat_index.is_unique:
                 overlap = concat_index.get_duplicates()
-                raise Exception('Indexes have overlapping values: %s'
+                raise ValueError('Indexes have overlapping values: %s'
                                 % str(overlap))

diff --git a/pandas/tools/tile.py b/pandas/tools/tile.py
index 4c68594a8a093..ffed6cafc1047 100644
--- a/pandas/tools/tile.py
+++ b/pandas/tools/tile.py
@@ -151,7 +151,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, retbins=False,
     ids = bins.searchsorted(x, side=side)

     if len(algos.unique(bins)) < len(bins):
-        raise Exception('Bin edges must be unique: %s' % repr(bins))
+        raise ValueError('Bin edges must be unique: %s' % repr(bins))

     if include_lowest:
         ids[x == bins[0]] = 1
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 5985a8a898b27..28127c34c2e96 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -499,7 +499,7 @@ def get_offset(name):
     if offset is not None:
         return offset
     else:
-        raise Exception('Bad rule name requested: %s!' % name)
+        raise ValueError('Bad rule name requested: %s!' % name)


 getOffset = get_offset
@@ -522,7 +522,7 @@ def get_offset_name(offset):
     if name is not None:
         return name
     else:
-        raise Exception('Bad rule given: %s!' % offset)
+        raise ValueError('Bad rule given: %s!' % offset)


 def get_legacy_offset_name(offset):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 1cb986ee6cd7c..7016a216f5d60 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -432,7 +432,7 @@ def _cached_range(cls, start=None, end=None, periods=None, offset=None,
             end = Timestamp(end)

         if offset is None:
-            raise Exception('Must provide a DateOffset!')
+            raise TypeError('Must provide a DateOffset!')

         drc = _daterange_cache
         if offset not in _daterange_cache:
@@ -926,10 +926,10 @@ def _maybe_utc_convert(self, other):
         if isinstance(other, DatetimeIndex):
             if self.tz is not None:
                 if other.tz is None:
-                    raise Exception('Cannot join tz-naive with tz-aware '
+                    raise TypeError('Cannot join tz-naive with tz-aware '
                                     'DatetimeIndex')
             elif other.tz is not None:
-                raise Exception('Cannot join tz-naive with tz-aware '
+                raise TypeError('Cannot join tz-naive with tz-aware '
                                 'DatetimeIndex')

             if self.tz != other.tz:
@@ -1482,7 +1482,7 @@ def tz_convert(self, tz):

         if self.tz is None:
             # tz naive, use tz_localize
-            raise Exception('Cannot convert tz-naive timestamps, use '
+            raise TypeError('Cannot convert tz-naive timestamps, use '
                             'tz_localize to localize')

         # No conversion since timestamps are all UTC to begin with
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index 025a12a17687e..9585d1f81e81d 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -351,7 +351,7 @@ def apply(self, other):
             return BDay(self.n, offset=self.offset + other,
                         normalize=self.normalize)
         else:
-            raise Exception('Only know how to combine business day with '
+            raise TypeError('Only know how to combine business day with '
                             'datetime or timedelta!')

     @classmethod
@@ -487,7 +487,7 @@ def __init__(self, n=1, **kwds):

         if self.weekday is not None:
             if self.weekday < 0 or self.weekday > 6:
-                raise Exception('Day must be 0<=day<=6, got %d' %
+                raise ValueError('Day must be 0<=day<=6, got %d' %
                                 self.weekday)

         self._inc = timedelta(weeks=1)
@@ -562,13 +562,13 @@ def __init__(self, n=1, **kwds):
         self.week = kwds['week']

         if self.n == 0:
-            raise Exception('N cannot be 0')
+            raise ValueError('N cannot be 0')

         if self.weekday < 0 or self.weekday > 6:
-            raise Exception('Day must be 0<=day<=6, got %d' %
+            raise ValueError('Day must be 0<=day<=6, got %d' %
                             self.weekday)
         if self.week < 0 or self.week > 3:
-            raise Exception('Week must be 0<=day<=3, got %d' %
+            raise ValueError('Week must be 0<=day<=3, got %d' %
                             self.week)

         self.kwds = kwds
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 90bc0beb8eb84..de6112f40b4ad 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -354,6 +354,6 @@ def ole2datetime(oledt):
     # Excel has a bug where it thinks the date 2/29/1900 exists
     # we just reject any date before 3/1/1900.
     if val < 61:
-        raise Exception("Value is outside of acceptable range: %s " % val)
+        raise ValueError("Value is outside of acceptable range: %s " % val)

     return OLE_TIME_ZERO + timedelta(days=val)
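
For illustration only (not part of the patch): a minimal sketch of how the narrower exception types surface to callers after this change. The example data and values below are hypothetical; the two cases shown correspond to the set_index and _bins_to_cuts hunks above.

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2], 'b': [3, 4, 5]})

try:
    # set_index with verify_integrity=True on a duplicated key now raises
    # ValueError (previously a bare Exception).
    df.set_index('a', verify_integrity=True)
except ValueError as err:
    print('duplicate keys:', err)

try:
    # cut with non-unique bin edges now raises ValueError as well.
    pd.cut(np.array([1, 2, 3]), bins=[0, 1, 1, 3])
except ValueError as err:
    print('bad bins:', err)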