Skip to content

Commit 04c3d51

Browse files
ShaharNaveh authored and WillAyd committed
STY: Spaces over concat strings - batch 1 (#30707)
1 parent 2128b2a commit 04c3d51

33 files changed

+154
-161
lines changed

pandas/_libs/tslib.pyx

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -120,8 +120,7 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None,
120120
elif box == "datetime":
121121
func_create = create_datetime_from_ts
122122
else:
123-
raise ValueError("box must be one of 'datetime', 'date', 'time' or"
124-
" 'timestamp'")
123+
raise ValueError("box must be one of 'datetime', 'date', 'time' or 'timestamp'")
125124

126125
if is_utc(tz) or tz is None:
127126
for i in range(n):

pandas/_libs/tslibs/strptime.pyx

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -278,8 +278,8 @@ def array_strptime(object[:] values, object fmt,
278278
"the ISO year directive '%G' and a weekday "
279279
"directive '%A', '%a', '%w', or '%u'.")
280280
else:
281-
raise ValueError("ISO week directive '%V' is incompatible with"
282-
" the year directive '%Y'. Use the ISO year "
281+
raise ValueError("ISO week directive '%V' is incompatible with "
282+
"the year directive '%Y'. Use the ISO year "
283283
"'%G' instead.")
284284

285285
# If we know the wk of the year and what day of that wk, we can figure

pandas/_libs/tslibs/timestamps.pyx

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -814,9 +814,9 @@ default 'raise'
814814
'shift_backward')
815815
if nonexistent not in nonexistent_options and not isinstance(
816816
nonexistent, timedelta):
817-
raise ValueError("The nonexistent argument must be one of 'raise',"
818-
" 'NaT', 'shift_forward', 'shift_backward' or"
819-
" a timedelta object")
817+
raise ValueError("The nonexistent argument must be one of 'raise', "
818+
"'NaT', 'shift_forward', 'shift_backward' or "
819+
"a timedelta object")
820820

821821
if self.tzinfo is None:
822822
# tz naive, localize

pandas/core/arrays/datetimes.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -333,8 +333,7 @@ def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False):
333333
if not isinstance(values, np.ndarray):
334334
msg = (
335335
f"Unexpected type '{type(values).__name__}'. 'values' must be "
336-
"a DatetimeArray ndarray, or Series or Index containing one of"
337-
" those."
336+
"a DatetimeArray ndarray, or Series or Index containing one of those."
338337
)
339338
raise ValueError(msg)
340339
if values.ndim not in [1, 2]:

pandas/core/arrays/timedeltas.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -238,8 +238,8 @@ def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False):
238238

239239
if not isinstance(values, np.ndarray):
240240
msg = (
241-
f"Unexpected type '{type(values).__name__}'. 'values' must be a"
242-
" TimedeltaArray ndarray, or Series or Index containing one of those."
241+
f"Unexpected type '{type(values).__name__}'. 'values' must be a "
242+
"TimedeltaArray ndarray, or Series or Index containing one of those."
243243
)
244244
raise ValueError(msg)
245245
if values.ndim not in [1, 2]:

pandas/core/computation/eval.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -339,8 +339,8 @@ def eval(
339339
if parsed_expr.assigner is None:
340340
if multi_line:
341341
raise ValueError(
342-
"Multi-line expressions are only valid"
343-
" if all expressions contain an assignment"
342+
"Multi-line expressions are only valid "
343+
"if all expressions contain an assignment"
344344
)
345345
elif inplace:
346346
raise ValueError("Cannot operate inplace if there is no assignment")

pandas/core/frame.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7000,8 +7000,8 @@ def append(self, other, ignore_index=False, verify_integrity=False, sort=False):
70007000
other = Series(other)
70017001
if other.name is None and not ignore_index:
70027002
raise TypeError(
7003-
"Can only append a Series if ignore_index=True"
7004-
" or if the Series has a name"
7003+
"Can only append a Series if ignore_index=True "
7004+
"or if the Series has a name"
70057005
)
70067006

70077007
index = Index([other.name], name=self.index.name)

pandas/core/generic.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6440,8 +6440,8 @@ def replace(
64406440
if not is_dict_like(to_replace):
64416441
if not is_dict_like(regex):
64426442
raise TypeError(
6443-
'If "to_replace" and "value" are both None'
6444-
' and "to_replace" is not a list, then '
6443+
'If "to_replace" and "value" are both None '
6444+
'and "to_replace" is not a list, then '
64456445
"regex must be a mapping"
64466446
)
64476447
to_replace = regex
@@ -6455,9 +6455,8 @@ def replace(
64556455
if any(are_mappings):
64566456
if not all(are_mappings):
64576457
raise TypeError(
6458-
"If a nested mapping is passed, all values"
6459-
" of the top level mapping must be "
6460-
"mappings"
6458+
"If a nested mapping is passed, all values "
6459+
"of the top level mapping must be mappings"
64616460
)
64626461
# passed a nested dict/Series
64636462
to_rep_dict = {}

pandas/core/groupby/groupby.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -485,8 +485,8 @@ def get_converter(s):
485485
except KeyError:
486486
# turns out it wasn't a tuple
487487
msg = (
488-
"must supply a same-length tuple to get_group"
489-
" with multiple grouping keys"
488+
"must supply a same-length tuple to get_group "
489+
"with multiple grouping keys"
490490
)
491491
raise ValueError(msg)
492492

pandas/core/groupby/grouper.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -605,8 +605,8 @@ def is_in_obj(gpr) -> bool:
605605

606606
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
607607
raise ValueError(
608-
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]})"
609-
" must be same length"
608+
f"Length of grouper ({len(gpr)}) and axis ({obj.shape[axis]}) "
609+
"must be same length"
610610
)
611611

612612
# create the Grouping

pandas/core/indexes/multi.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2073,9 +2073,8 @@ def drop(self, codes, level=None, errors="raise"):
20732073
elif com.is_bool_indexer(loc):
20742074
if self.lexsort_depth == 0:
20752075
warnings.warn(
2076-
"dropping on a non-lexsorted multi-index"
2077-
" without a level parameter may impact "
2078-
"performance.",
2076+
"dropping on a non-lexsorted multi-index "
2077+
"without a level parameter may impact performance.",
20792078
PerformanceWarning,
20802079
stacklevel=3,
20812080
)

pandas/core/resample.py

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1076,10 +1076,9 @@ def _upsample(self, method, limit=None, fill_value=None):
10761076
raise AssertionError("axis must be 0")
10771077
if self._from_selection:
10781078
raise ValueError(
1079-
"Upsampling from level= or on= selection"
1080-
" is not supported, use .set_index(...)"
1081-
" to explicitly set index to"
1082-
" datetime-like"
1079+
"Upsampling from level= or on= selection "
1080+
"is not supported, use .set_index(...) "
1081+
"to explicitly set index to datetime-like"
10831082
)
10841083

10851084
ax = self.ax
@@ -1135,9 +1134,9 @@ def _convert_obj(self, obj):
11351134
if self._from_selection:
11361135
# see GH 14008, GH 12871
11371136
msg = (
1138-
"Resampling from level= or on= selection"
1139-
" with a PeriodIndex is not currently supported,"
1140-
" use .set_index(...) to explicitly set index"
1137+
"Resampling from level= or on= selection "
1138+
"with a PeriodIndex is not currently supported, "
1139+
"use .set_index(...) to explicitly set index"
11411140
)
11421141
raise NotImplementedError(msg)
11431142

pandas/core/reshape/concat.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -351,8 +351,8 @@ def __init__(
351351
for obj in objs:
352352
if not isinstance(obj, (Series, DataFrame)):
353353
msg = (
354-
"cannot concatenate object of type '{typ}';"
355-
" only Series and DataFrame objs are valid".format(typ=type(obj))
354+
"cannot concatenate object of type '{typ}'; "
355+
"only Series and DataFrame objs are valid".format(typ=type(obj))
356356
)
357357
raise TypeError(msg)
358358

@@ -402,8 +402,8 @@ def __init__(
402402
self._is_series = isinstance(sample, Series)
403403
if not 0 <= axis <= sample.ndim:
404404
raise AssertionError(
405-
"axis must be between 0 and {ndim}, input was"
406-
" {axis}".format(ndim=sample.ndim, axis=axis)
405+
"axis must be between 0 and {ndim}, input was "
406+
"{axis}".format(ndim=sample.ndim, axis=axis)
407407
)
408408

409409
# if we have mixed ndims, then convert to highest ndim
@@ -648,8 +648,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde
648648
# make sure that all of the passed indices have the same nlevels
649649
if not len({idx.nlevels for idx in indexes}) == 1:
650650
raise AssertionError(
651-
"Cannot concat indices that do"
652-
" not have the same number of levels"
651+
"Cannot concat indices that do "
652+
"not have the same number of levels"
653653
)
654654

655655
# also copies

pandas/core/reshape/melt.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ def melt(
5151
missing = Index(com.flatten(id_vars)).difference(cols)
5252
if not missing.empty:
5353
raise KeyError(
54-
"The following 'id_vars' are not present"
55-
" in the DataFrame: {missing}"
54+
"The following 'id_vars' are not present "
55+
"in the DataFrame: {missing}"
5656
"".format(missing=list(missing))
5757
)
5858
else:
@@ -73,8 +73,8 @@ def melt(
7373
missing = Index(com.flatten(value_vars)).difference(cols)
7474
if not missing.empty:
7575
raise KeyError(
76-
"The following 'value_vars' are not present in"
77-
" the DataFrame: {missing}"
76+
"The following 'value_vars' are not present in "
77+
"the DataFrame: {missing}"
7878
"".format(missing=list(missing))
7979
)
8080
frame = frame.loc[:, id_vars + value_vars]

pandas/core/reshape/merge.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1246,32 +1246,32 @@ def _validate(self, validate: str):
12461246
if validate in ["one_to_one", "1:1"]:
12471247
if not left_unique and not right_unique:
12481248
raise MergeError(
1249-
"Merge keys are not unique in either left"
1250-
" or right dataset; not a one-to-one merge"
1249+
"Merge keys are not unique in either left "
1250+
"or right dataset; not a one-to-one merge"
12511251
)
12521252
elif not left_unique:
12531253
raise MergeError(
1254-
"Merge keys are not unique in left dataset;"
1255-
" not a one-to-one merge"
1254+
"Merge keys are not unique in left dataset; "
1255+
"not a one-to-one merge"
12561256
)
12571257
elif not right_unique:
12581258
raise MergeError(
1259-
"Merge keys are not unique in right dataset;"
1260-
" not a one-to-one merge"
1259+
"Merge keys are not unique in right dataset; "
1260+
"not a one-to-one merge"
12611261
)
12621262

12631263
elif validate in ["one_to_many", "1:m"]:
12641264
if not left_unique:
12651265
raise MergeError(
1266-
"Merge keys are not unique in left dataset;"
1267-
" not a one-to-many merge"
1266+
"Merge keys are not unique in left dataset; "
1267+
"not a one-to-many merge"
12681268
)
12691269

12701270
elif validate in ["many_to_one", "m:1"]:
12711271
if not right_unique:
12721272
raise MergeError(
1273-
"Merge keys are not unique in right dataset;"
1274-
" not a many-to-one merge"
1273+
"Merge keys are not unique in right dataset; "
1274+
"not a many-to-one merge"
12751275
)
12761276

12771277
elif validate in ["many_to_many", "m:m"]:

pandas/core/strings.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -438,8 +438,8 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
438438

439439
if regex.groups > 0:
440440
warnings.warn(
441-
"This pattern has match groups. To actually get the"
442-
" groups, use str.extract.",
441+
"This pattern has match groups. To actually get the "
442+
"groups, use str.extract.",
443443
UserWarning,
444444
stacklevel=3,
445445
)

pandas/io/clipboards.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@ def read_clipboard(sep=r"\s+", **kwargs): # pragma: no cover
6969
kwargs["engine"] = "python"
7070
elif len(sep) > 1 and kwargs.get("engine") == "c":
7171
warnings.warn(
72-
"read_clipboard with regex separator does not work"
73-
" properly with c engine"
72+
"read_clipboard with regex separator does not work "
73+
"properly with c engine"
7474
)
7575

7676
return read_csv(StringIO(text), sep=sep, **kwargs)

pandas/io/excel/_util.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -154,8 +154,8 @@ def _validate_freeze_panes(freeze_panes):
154154
return True
155155

156156
raise ValueError(
157-
"freeze_panes must be of form (row, column)"
158-
" where row and column are integers"
157+
"freeze_panes must be of form (row, column) "
158+
"where row and column are integers"
159159
)
160160

161161
# freeze_panes wasn't specified, return False so it won't be applied

pandas/io/formats/format.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -579,8 +579,8 @@ def __init__(
579579
else:
580580
raise ValueError(
581581
(
582-
"Formatters length({flen}) should match"
583-
" DataFrame number of columns({dlen})"
582+
"Formatters length({flen}) should match "
583+
"DataFrame number of columns({dlen})"
584584
).format(flen=len(formatters), dlen=len(frame.columns))
585585
)
586586
self.na_rep = na_rep

pandas/io/formats/style.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1272,9 +1272,9 @@ def bar(
12721272
color = [color[0], color[0]]
12731273
elif len(color) > 2:
12741274
raise ValueError(
1275-
"`color` must be string or a list-like"
1276-
" of length 2: [`color_neg`, `color_pos`]"
1277-
" (eg: color=['#d65f5f', '#5fba7d'])"
1275+
"`color` must be string or a list-like "
1276+
"of length 2: [`color_neg`, `color_pos`] "
1277+
"(eg: color=['#d65f5f', '#5fba7d'])"
12781278
)
12791279

12801280
subset = _maybe_numeric_slice(self.data, subset)

pandas/io/parsers.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -612,9 +612,9 @@ def parser_f(
612612

613613
if delim_whitespace and delimiter != default_sep:
614614
raise ValueError(
615-
"Specified a delimiter with both sep and"
616-
" delim_whitespace=True; you can only"
617-
" specify one."
615+
"Specified a delimiter with both sep and "
616+
"delim_whitespace=True; you can only "
617+
"specify one."
618618
)
619619

620620
if engine is not None:
@@ -956,8 +956,8 @@ def _clean_options(self, options, engine):
956956
if sep is None and not delim_whitespace:
957957
if engine == "c":
958958
fallback_reason = (
959-
"the 'c' engine does not support"
960-
" sep=None with delim_whitespace=False"
959+
"the 'c' engine does not support "
960+
"sep=None with delim_whitespace=False"
961961
)
962962
engine = "python"
963963
elif sep is not None and len(sep) > 1:
@@ -1120,9 +1120,9 @@ def _make_engine(self, engine="c"):
11201120
klass = FixedWidthFieldParser
11211121
else:
11221122
raise ValueError(
1123-
f"Unknown engine: {engine} (valid options are"
1124-
' "c", "python", or'
1125-
' "python-fwf")'
1123+
f"Unknown engine: {engine} (valid options are "
1124+
'"c", "python", or '
1125+
'"python-fwf")'
11261126
)
11271127
self._engine = klass(self.f, **self.options)
11281128

pandas/io/pytables.py

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1215,9 +1215,8 @@ def append_to_multiple(
12151215
"""
12161216
if axes is not None:
12171217
raise TypeError(
1218-
"axes is currently not accepted as a parameter to"
1219-
" append_to_multiple; you can create the "
1220-
"tables independently instead"
1218+
"axes is currently not accepted as a parameter to append_to_multiple; "
1219+
"you can create the tables independently instead"
12211220
)
12221221

12231222
if not isinstance(d, dict):
@@ -3548,9 +3547,8 @@ def create_index(self, columns=None, optlevel=None, kind: Optional[str] = None):
35483547
if not v.is_indexed:
35493548
if v.type.startswith("complex"):
35503549
raise TypeError(
3551-
"Columns containing complex values can be stored "
3552-
"but cannot"
3553-
" be indexed when using table format. Either use "
3550+
"Columns containing complex values can be stored but "
3551+
"cannot be indexed when using table format. Either use "
35543552
"fixed format, set index=False, or do not include "
35553553
"the columns containing complex values to "
35563554
"data_columns when initializing the table."

pandas/plotting/_matplotlib/core.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -229,10 +229,9 @@ def _validate_color_args(self):
229229
for char in s:
230230
if char in matplotlib.colors.BASE_COLORS:
231231
raise ValueError(
232-
"Cannot pass 'style' string with a color "
233-
"symbol and 'color' keyword argument. Please"
234-
" use one or the other or pass 'style' "
235-
"without a color symbol"
232+
"Cannot pass 'style' string with a color symbol and "
233+
"'color' keyword argument. Please use one or the other or "
234+
"pass 'style' without a color symbol"
236235
)
237236

238237
def _iter_data(self, data=None, keep_index=False, fillna=None):

0 commit comments

Comments (0)