diff --git a/pandas/_libs/tslibs/c_timestamp.pyx b/pandas/_libs/tslibs/c_timestamp.pyx
index 2c72cec18f096..5be35c13f5737 100644
--- a/pandas/_libs/tslibs/c_timestamp.pyx
+++ b/pandas/_libs/tslibs/c_timestamp.pyx
@@ -59,10 +59,10 @@ def integer_op_not_supported(obj):
# GH#30886 using an fstring raises SystemError
int_addsub_msg = (
- "Addition/subtraction of integers and integer-arrays with {cls} is "
- "no longer supported. Instead of adding/subtracting `n`, "
- "use `n * obj.freq`"
- ).format(cls=cls)
+ f"Addition/subtraction of integers and integer-arrays with {cls} is "
+        "no longer supported. Instead of adding/subtracting `n`, "
+        "use `n * obj.freq`"
+ )
return TypeError(int_addsub_msg)
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx
index 3742506a7f8af..67bc51892a4e1 100644
--- a/pandas/_libs/tslibs/timedeltas.pyx
+++ b/pandas/_libs/tslibs/timedeltas.pyx
@@ -639,7 +639,7 @@ cdef inline int64_t parse_iso_format_string(object ts) except? -1:
bint have_dot = 0, have_value = 0, neg = 0
list number = [], unit = []
- err_msg = "Invalid ISO 8601 Duration format - {}".format(ts)
+ err_msg = f"Invalid ISO 8601 Duration format - {ts}"
for c in ts:
# number (ascii codes)
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py
index c8bb0878b564d..528cc32b7fbeb 100644
--- a/pandas/core/arrays/interval.py
+++ b/pandas/core/arrays/interval.py
@@ -1126,8 +1126,8 @@ def __arrow_array__(self, type=None):
subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
except TypeError:
raise TypeError(
- "Conversion to arrow with subtype '{}' "
- "is not supported".format(self.dtype.subtype)
+ f"Conversion to arrow with subtype '{self.dtype.subtype}' "
+                "is not supported"
)
interval_type = ArrowIntervalType(subtype, self.closed)
storage_array = pyarrow.StructArray.from_arrays(
@@ -1155,15 +1155,13 @@ def __arrow_array__(self, type=None):
# ensure we have the same subtype and closed attributes
if not type.equals(interval_type):
raise TypeError(
- "Not supported to convert IntervalArray to type with "
- "different 'subtype' ({0} vs {1}) and 'closed' ({2} vs {3}) "
- "attributes".format(
- self.dtype.subtype, type.subtype, self.closed, type.closed
- )
+                    "Not supported to convert IntervalArray to type with "
+ f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
+ f"and 'closed' ({self.closed} vs {type.closed}) attributes"
)
else:
raise TypeError(
- "Not supported to convert IntervalArray to '{0}' type".format(type)
+ f"Not supported to convert IntervalArray to '{type}' type"
)
return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)
diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py
index 3366f10b92604..b9cbc6c3ad8bd 100644
--- a/pandas/core/util/hashing.py
+++ b/pandas/core/util/hashing.py
@@ -295,7 +295,7 @@ def hash_array(
elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
vals = vals.view("i8").astype("u8", copy=False)
elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
- vals = vals.view("u{}".format(vals.dtype.itemsize)).astype("u8")
+ vals = vals.view(f"u{vals.dtype.itemsize}").astype("u8")
else:
# With repeated values, its MUCH faster to categorize object dtypes,
# then hash and rename categories. We allow skipping the categorization
diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py
index 149533bf0c238..4a429949c9a08 100644
--- a/pandas/io/formats/format.py
+++ b/pandas/io/formats/format.py
@@ -187,7 +187,7 @@ def _get_footer(self) -> str:
if self.length:
if footer:
footer += ", "
- footer += "Length: {length}".format(length=len(self.categorical))
+ footer += f"Length: {len(self.categorical)}"
level_info = self.categorical._repr_categories_info()
@@ -217,7 +217,7 @@ def to_string(self) -> str:
fmt_values = self._get_formatted_values()
- fmt_values = ["{i}".format(i=i) for i in fmt_values]
+ fmt_values = [f"{i}" for i in fmt_values]
fmt_values = [i.strip() for i in fmt_values]
values = ", ".join(fmt_values)
result = ["[" + values + "]"]
@@ -301,28 +301,26 @@ def _get_footer(self) -> str:
assert isinstance(
self.series.index, (ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex)
)
- footer += "Freq: {freq}".format(freq=self.series.index.freqstr)
+ footer += f"Freq: {self.series.index.freqstr}"
if self.name is not False and name is not None:
if footer:
footer += ", "
series_name = pprint_thing(name, escape_chars=("\t", "\r", "\n"))
- footer += (
- ("Name: {sname}".format(sname=series_name)) if name is not None else ""
- )
+ footer += f"Name: {series_name}" if name is not None else ""
if self.length is True or (self.length == "truncate" and self.truncate_v):
if footer:
footer += ", "
- footer += "Length: {length}".format(length=len(self.series))
+ footer += f"Length: {len(self.series)}"
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, "name", None)
if name:
if footer:
footer += ", "
- footer += "dtype: {typ}".format(typ=pprint_thing(name))
+ footer += f"dtype: {pprint_thing(name)}"
# level infos are added to the end and in a new line, like it is done
# for Categoricals
@@ -359,9 +357,7 @@ def to_string(self) -> str:
footer = self._get_footer()
if len(series) == 0:
- return "{name}([], {footer})".format(
- name=type(self.series).__name__, footer=footer
- )
+ return f"{type(self.series).__name__}([], {footer})"
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
@@ -584,10 +580,8 @@ def __init__(
self.formatters = formatters
else:
raise ValueError(
- (
- "Formatters length({flen}) should match "
- "DataFrame number of columns({dlen})"
- ).format(flen=len(formatters), dlen=len(frame.columns))
+ f"Formatters length({len(formatters)}) should match "
+ f"DataFrame number of columns({len(frame.columns)})"
)
self.na_rep = na_rep
self.decimal = decimal
@@ -816,10 +810,10 @@ def write_result(self, buf: IO[str]) -> None:
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
- info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format(
- name=type(self.frame).__name__,
- col=pprint_thing(frame.columns),
- idx=pprint_thing(frame.index),
+ info_line = (
+ f"Empty {type(self.frame).__name__}\n"
+ f"Columns: {pprint_thing(frame.columns)}\n"
+ f"Index: {pprint_thing(frame.index)}"
)
text = info_line
else:
@@ -865,11 +859,7 @@ def write_result(self, buf: IO[str]) -> None:
buf.writelines(text)
if self.should_show_dimensions:
- buf.write(
- "\n\n[{nrows} rows x {ncols} columns]".format(
- nrows=len(frame), ncols=len(frame.columns)
- )
- )
+ buf.write(f"\n\n[{len(frame)} rows x {len(frame.columns)} columns]")
def _join_multiline(self, *args) -> str:
lwidth = self.line_width
@@ -1075,7 +1065,7 @@ def _get_formatted_index(self, frame: "DataFrame") -> List[str]:
# empty space for columns
if self.show_col_idx_names:
- col_header = ["{x}".format(x=x) for x in self._get_column_name_list()]
+ col_header = [f"{x}" for x in self._get_column_name_list()]
else:
col_header = [""] * columns.nlevels
@@ -1211,10 +1201,8 @@ def _format_strings(self) -> List[str]:
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
- fmt_str = "{{x: .{prec:d}g}}".format(
- prec=get_option("display.precision")
- )
- float_format = lambda x: fmt_str.format(x=x)
+ precision = get_option("display.precision")
+ float_format = lambda x: f"{x: .{precision:d}g}"
else:
float_format = self.float_format
@@ -1240,10 +1228,10 @@ def _format(x):
pass
return self.na_rep
elif isinstance(x, PandasObject):
- return "{x}".format(x=x)
+ return f"{x}"
else:
# object dtype
- return "{x}".format(x=formatter(x))
+ return f"{formatter(x)}"
vals = self.values
if isinstance(vals, Index):
@@ -1259,7 +1247,7 @@ def _format(x):
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
- fmt_values.append(" {v}".format(v=_format(v)))
+ fmt_values.append(f" {_format(v)}")
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
@@ -1268,8 +1256,8 @@ def _format(x):
# to include a space if we get here.
tpl = "{v}"
else:
- tpl = " {v}"
- fmt_values.append(tpl.format(v=_format(v)))
+                    tpl = " {v}"
+                fmt_values.append(tpl.format(v=_format(v)))
return fmt_values
@@ -1442,7 +1430,7 @@ def _format_strings(self) -> List[str]:
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self) -> List[str]:
- formatter = self.formatter or (lambda x: "{x: d}".format(x=x))
+ formatter = self.formatter or (lambda x: f"{x: d}")
fmt_values = [formatter(x) for x in self.values]
return fmt_values
@@ -1726,7 +1714,7 @@ def _formatter(x):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
- result = "'{res}'".format(res=result)
+ result = f"'{result}'"
return result
return _formatter
@@ -1889,16 +1877,16 @@ def __call__(self, num: Union[int, float]) -> str:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
- prefix = "E-{pow10:02d}".format(pow10=-int_pow10)
+ prefix = f"E-{-int_pow10:02d}"
else:
- prefix = "E+{pow10:02d}".format(pow10=int_pow10)
+ prefix = f"E+{int_pow10:02d}"
mant = sign * dnum / (10 ** pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
- format_str = "{{mant: .{acc:d}f}}{{prefix}}".format(acc=self.accuracy)
+ format_str = f"{{mant: .{self.accuracy:d}f}}{{prefix}}"
formatted = format_str.format(mant=mant, prefix=prefix)
diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py
index e3161415fe2bc..3bc47cefd45c0 100644
--- a/pandas/io/formats/html.py
+++ b/pandas/io/formats/html.py
@@ -56,7 +56,7 @@ def __init__(
self.table_id = self.fmt.table_id
self.render_links = self.fmt.render_links
if isinstance(self.fmt.col_space, int):
- self.fmt.col_space = "{colspace}px".format(colspace=self.fmt.col_space)
+ self.fmt.col_space = f"{self.fmt.col_space}px"
@property
def show_row_idx_names(self) -> bool:
@@ -124,7 +124,7 @@ def write_th(
"""
if header and self.fmt.col_space is not None:
tags = tags or ""
- tags += 'style="min-width: {colspace};"'.format(colspace=self.fmt.col_space)
+ tags += f'style="min-width: {self.fmt.col_space};"'
self._write_cell(s, kind="th", indent=indent, tags=tags)
@@ -135,9 +135,9 @@ def _write_cell(
self, s: Any, kind: str = "td", indent: int = 0, tags: Optional[str] = None
) -> None:
if tags is not None:
- start_tag = "<{kind} {tags}>".format(kind=kind, tags=tags)
+ start_tag = f"<{kind} {tags}>"
else:
- start_tag = "<{kind}>".format(kind=kind)
+ start_tag = f"<{kind}>"
if self.escape:
# escape & first to prevent double escaping of &
@@ -149,16 +149,13 @@ def _write_cell(
if self.render_links and is_url(rs):
rs_unescaped = pprint_thing(s, escape_chars={}).strip()
-                start_tag += '<a href="{url}" target="_blank">'.format(url=rs_unescaped)
+                start_tag += f'<a href="{rs_unescaped}" target="_blank">'
                 end_a = "</a>"
else:
end_a = ""
self.write(
-            "{start}{rs}{end_a}</{kind}>".format(
-                start=start_tag, rs=rs, end_a=end_a, kind=kind
-            ),
-            indent,
+            f"{start_tag}{rs}{end_a}</{kind}>", indent,
)
def write_tr(
@@ -177,7 +174,7 @@ def write_tr(
if align is None:
             self.write("<tr>", indent)
         else:
-            self.write('<tr style="text-align: {align};">'.format(align=align), indent)
+            self.write(f'<tr style="text-align: {align};">', indent)
indent += indent_delta
for i, s in enumerate(line):
@@ -196,9 +193,7 @@ def render(self) -> List[str]:
if self.should_show_dimensions:
by = chr(215) # ×
self.write(
-                "<p>{rows} rows {by} {cols} columns</p>".format(
-                    rows=len(self.frame), by=by, cols=len(self.frame.columns)
-                )
+                f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
)
return self.elements
@@ -216,7 +211,7 @@ def _write_table(self, indent: int = 0) -> None:
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
- "classes must be a string, list, "
+                "classes must be a string, list, "
f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
@@ -224,12 +219,10 @@ def _write_table(self, indent: int = 0) -> None:
if self.table_id is None:
id_section = ""
else:
- id_section = ' id="{table_id}"'.format(table_id=self.table_id)
+ id_section = f' id="{self.table_id}"'
self.write(
-            '<table border="{border}" class="{cls}"{id_section}>'.format(
-                border=self.border, cls=" ".join(_classes), id_section=id_section
-            ),
+            f'<table border="{self.border}" class="{" ".join(_classes)}"{id_section}>',
indent,
)
diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py
index 8ab56437d5c05..c6d2055ecfea8 100644
--- a/pandas/io/formats/latex.py
+++ b/pandas/io/formats/latex.py
@@ -59,10 +59,9 @@ def write_result(self, buf: IO[str]) -> None:
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
- info_line = "Empty {name}\nColumns: {col}\nIndex: {idx}".format(
- name=type(self.frame).__name__,
- col=self.frame.columns,
- idx=self.frame.index,
+ info_line = (
+ f"Empty {type(self.frame).__name__}\n"
+ f"Columns: {self.frame.columns}\nIndex: {self.frame.index}"
)
strcols = [[info_line]]
else:
@@ -141,8 +140,8 @@ def pad_empties(x):
buf.write("\\endhead\n")
buf.write("\\midrule\n")
buf.write(
- "\\multicolumn{{{n}}}{{r}}{{{{Continued on next "
- "page}}}} \\\\\n".format(n=len(row))
+ f"\\multicolumn{{{len(row)}}}{{r}}{{{{Continued on next "
+ f"page}}}} \\\\\n"
)
buf.write("\\midrule\n")
buf.write("\\endfoot\n\n")
@@ -172,7 +171,7 @@ def pad_empties(x):
if self.bold_rows and self.fmt.index:
# bold row labels
crow = [
- "\\textbf{{{x}}}".format(x=x)
+ f"\\textbf{{{x}}}"
if j < ilevels and x.strip() not in ["", "{}"]
else x
for j, x in enumerate(crow)
@@ -211,9 +210,8 @@ def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append(
- "\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}".format(
- ncol=ncol, fmt=self.multicolumn_format, txt=coltext.strip()
- )
+ f"\\multicolumn{{{ncol:d}}}{{{self.multicolumn_format:s}}}"
+ f"{{{coltext.strip():s}}}"
)
# don't modify where not needed
else:
@@ -256,9 +254,7 @@ def _format_multirow(
break
if nrow > 1:
# overwrite non-multirow entry
- row[j] = "\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}".format(
- nrow=nrow, row=row[j].strip()
- )
+ row[j] = f"\\multirow{{{nrow:d}}}{{*}}{{{row[j].strip():s}}}"
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
@@ -269,7 +265,7 @@ def _print_cline(self, buf: IO[str], i: int, icol: int) -> None:
"""
for cl in self.clinebuf:
if cl[0] == i:
- buf.write("\\cline{{{cl:d}-{icol:d}}}\n".format(cl=cl[1], icol=icol))
+ buf.write(f"\\cline{{{cl[1]:d}-{icol:d}}}\n")
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
@@ -293,19 +289,19 @@ def _write_tabular_begin(self, buf, column_format: str):
if self.caption is None:
caption_ = ""
else:
- caption_ = "\n\\caption{{{}}}".format(self.caption)
+ caption_ = f"\n\\caption{{{self.caption}}}"
if self.label is None:
label_ = ""
else:
- label_ = "\n\\label{{{}}}".format(self.label)
+ label_ = f"\n\\label{{{self.label}}}"
- buf.write("\\begin{{table}}\n\\centering{}{}\n".format(caption_, label_))
+ buf.write(f"\\begin{{table}}\n\\centering{caption_}{label_}\n")
else:
# then write output only in a tabular environment
pass
- buf.write("\\begin{{tabular}}{{{fmt}}}\n".format(fmt=column_format))
+ buf.write(f"\\begin{{tabular}}{{{column_format}}}\n")
def _write_tabular_end(self, buf):
"""
@@ -341,18 +337,18 @@ def _write_longtable_begin(self, buf, column_format: str):
         <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl'
         for 3 columns
"""
- buf.write("\\begin{{longtable}}{{{fmt}}}\n".format(fmt=column_format))
+ buf.write(f"\\begin{{longtable}}{{{column_format}}}\n")
if self.caption is not None or self.label is not None:
if self.caption is None:
pass
else:
- buf.write("\\caption{{{}}}".format(self.caption))
+ buf.write(f"\\caption{{{self.caption}}}")
if self.label is None:
pass
else:
- buf.write("\\label{{{}}}".format(self.label))
+ buf.write(f"\\label{{{self.label}}}")
# a double-backslash is required at the end of the line
# as discussed here:
diff --git a/pandas/io/formats/printing.py b/pandas/io/formats/printing.py
index 13b18a0b5fb6f..36e774305b577 100644
--- a/pandas/io/formats/printing.py
+++ b/pandas/io/formats/printing.py
@@ -229,7 +229,7 @@ def as_escaped_string(
max_seq_items=max_seq_items,
)
elif isinstance(thing, str) and quote_strings:
- result = "'{thing}'".format(thing=as_escaped_string(thing))
+ result = f"'{as_escaped_string(thing)}'"
else:
result = as_escaped_string(thing)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 4e26ceef0af26..b661770dc80a2 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -1493,10 +1493,8 @@ def extract(r):
for n in range(len(columns[0])):
if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
raise ParserError(
- "Passed header=[{header}] are too many rows for this "
- "multi_index of columns".format(
- header=",".join(str(x) for x in self.header)
- )
+ f"Passed header=[{','.join(str(x) for x in self.header)}] "
+                    "are too many rows for this multi_index of columns"
)
# Clean the column names (if we have an index_col).
@@ -3613,8 +3611,8 @@ def get_rows(self, infer_nrows, skiprows=None):
def detect_colspecs(self, infer_nrows=100, skiprows=None):
# Regex escape the delimiters
- delimiters = "".join(r"\{}".format(x) for x in self.delimiter)
- pattern = re.compile("([^{}]+)".format(delimiters))
+ delimiters = "".join(fr"\{x}" for x in self.delimiter)
+ pattern = re.compile(f"([^{delimiters}]+)")
rows = self.get_rows(infer_nrows, skiprows)
if not rows:
raise EmptyDataError("No rows from which to infer column width")
diff --git a/pandas/tests/arrays/categorical/test_dtypes.py b/pandas/tests/arrays/categorical/test_dtypes.py
index 19746d7d72162..9922a8863ebc2 100644
--- a/pandas/tests/arrays/categorical/test_dtypes.py
+++ b/pandas/tests/arrays/categorical/test_dtypes.py
@@ -92,22 +92,20 @@ def test_codes_dtypes(self):
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
- result = Categorical(["foo{i:05d}".format(i=i) for i in range(400)])
+ result = Categorical([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
- result = Categorical(["foo{i:05d}".format(i=i) for i in range(40000)])
+ result = Categorical([f"foo{i:05d}" for i in range(40000)])
assert result.codes.dtype == "int32"
# adding cats
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
- result = result.add_categories(["foo{i:05d}".format(i=i) for i in range(400)])
+ result = result.add_categories([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
# removing cats
- result = result.remove_categories(
- ["foo{i:05d}".format(i=i) for i in range(300)]
- )
+ result = result.remove_categories([f"foo{i:05d}" for i in range(300)])
assert result.codes.dtype == "int8"
@pytest.mark.parametrize("ordered", [True, False])
diff --git a/pandas/tests/arrays/categorical/test_operators.py b/pandas/tests/arrays/categorical/test_operators.py
index 0c830c65e0f8b..c3006687ca6dd 100644
--- a/pandas/tests/arrays/categorical/test_operators.py
+++ b/pandas/tests/arrays/categorical/test_operators.py
@@ -338,7 +338,7 @@ def test_compare_unordered_different_order(self):
def test_numeric_like_ops(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
@@ -353,9 +353,7 @@ def test_numeric_like_ops(self):
("__mul__", r"\*"),
("__truediv__", "/"),
]:
- msg = r"Series cannot perform the operation {}|unsupported operand".format(
- str_rep
- )
+ msg = fr"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(df, op)(df)
@@ -363,7 +361,7 @@ def test_numeric_like_ops(self):
# min/max)
s = df["value_group"]
for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
- msg = "Categorical cannot perform the operation {}".format(op)
+ msg = f"Categorical cannot perform the operation {op}"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(numeric_only=False)
@@ -383,9 +381,7 @@ def test_numeric_like_ops(self):
("__mul__", r"\*"),
("__truediv__", "/"),
]:
- msg = r"Series cannot perform the operation {}|unsupported operand".format(
- str_rep
- )
+ msg = fr"Series cannot perform the operation {str_rep}|unsupported operand"
with pytest.raises(TypeError, match=msg):
getattr(s, op)(2)
diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py
index bd9b77a2bc419..a78e4bb34e42a 100644
--- a/pandas/tests/extension/decimal/test_decimal.py
+++ b/pandas/tests/extension/decimal/test_decimal.py
@@ -99,7 +99,7 @@ def assert_frame_equal(cls, left, right, *args, **kwargs):
check_names=kwargs.get("check_names", True),
check_exact=kwargs.get("check_exact", False),
check_categorical=kwargs.get("check_categorical", True),
- obj="{obj}.columns".format(obj=kwargs.get("obj", "DataFrame")),
+ obj=f"{kwargs.get('obj', 'DataFrame')}.columns",
)
decimals = (left.dtypes == "decimal").index
diff --git a/pandas/tests/frame/indexing/test_categorical.py b/pandas/tests/frame/indexing/test_categorical.py
index a29c193676db2..3a472a8b58b6c 100644
--- a/pandas/tests/frame/indexing/test_categorical.py
+++ b/pandas/tests/frame/indexing/test_categorical.py
@@ -14,9 +14,7 @@ def test_assignment(self):
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
- labels = Categorical(
- ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
- )
+ labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
@@ -348,7 +346,7 @@ def test_assigning_ops(self):
def test_functions_no_warnings(self):
df = DataFrame({"value": np.random.randint(0, 100, 20)})
- labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
+ labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df["group"] = pd.cut(
df.value, range(0, 105, 10), right=False, labels=labels
diff --git a/pandas/tests/frame/methods/test_describe.py b/pandas/tests/frame/methods/test_describe.py
index 127233ed2713e..8a75e80a12f52 100644
--- a/pandas/tests/frame/methods/test_describe.py
+++ b/pandas/tests/frame/methods/test_describe.py
@@ -86,7 +86,7 @@ def test_describe_bool_frame(self):
def test_describe_categorical(self):
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
diff --git a/pandas/tests/frame/methods/test_duplicated.py b/pandas/tests/frame/methods/test_duplicated.py
index 72eec8753315c..38b9d7fd049ab 100644
--- a/pandas/tests/frame/methods/test_duplicated.py
+++ b/pandas/tests/frame/methods/test_duplicated.py
@@ -22,9 +22,7 @@ def test_duplicated_do_not_fail_on_wide_dataframes():
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
- data = {
- "col_{0:02d}".format(i): np.random.randint(0, 1000, 30000) for i in range(100)
- }
+ data = {f"col_{i:02d}": np.random.randint(0, 1000, 30000) for i in range(100)}
df = DataFrame(data).T
result = df.duplicated()
diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py
index 7b0adceb57668..40393721c4ac6 100644
--- a/pandas/tests/frame/methods/test_to_dict.py
+++ b/pandas/tests/frame/methods/test_to_dict.py
@@ -236,9 +236,9 @@ def test_to_dict_numeric_names(self):
def test_to_dict_wide(self):
# GH#24939
- df = DataFrame({("A_{:d}".format(i)): [i] for i in range(256)})
+ df = DataFrame({(f"A_{i:d}"): [i] for i in range(256)})
result = df.to_dict("records")[0]
- expected = {"A_{:d}".format(i): i for i in range(256)}
+ expected = {f"A_{i:d}": i for i in range(256)}
assert result == expected
def test_to_dict_orient_dtype(self):
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 602ea9ca0471a..ad244d8f359a9 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -383,7 +383,7 @@ class Thing(frozenset):
def __repr__(self) -> str:
tmp = sorted(self)
# double curly brace prints one brace in format string
- return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
+ return f"frozenset({{{', '.join(map(repr, tmp))}}})"
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py
index 17cc50661e3cb..a021dd91a7d26 100644
--- a/pandas/tests/frame/test_api.py
+++ b/pandas/tests/frame/test_api.py
@@ -46,19 +46,19 @@ def test_get_value(self, float_frame):
def test_add_prefix_suffix(self, float_frame):
with_prefix = float_frame.add_prefix("foo#")
- expected = pd.Index(["foo#{c}".format(c=c) for c in float_frame.columns])
+ expected = pd.Index([f"foo#{c}" for c in float_frame.columns])
tm.assert_index_equal(with_prefix.columns, expected)
with_suffix = float_frame.add_suffix("#foo")
- expected = pd.Index(["{c}#foo".format(c=c) for c in float_frame.columns])
+ expected = pd.Index([f"{c}#foo" for c in float_frame.columns])
tm.assert_index_equal(with_suffix.columns, expected)
with_pct_prefix = float_frame.add_prefix("%")
- expected = pd.Index(["%{c}".format(c=c) for c in float_frame.columns])
+ expected = pd.Index([f"%{c}" for c in float_frame.columns])
tm.assert_index_equal(with_pct_prefix.columns, expected)
with_pct_suffix = float_frame.add_suffix("%")
- expected = pd.Index(["{c}%".format(c=c) for c in float_frame.columns])
+ expected = pd.Index([f"{c}%" for c in float_frame.columns])
tm.assert_index_equal(with_pct_suffix.columns, expected)
def test_get_axis(self, float_frame):
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py
index 5f4c78449f71d..8c9b7cd060059 100644
--- a/pandas/tests/frame/test_constructors.py
+++ b/pandas/tests/frame/test_constructors.py
@@ -278,7 +278,7 @@ def test_constructor_ordereddict(self):
nitems = 100
nums = list(range(nitems))
random.shuffle(nums)
- expected = ["A{i:d}".format(i=i) for i in nums]
+ expected = [f"A{i:d}" for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py
index 966f0d416676c..8b63f0614eebf 100644
--- a/pandas/tests/frame/test_dtypes.py
+++ b/pandas/tests/frame/test_dtypes.py
@@ -702,7 +702,7 @@ def test_astype_categorical(self, dtype):
@pytest.mark.parametrize("cls", [CategoricalDtype, DatetimeTZDtype, IntervalDtype])
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
- xpr = "Expected an instance of {}".format(cls.__name__)
+ xpr = f"Expected an instance of {cls.__name__}"
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
@@ -827,7 +827,7 @@ def test_df_where_change_dtype(self):
def test_astype_from_datetimelike_to_objectt(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
- dtype = "{}[{}]".format(dtype, unit)
+ dtype = f"{dtype}[{unit}]"
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
@@ -844,7 +844,7 @@ def test_astype_from_datetimelike_to_objectt(self, dtype, unit):
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
- dtype = "{}[{}]".format(dtype, unit)
+ dtype = f"{dtype}[{unit}]"
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
@@ -856,7 +856,7 @@ def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
- dtype = "M8[{}]".format(unit)
+ dtype = f"M8[{unit}]"
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
@@ -868,7 +868,7 @@ def test_astype_to_datetime_unit(self, unit):
def test_astype_to_timedelta_unit_ns(self, unit):
# preserver the timedelta conversion
# gh-19223
- dtype = "m8[{}]".format(unit)
+ dtype = f"m8[{unit}]"
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
@@ -880,7 +880,7 @@ def test_astype_to_timedelta_unit_ns(self, unit):
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
- dtype = "m8[{}]".format(unit)
+ dtype = f"m8[{unit}]"
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
@@ -892,21 +892,21 @@ def test_astype_to_timedelta_unit(self, unit):
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
- dtype = "M8[{}]".format(unit)
- other = "m8[{}]".format(unit)
+ dtype = f"M8[{unit}]"
+ other = f"m8[{unit}]"
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
- r"cannot astype a datetimelike from \[datetime64\[ns\]\] to "
- r"\[timedelta64\[{}\]\]"
- ).format(unit)
+            r"cannot astype a datetimelike from \[datetime64\[ns\]\] to "
+ fr"\[timedelta64\[{unit}\]\]"
+ )
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
- r"cannot astype a timedelta from \[timedelta64\[ns\]\] to "
- r"\[datetime64\[{}\]\]"
- ).format(unit)
+            r"cannot astype a timedelta from \[timedelta64\[ns\]\] to "
+ fr"\[datetime64\[{unit}\]\]"
+ )
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
diff --git a/pandas/tests/frame/test_join.py b/pandas/tests/frame/test_join.py
index c6e28f3c64f12..8c388a887158f 100644
--- a/pandas/tests/frame/test_join.py
+++ b/pandas/tests/frame/test_join.py
@@ -161,7 +161,7 @@ def test_join_overlap(float_frame):
def test_join_period_index(frame_with_period_index):
- other = frame_with_period_index.rename(columns=lambda x: "{key}{key}".format(key=x))
+ other = frame_with_period_index.rename(columns=lambda key: f"{key}{key}")
joined_values = np.concatenate([frame_with_period_index.values] * 2, axis=1)
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py
index 162f3c114fa5d..df40c2e7e2a11 100644
--- a/pandas/tests/frame/test_operators.py
+++ b/pandas/tests/frame/test_operators.py
@@ -840,8 +840,8 @@ def test_inplace_ops_identity2(self, op):
df["a"] = [True, False, True]
df_copy = df.copy()
- iop = "__i{}__".format(op)
- op = "__{}__".format(op)
+ iop = f"__i{op}__"
+ op = f"__{op}__"
# no id change and value is correct
getattr(df, iop)(operand)
diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py
index 703e05998e93c..fede07e28dbef 100644
--- a/pandas/tests/frame/test_query_eval.py
+++ b/pandas/tests/frame/test_query_eval.py
@@ -101,10 +101,10 @@ def test_ops(self):
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
- expected = eval("base{op}df".format(op=op_str))
+ expected = eval(f"base{op_str}df")
# ops as strings
- result = eval("m{op}df".format(op=op_str))
+ result = eval(f"m{op_str}df")
tm.assert_frame_equal(result, expected)
# these are commutative
@@ -451,9 +451,7 @@ def test_date_query_with_non_date(self):
for op in ["<", ">", "<=", ">="]:
with pytest.raises(TypeError):
- df.query(
- "dates {op} nondate".format(op=op), parser=parser, engine=engine
- )
+ df.query(f"dates {op} nondate", parser=parser, engine=engine)
def test_query_syntax_error(self):
engine, parser = self.engine, self.parser
@@ -690,7 +688,7 @@ def test_inf(self):
ops = "==", "!="
d = dict(zip(ops, (operator.eq, operator.ne)))
for op, f in d.items():
- q = "a {op} inf".format(op=op)
+ q = f"a {op} inf"
expected = df[f(df.a, np.inf)]
result = df.query(q, engine=self.engine, parser=self.parser)
tm.assert_frame_equal(result, expected)
@@ -854,7 +852,7 @@ def test_str_query_method(self, parser, engine):
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
- ex = "{lhs} {op} {rhs}".format(lhs=lhs, op=op, rhs=rhs)
+ ex = f"{lhs} {op} {rhs}"
msg = r"'(Not)?In' nodes are not implemented"
with pytest.raises(NotImplementedError, match=msg):
df.query(
@@ -895,7 +893,7 @@ def test_str_list_query_method(self, parser, engine):
ops = 2 * ([eq] + [ne])
for lhs, op, rhs in zip(lhs, ops, rhs):
- ex = "{lhs} {op} {rhs}".format(lhs=lhs, op=op, rhs=rhs)
+ ex = f"{lhs} {op} {rhs}"
with pytest.raises(NotImplementedError):
df.query(ex, engine=engine, parser=parser)
else:
@@ -1042,7 +1040,7 @@ def test_invalid_type_for_operator_raises(self, parser, engine, op):
msg = r"unsupported operand type\(s\) for .+: '.+' and '.+'"
with pytest.raises(TypeError, match=msg):
- df.eval("a {0} b".format(op), engine=engine, parser=parser)
+ df.eval(f"a {op} b", engine=engine, parser=parser)
class TestDataFrameQueryBacktickQuoting:
diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py
index b3af5a7b7317e..68519d1dfa33c 100644
--- a/pandas/tests/frame/test_reshape.py
+++ b/pandas/tests/frame/test_reshape.py
@@ -765,7 +765,7 @@ def test_unstack_unused_level(self, cols):
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
- cast = lambda val: "{0:1}".format("" if val != val else val)
+ cast = lambda val: f"{'' if val != val else val:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py
index e89f4ee07ea00..5e06b6402c34f 100644
--- a/pandas/tests/frame/test_timeseries.py
+++ b/pandas/tests/frame/test_timeseries.py
@@ -54,7 +54,7 @@ def test_frame_append_datetime64_col_other_units(self):
ns_dtype = np.dtype("M8[ns]")
for unit in units:
- dtype = np.dtype("M8[{unit}]".format(unit=unit))
+ dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
@@ -70,7 +70,7 @@ def test_frame_append_datetime64_col_other_units(self):
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
- dtype = np.dtype("M8[{unit}]".format(unit=unit))
+ dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py
index 84eee2419f0b8..21ee8649172da 100644
--- a/pandas/tests/indexes/datetimes/test_scalar_compat.py
+++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py
@@ -248,21 +248,21 @@ def test_round_int64(self, start, index_freq, periods, round_freq):
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
- assert (mod == 0).all(), "floor not a {} multiple".format(round_freq)
+ assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
- assert (mod == 0).all(), "ceil not a {} multiple".format(round_freq)
+ assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
- assert (mod == 0).all(), "round not a {} multiple".format(round_freq)
+ assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index df3a49fb7c292..13723f6455bff 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -199,7 +199,7 @@ def test_to_datetime_format_microsecond(self, cache):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
- val = "01-{}-2011 00:00:01.978".format(month_abbr)
+ val = f"01-{month_abbr}-2011 00:00:01.978"
format = "%d-%b-%Y %H:%M:%S.%f"
result = to_datetime(val, format=format, cache=cache)
@@ -551,7 +551,7 @@ def test_to_datetime_dt64s(self, cache):
)
@pytest.mark.parametrize("cache", [True, False])
def test_to_datetime_dt64s_out_of_bounds(self, cache, dt):
- msg = "Out of bounds nanosecond timestamp: {}".format(dt)
+ msg = f"Out of bounds nanosecond timestamp: {dt}"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pd.to_datetime(dt, errors="raise")
with pytest.raises(OutOfBoundsDatetime, match=msg):
diff --git a/pandas/tests/indexes/interval/test_indexing.py b/pandas/tests/indexes/interval/test_indexing.py
index 87b72f702e2aa..0e5721bfd83fd 100644
--- a/pandas/tests/indexes/interval/test_indexing.py
+++ b/pandas/tests/indexes/interval/test_indexing.py
@@ -24,11 +24,7 @@ def test_get_loc_interval(self, closed, side):
for bound in [[0, 1], [1, 2], [2, 3], [3, 4], [0, 2], [2.5, 3], [-1, 4]]:
# if get_loc is supplied an interval, it should only search
# for exact matches, not overlaps or covers, else KeyError.
- msg = re.escape(
- "Interval({bound[0]}, {bound[1]}, closed='{side}')".format(
- bound=bound, side=side
- )
- )
+ msg = re.escape(f"Interval({bound[0]}, {bound[1]}, closed='{side}')")
if closed == side:
if bound == [0, 1]:
assert idx.get_loc(Interval(0, 1, closed=side)) == 0
@@ -86,11 +82,7 @@ def test_get_loc_length_one_interval(self, left, right, closed, other_closed):
else:
with pytest.raises(
KeyError,
- match=re.escape(
- "Interval({left}, {right}, closed='{other_closed}')".format(
- left=left, right=right, other_closed=other_closed
- )
- ),
+ match=re.escape(f"Interval({left}, {right}, closed='{other_closed}')"),
):
index.get_loc(interval)
diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py
index d010060880703..c2b209c810af9 100644
--- a/pandas/tests/indexes/interval/test_interval.py
+++ b/pandas/tests/indexes/interval/test_interval.py
@@ -845,7 +845,7 @@ def test_set_closed(self, name, closed, new_closed):
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
- msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
+ msg = f"invalid option for 'closed': {bad_closed}"
with pytest.raises(ValueError, match=msg):
index.set_closed(bad_closed)
diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py
index 3246ac6bafde9..b9eb8b7c41018 100644
--- a/pandas/tests/indexes/interval/test_setops.py
+++ b/pandas/tests/indexes/interval/test_setops.py
@@ -180,8 +180,8 @@ def test_set_incompatible_types(self, closed, op_name, sort):
# GH 19016: incompatible dtypes
other = interval_range(Timestamp("20180101"), periods=9, closed=closed)
msg = (
- "can only do {op} between two IntervalIndex objects that have "
- "compatible dtypes"
- ).format(op=op_name)
+ f"can only do {op_name} between two IntervalIndex objects that have "
+        "compatible dtypes"
+ )
with pytest.raises(TypeError, match=msg):
set_op(other, sort=sort)
diff --git a/pandas/tests/indexes/multi/test_compat.py b/pandas/tests/indexes/multi/test_compat.py
index 9a76f0623eb31..ef549beccda5d 100644
--- a/pandas/tests/indexes/multi/test_compat.py
+++ b/pandas/tests/indexes/multi/test_compat.py
@@ -29,7 +29,7 @@ def test_numeric_compat(idx):
@pytest.mark.parametrize("method", ["all", "any"])
def test_logical_compat(idx, method):
- msg = "cannot perform {method}".format(method=method)
+ msg = f"cannot perform {method}"
with pytest.raises(TypeError, match=msg):
getattr(idx, method)()
diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py
index fcbadce3d63b1..418f53591b913 100644
--- a/pandas/tests/indexes/period/test_constructors.py
+++ b/pandas/tests/indexes/period/test_constructors.py
@@ -364,7 +364,7 @@ def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
- strs = ["{t[0]:d}Q{t[1]:d}".format(t=t) for t in zip(quarter, year)]
+ strs = [f"{t[0]:d}Q{t[1]:d}" for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
diff --git a/pandas/tests/indexes/timedeltas/test_constructors.py b/pandas/tests/indexes/timedeltas/test_constructors.py
index 0de10b5d82171..8e54561df1624 100644
--- a/pandas/tests/indexes/timedeltas/test_constructors.py
+++ b/pandas/tests/indexes/timedeltas/test_constructors.py
@@ -155,7 +155,7 @@ def test_constructor(self):
def test_constructor_iso(self):
# GH #21877
expected = timedelta_range("1s", periods=9, freq="s")
- durations = ["P0DT0H0M{}S".format(i) for i in range(1, 10)]
+ durations = [f"P0DT0H0M{i}S" for i in range(1, 10)]
result = to_timedelta(durations)
tm.assert_index_equal(result, expected)
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py
index 6cc18a3989266..e85561fce0668 100644
--- a/pandas/tests/indexing/test_floats.py
+++ b/pandas/tests/indexing/test_floats.py
@@ -53,8 +53,8 @@ def test_scalar_error(self, index_func):
s.iloc[3.0]
msg = (
- "cannot do positional indexing on {klass} with these "
- r"indexers \[3\.0\] of type float".format(klass=type(i).__name__)
+ fr"cannot do positional indexing on {type(i).__name__} with these "
+        r"indexers \[3\.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -94,11 +94,11 @@ def test_scalar_non_numeric(self, index_func):
else:
error = TypeError
msg = (
- r"cannot do (label|positional) indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float|"
- "Cannot index by location index with a "
- "non-integer key".format(klass=type(i).__name__)
+            r"cannot do (label|positional) indexing "
+            fr"on {type(i).__name__} with these indexers \[3\.0\] of "
+            r"type float|"
+            r"Cannot index by location index with a "
+            r"non-integer key"
)
with pytest.raises(error, match=msg):
idxr(s)[3.0]
@@ -115,9 +115,9 @@ def test_scalar_non_numeric(self, index_func):
else:
error = TypeError
msg = (
- r"cannot do (label|positional) indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float".format(klass=type(i).__name__)
+            r"cannot do (label|positional) indexing "
+            fr"on {type(i).__name__} with these indexers \[3\.0\] of "
+            r"type float"
)
with pytest.raises(error, match=msg):
s.loc[3.0]
@@ -127,9 +127,9 @@ def test_scalar_non_numeric(self, index_func):
# setting with a float fails with iloc
msg = (
- r"cannot do (label|positional) indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float".format(klass=type(i).__name__)
+            r"cannot do (label|positional) indexing "
+            fr"on {type(i).__name__} with these indexers \[3\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s.iloc[3.0] = 0
@@ -164,9 +164,9 @@ def test_scalar_non_numeric(self, index_func):
s = Series(np.arange(len(i)), index=i)
s[3]
msg = (
- r"cannot do (label|positional) indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float".format(klass=type(i).__name__)
+            r"cannot do (label|positional) indexing "
+            fr"on {type(i).__name__} with these indexers \[3\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[3.0]
@@ -181,12 +181,10 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x, lambda x: x.iloc]:
msg = (
- r"cannot do label indexing "
- r"on {klass} with these indexers \[1\.0\] of "
- r"type float|"
- "Cannot index by location index with a non-integer key".format(
- klass=Index.__name__
- )
+            r"cannot do label indexing "
+            fr"on {Index.__name__} with these indexers \[1\.0\] of "
+            r"type float|"
+            r"Cannot index by location index with a non-integer key"
)
with pytest.raises(TypeError, match=msg):
idxr(s2)[1.0]
@@ -203,9 +201,9 @@ def test_scalar_with_mixed(self):
for idxr in [lambda x: x]:
msg = (
- r"cannot do label indexing "
- r"on {klass} with these indexers \[1\.0\] of "
- r"type float".format(klass=Index.__name__)
+            r"cannot do label indexing "
+            fr"on {Index.__name__} with these indexers \[1\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
idxr(s3)[1.0]
@@ -321,9 +319,9 @@ def test_scalar_float(self):
s.iloc[3.0]
msg = (
- r"cannot do positional indexing "
- r"on {klass} with these indexers \[3\.0\] of "
- r"type float".format(klass=Float64Index.__name__)
+        r"cannot do positional indexing "
+        fr"on {Float64Index.__name__} with these indexers \[3\.0\] of "
+        r"type float"
)
with pytest.raises(TypeError, match=msg):
s2.iloc[3.0] = 0
@@ -354,9 +352,9 @@ def test_slice_non_numeric(self, index_func):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do positional indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do positional indexing "
+            fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s.iloc[l]
@@ -364,10 +362,10 @@ def test_slice_non_numeric(self, index_func):
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do (slice|positional) indexing "
- r"on {klass} with these indexers "
- r"\[(3|4)(\.0)?\] "
- r"of type (float|int)".format(klass=type(index).__name__)
+            r"cannot do (slice|positional) indexing "
+            fr"on {type(index).__name__} with these indexers "
+            r"\[(3|4)(\.0)?\] "
+            r"of type (float|int)"
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -376,19 +374,19 @@ def test_slice_non_numeric(self, index_func):
for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]:
msg = (
- "cannot do positional indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+        r"cannot do positional indexing "
+        fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of "
+        r"type float"
)
with pytest.raises(TypeError, match=msg):
s.iloc[l] = 0
for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]:
msg = (
- "cannot do (slice|positional) indexing "
- r"on {klass} with these indexers "
- r"\[(3|4)(\.0)?\] "
- r"of type (float|int)".format(klass=type(index).__name__)
+            r"cannot do (slice|positional) indexing "
+            fr"on {type(index).__name__} with these indexers "
+            r"\[(3|4)(\.0)?\] "
+            r"of type (float|int)"
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l] = 0
@@ -426,9 +424,9 @@ def test_slice_integer(self):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -450,9 +448,9 @@ def test_slice_integer(self):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[-6\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[-6\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[slice(-6.0, 6.0)]
@@ -476,9 +474,9 @@ def test_slice_integer(self):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(2|3)\.5\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[(2|3)\.5\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -494,9 +492,9 @@ def test_slice_integer(self):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
@@ -517,9 +515,9 @@ def test_integer_positional_indexing(self):
klass = RangeIndex
msg = (
- "cannot do (slice|positional) indexing "
- r"on {klass} with these indexers \[(2|4)\.0\] of "
- "type float".format(klass=klass.__name__)
+            r"cannot do (slice|positional) indexing "
+            fr"on {klass.__name__} with these indexers \[(2|4)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
idxr(s)[l]
@@ -545,9 +543,9 @@ def f(idxr):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(0|1)\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[(0|1)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -560,9 +558,9 @@ def f(idxr):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[-10\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[-10\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[slice(-10.0, 10.0)]
@@ -579,9 +577,9 @@ def f(idxr):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[0\.5\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[0\.5\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l]
@@ -596,9 +594,9 @@ def f(idxr):
# positional indexing
msg = (
- "cannot do slice indexing "
- r"on {klass} with these indexers \[(3|4)\.0\] of "
- "type float".format(klass=type(index).__name__)
+            r"cannot do slice indexing "
+            fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of "
+            r"type float"
)
with pytest.raises(TypeError, match=msg):
s[l] = 0
diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py
index aa966caa63238..e12167c980dec 100644
--- a/pandas/tests/internals/test_internals.py
+++ b/pandas/tests/internals/test_internals.py
@@ -91,9 +91,7 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
elif typestr in ("complex", "c16", "c8"):
values = 1.0j * (mat.astype(typestr) + num_offset)
elif typestr in ("object", "string", "O"):
- values = np.reshape(
- ["A{i:d}".format(i=i) for i in mat.ravel() + num_offset], shape
- )
+ values = np.reshape([f"A{i:d}" for i in mat.ravel() + num_offset], shape)
elif typestr in ("b", "bool"):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ("datetime", "dt", "M8[ns]"):
@@ -101,7 +99,7 @@ def create_block(typestr, placement, item_shape=None, num_offset=0):
elif typestr.startswith("M8[ns"):
# datetime with tz
m = re.search(r"M8\[ns,\s*(\w+\/?\w*)\]", typestr)
- assert m is not None, "incompatible typestr -> {0}".format(typestr)
+ assert m is not None, f"incompatible typestr -> {typestr}"
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
@@ -610,9 +608,9 @@ def test_interleave(self):
# self
for dtype in ["f8", "i8", "object", "bool", "complex", "M8[ns]", "m8[ns]"]:
- mgr = create_mgr("a: {0}".format(dtype))
+ mgr = create_mgr(f"a: {dtype}")
assert mgr.as_array().dtype == dtype
- mgr = create_mgr("a: {0}; b: {0}".format(dtype))
+ mgr = create_mgr(f"a: {dtype}; b: {dtype}")
assert mgr.as_array().dtype == dtype
# will be converted according the actual dtype of the underlying
@@ -1164,7 +1162,7 @@ def __array__(self):
return np.array(self.value, dtype=self.dtype)
def __str__(self) -> str:
- return "DummyElement({}, {})".format(self.value, self.dtype)
+ return f"DummyElement({self.value}, {self.dtype})"
def __repr__(self) -> str:
return str(self)
diff --git a/pandas/tests/io/excel/test_readers.py b/pandas/tests/io/excel/test_readers.py
index 8d00ef1b7fe3e..d18f83982ce25 100644
--- a/pandas/tests/io/excel/test_readers.py
+++ b/pandas/tests/io/excel/test_readers.py
@@ -596,7 +596,7 @@ def test_read_from_file_url(self, read_ext, datapath):
# fails on some systems
import platform
- pytest.skip("failing on {}".format(" ".join(platform.uname()).strip()))
+ pytest.skip(f"failing on {' '.join(platform.uname()).strip()}")
tm.assert_frame_equal(url_table, local_table)
@@ -957,7 +957,7 @@ def test_excel_passes_na_filter(self, read_ext, na_filter):
def test_unexpected_kwargs_raises(self, read_ext, arg):
# gh-17964
kwarg = {arg: "Sheet1"}
- msg = r"unexpected keyword argument `{}`".format(arg)
+ msg = fr"unexpected keyword argument `{arg}`"
with pd.ExcelFile("test1" + read_ext) as excel:
with pytest.raises(TypeError, match=msg):
diff --git a/pandas/tests/io/excel/test_style.py b/pandas/tests/io/excel/test_style.py
index 88f4c3736bc0d..31b033f381f0c 100644
--- a/pandas/tests/io/excel/test_style.py
+++ b/pandas/tests/io/excel/test_style.py
@@ -45,10 +45,7 @@ def style(df):
def assert_equal_style(cell1, cell2, engine):
if engine in ["xlsxwriter", "openpyxl"]:
pytest.xfail(
- reason=(
- "GH25351: failing on some attribute "
- "comparisons in {}".format(engine)
- )
+ reason=(f"GH25351: failing on some attribute comparisons in {engine}")
)
# XXX: should find a better way to check equality
assert cell1.alignment.__dict__ == cell2.alignment.__dict__
@@ -108,7 +105,7 @@ def custom_converter(css):
for col1, col2 in zip(wb["frame"].columns, wb["styled"].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
- ref = "{cell2.column}{cell2.row:d}".format(cell2=cell2)
+ ref = f"{cell2.column}{cell2.row:d}"
# XXX: this isn't as strong a test as ideal; we should
# confirm that differences are exclusive
if ref == "B2":
@@ -156,7 +153,7 @@ def custom_converter(css):
for col1, col2 in zip(wb["frame"].columns, wb["custom"].columns):
assert len(col1) == len(col2)
for cell1, cell2 in zip(col1, col2):
- ref = "{cell2.column}{cell2.row:d}".format(cell2=cell2)
+ ref = f"{cell2.column}{cell2.row:d}"
if ref in ("B2", "C3", "D4", "B5", "C6", "D7", "B8", "B9"):
assert not cell1.font.bold
assert cell2.font.bold
diff --git a/pandas/tests/io/excel/test_writers.py b/pandas/tests/io/excel/test_writers.py
index 91665a24fc4c5..506d223dbedb4 100644
--- a/pandas/tests/io/excel/test_writers.py
+++ b/pandas/tests/io/excel/test_writers.py
@@ -41,7 +41,7 @@ def set_engine(engine, ext):
which engine should be used to write Excel files. After executing
the test it rolls back said change to the global option.
"""
- option_name = "io.excel.{ext}.writer".format(ext=ext.strip("."))
+ option_name = f"io.excel.{ext.strip('.')}.writer"
prev_engine = get_option(option_name)
set_option(option_name, engine)
yield
@@ -1206,7 +1206,7 @@ def test_path_path_lib(self, engine, ext):
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
- result = tm.round_trip_pathlib(writer, reader, path="foo.{ext}".format(ext=ext))
+ result = tm.round_trip_pathlib(writer, reader, path=f"foo.{ext}")
tm.assert_frame_equal(result, df)
def test_path_local_path(self, engine, ext):
@@ -1214,7 +1214,7 @@ def test_path_local_path(self, engine, ext):
writer = partial(df.to_excel, engine=engine)
reader = partial(pd.read_excel, index_col=0)
- result = tm.round_trip_pathlib(writer, reader, path="foo.{ext}".format(ext=ext))
+ result = tm.round_trip_pathlib(writer, reader, path=f"foo.{ext}")
tm.assert_frame_equal(result, df)
def test_merged_cell_custom_objects(self, merge_cells, path):
diff --git a/pandas/tests/io/excel/test_xlrd.py b/pandas/tests/io/excel/test_xlrd.py
index cc7e2311f362a..d456afe4ed351 100644
--- a/pandas/tests/io/excel/test_xlrd.py
+++ b/pandas/tests/io/excel/test_xlrd.py
@@ -37,7 +37,7 @@ def test_read_xlrd_book(read_ext, frame):
# TODO: test for openpyxl as well
def test_excel_table_sheet_by_index(datapath, read_ext):
- path = datapath("io", "data", "excel", "test1{}".format(read_ext))
+ path = datapath("io", "data", "excel", f"test1{read_ext}")
with pd.ExcelFile(path) as excel:
with pytest.raises(xlrd.XLRDError):
pd.read_excel(excel, "asdf")
diff --git a/pandas/tests/io/formats/test_console.py b/pandas/tests/io/formats/test_console.py
index e56d14885f11e..b57a2393461a2 100644
--- a/pandas/tests/io/formats/test_console.py
+++ b/pandas/tests/io/formats/test_console.py
@@ -34,8 +34,8 @@ def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
# they have values filled.
# GH 21552
with monkeypatch.context() as context:
- context.setattr("sys.{}".format(empty), MockEncoding(""))
- context.setattr("sys.{}".format(filled), MockEncoding(filled))
+ context.setattr(f"sys.{empty}", MockEncoding(""))
+ context.setattr(f"sys.{filled}", MockEncoding(filled))
assert detect_console_encoding() == filled
diff --git a/pandas/tests/io/formats/test_to_html.py b/pandas/tests/io/formats/test_to_html.py
index d3f044a42eb28..9a14022d6f776 100644
--- a/pandas/tests/io/formats/test_to_html.py
+++ b/pandas/tests/io/formats/test_to_html.py
@@ -300,7 +300,7 @@ def test_to_html_border(option, result, expected):
else:
with option_context("display.html.border", option):
result = result(df)
- expected = 'border="{}"'.format(expected)
+ expected = f'border="{expected}"'
assert expected in result
@@ -318,7 +318,7 @@ def test_to_html(biggie_df_fixture):
assert isinstance(s, str)
df.to_html(columns=["B", "A"], col_space=17)
- df.to_html(columns=["B", "A"], formatters={"A": lambda x: "{x:.1f}".format(x=x)})
+ df.to_html(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
df.to_html(columns=["B", "A"], float_format=str)
df.to_html(columns=["B", "A"], col_space=12, float_format=str)
@@ -745,7 +745,7 @@ def test_to_html_with_col_space_units(unit):
if isinstance(unit, int):
unit = str(unit) + "px"
for h in hdrs:
-        expected = '<th style="min-width: {unit};">'.format(unit=unit)
+        expected = f'<th style="min-width: {unit};">'
assert expected in h
diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py
index bd681032f155d..c2fbc59b8f482 100644
--- a/pandas/tests/io/formats/test_to_latex.py
+++ b/pandas/tests/io/formats/test_to_latex.py
@@ -117,10 +117,10 @@ def test_to_latex_with_formatters(self):
formatters = {
"datetime64": lambda x: x.strftime("%Y-%m"),
- "float": lambda x: "[{x: 4.1f}]".format(x=x),
- "int": lambda x: "0x{x:x}".format(x=x),
- "object": lambda x: "-{x!s}-".format(x=x),
- "__index__": lambda x: "index: {x}".format(x=x),
+ "float": lambda x: f"[{x: 4.1f}]",
+ "int": lambda x: f"0x{x:x}",
+ "object": lambda x: f"-{x!s}-",
+ "__index__": lambda x: f"index: {x}",
}
result = df.to_latex(formatters=dict(formatters))
@@ -744,9 +744,7 @@ def test_to_latex_multiindex_names(self, name0, name1, axes):
idx_names = tuple(n or "{}" for n in names)
idx_names_row = (
- "{idx_names[0]} & {idx_names[1]} & & & & \\\\\n".format(
- idx_names=idx_names
- )
+ f"{idx_names[0]} & {idx_names[1]} & & & & \\\\\n"
if (0 in axes and any(names))
else ""
)
diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py
index 67b767a337a89..62cbcacc7e1f9 100755
--- a/pandas/tests/io/generate_legacy_storage_files.py
+++ b/pandas/tests/io/generate_legacy_storage_files.py
@@ -324,17 +324,17 @@ def write_legacy_pickles(output_dir):
"This script generates a storage file for the current arch, system, "
"and python version"
)
- print(" pandas version: {0}".format(version))
- print(" output dir : {0}".format(output_dir))
+ print(f" pandas version: {version}")
+ print(f" output dir : {output_dir}")
print(" storage format: pickle")
- pth = "{0}.pickle".format(platform_name())
+ pth = f"{platform_name()}.pickle"
fh = open(os.path.join(output_dir, pth), "wb")
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
- print("created pickle file: {pth}".format(pth=pth))
+ print(f"created pickle file: {pth}")
def write_legacy_file():
diff --git a/pandas/tests/io/parser/test_c_parser_only.py b/pandas/tests/io/parser/test_c_parser_only.py
index 1737f14e7adf9..5bbabc8e18c47 100644
--- a/pandas/tests/io/parser/test_c_parser_only.py
+++ b/pandas/tests/io/parser/test_c_parser_only.py
@@ -158,7 +158,7 @@ def test_precise_conversion(c_parser_only):
# test numbers between 1 and 2
for num in np.linspace(1.0, 2.0, num=500):
# 25 decimal digits of precision
- text = "a\n{0:.25}".format(num)
+ text = f"a\n{num:.25}"
normal_val = float(parser.read_csv(StringIO(text))["a"][0])
precise_val = float(
@@ -170,7 +170,7 @@ def test_precise_conversion(c_parser_only):
actual_val = Decimal(text[2:])
def error(val):
- return abs(Decimal("{0:.100}".format(val)) - actual_val)
+ return abs(Decimal(f"{val:.100}") - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
@@ -299,9 +299,7 @@ def test_grow_boundary_at_cap(c_parser_only):
def test_empty_header_read(count):
s = StringIO("," * count)
- expected = DataFrame(
- columns=["Unnamed: {i}".format(i=i) for i in range(count + 1)]
- )
+ expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
df = parser.read_csv(s)
tm.assert_frame_equal(df, expected)
@@ -489,7 +487,7 @@ def test_comment_whitespace_delimited(c_parser_only, capsys):
captured = capsys.readouterr()
# skipped lines 2, 3, 4, 9
for line_num in (2, 3, 4, 9):
- assert "Skipping line {}".format(line_num) in captured.err
+ assert f"Skipping line {line_num}" in captured.err
expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
tm.assert_frame_equal(df, expected)
diff --git a/pandas/tests/io/parser/test_common.py b/pandas/tests/io/parser/test_common.py
index c19056d434ec3..b3aa1aa14a509 100644
--- a/pandas/tests/io/parser/test_common.py
+++ b/pandas/tests/io/parser/test_common.py
@@ -957,7 +957,7 @@ def test_nonexistent_path(all_parsers):
# gh-14086: raise more helpful FileNotFoundError
# GH#29233 "File foo" instead of "File b'foo'"
parser = all_parsers
- path = "{}.csv".format(tm.rands(10))
+ path = f"{tm.rands(10)}.csv"
msg = f"File {path} does not exist" if parser.engine == "c" else r"\[Errno 2\]"
with pytest.raises(FileNotFoundError, match=msg) as e:
@@ -1872,7 +1872,7 @@ def test_internal_eof_byte_to_file(all_parsers):
parser = all_parsers
data = b'c1,c2\r\n"test \x1a test", test\r\n'
expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
- path = "__{}__.csv".format(tm.rands(10))
+ path = f"__{tm.rands(10)}__.csv"
with tm.ensure_clean(path) as path:
with open(path, "wb") as f:
diff --git a/pandas/tests/io/parser/test_compression.py b/pandas/tests/io/parser/test_compression.py
index dc03370daa1e2..41bf022b7458c 100644
--- a/pandas/tests/io/parser/test_compression.py
+++ b/pandas/tests/io/parser/test_compression.py
@@ -145,7 +145,7 @@ def test_invalid_compression(all_parsers, invalid_compression):
parser = all_parsers
compress_kwargs = dict(compression=invalid_compression)
- msg = "Unrecognized compression type: {compression}".format(**compress_kwargs)
+ msg = f"Unrecognized compression type: {compress_kwargs['compression']}"
with pytest.raises(ValueError, match=msg):
parser.read_csv("test_file.zip", **compress_kwargs)
diff --git a/pandas/tests/io/parser/test_encoding.py b/pandas/tests/io/parser/test_encoding.py
index 13f72a0414bac..3661e4e056db2 100644
--- a/pandas/tests/io/parser/test_encoding.py
+++ b/pandas/tests/io/parser/test_encoding.py
@@ -45,7 +45,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
4,5,6""".replace(
",", sep
)
- path = "__{}__.csv".format(tm.rands(10))
+ path = f"__{tm.rands(10)}__.csv"
kwargs = dict(sep=sep, skiprows=2)
utf8 = "utf-8"
diff --git a/pandas/tests/io/parser/test_multi_thread.py b/pandas/tests/io/parser/test_multi_thread.py
index 64ccaf60ec230..458ff4da55ed3 100644
--- a/pandas/tests/io/parser/test_multi_thread.py
+++ b/pandas/tests/io/parser/test_multi_thread.py
@@ -41,9 +41,7 @@ def test_multi_thread_string_io_read_csv(all_parsers):
num_files = 100
bytes_to_df = [
- "\n".join(
- ["{i:d},{i:d},{i:d}".format(i=i) for i in range(max_row_range)]
- ).encode()
+ "\n".join([f"{i:d},{i:d},{i:d}" for i in range(max_row_range)]).encode()
for _ in range(num_files)
]
files = [BytesIO(b) for b in bytes_to_df]
diff --git a/pandas/tests/io/parser/test_na_values.py b/pandas/tests/io/parser/test_na_values.py
index f9a083d7f5d22..da9930d910043 100644
--- a/pandas/tests/io/parser/test_na_values.py
+++ b/pandas/tests/io/parser/test_na_values.py
@@ -111,10 +111,10 @@ def f(i, v):
elif i > 0:
buf = "".join([","] * i)
- buf = "{0}{1}".format(buf, v)
+ buf = f"{buf}{v}"
if i < nv - 1:
- buf = "{0}{1}".format(buf, "".join([","] * (nv - i - 1)))
+ buf = f"{buf}{''.join([','] * (nv - i - 1))}"
return buf
diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py
index b01b22e811ee3..31573e4e6ecce 100644
--- a/pandas/tests/io/parser/test_parse_dates.py
+++ b/pandas/tests/io/parser/test_parse_dates.py
@@ -1101,7 +1101,7 @@ def test_bad_date_parse(all_parsers, cache_dates, value):
# if we have an invalid date make sure that we handle this with
# and w/o the cache properly
parser = all_parsers
- s = StringIO(("{value},\n".format(value=value)) * 50000)
+ s = StringIO(f"{value},\n" * 50000)
parser.read_csv(
s,
diff --git a/pandas/tests/io/parser/test_read_fwf.py b/pandas/tests/io/parser/test_read_fwf.py
index 27aef2376e87d..e982667f06f31 100644
--- a/pandas/tests/io/parser/test_read_fwf.py
+++ b/pandas/tests/io/parser/test_read_fwf.py
@@ -260,7 +260,7 @@ def test_fwf_regression():
# Turns out "T060" is parsable as a datetime slice!
tz_list = [1, 10, 20, 30, 60, 80, 100]
widths = [16] + [8] * len(tz_list)
- names = ["SST"] + ["T{z:03d}".format(z=z) for z in tz_list[1:]]
+ names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
diff --git a/pandas/tests/io/pytables/conftest.py b/pandas/tests/io/pytables/conftest.py
index 214f95c6fb441..38ffcb3b0e8ec 100644
--- a/pandas/tests/io/pytables/conftest.py
+++ b/pandas/tests/io/pytables/conftest.py
@@ -6,7 +6,7 @@
@pytest.fixture
def setup_path():
"""Fixture for setup path"""
- return "tmp.__{}__.h5".format(tm.rands(10))
+ return f"tmp.__{tm.rands(10)}__.h5"
@pytest.fixture(scope="module", autouse=True)
diff --git a/pandas/tests/io/pytables/test_store.py b/pandas/tests/io/pytables/test_store.py
index 547de39eec5e0..7f5217cc0d566 100644
--- a/pandas/tests/io/pytables/test_store.py
+++ b/pandas/tests/io/pytables/test_store.py
@@ -653,7 +653,7 @@ def test_getattr(self, setup_path):
# not stores
for x in ["mode", "path", "handle", "complib"]:
- getattr(store, "_{x}".format(x=x))
+ getattr(store, f"_{x}")
def test_put(self, setup_path):
@@ -690,9 +690,7 @@ def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
- index = Index(
- ["I am a very long string index: {i}".format(i=i) for i in range(20)]
- )
+ index = Index([f"I am a very long string index: {i}" for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
@@ -705,7 +703,7 @@ def test_put_string_index(self, setup_path):
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
- + ["I am a very long string index: {i}".format(i=i) for i in range(20)]
+ + [f"I am a very long string index: {i}" for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
@@ -2044,7 +2042,7 @@ def test_unimplemented_dtypes_table_columns(self, setup_path):
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
- store.append("df1_{n}".format(n=n), df)
+ store.append(f"df1_{n}", df)
# frame
df = tm.makeDataFrame()
@@ -2689,16 +2687,12 @@ def test_select_dtypes(self, setup_path):
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
- result = store.select(
- "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
- )
+ result = store.select("df", f"boolv == {v!s}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
- result = store.select(
- "df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
- )
+ result = store.select("df", f"boolv == {v!s}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
@@ -2784,7 +2778,7 @@ def test_select_with_many_inputs(self, setup_path):
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
- + ["a{i:03d}".format(i=i) for i in range(100)],
+ + [f"a{i:03d}" for i in range(100)],
)
)
_maybe_remove(store, "df")
@@ -2805,7 +2799,7 @@ def test_select_with_many_inputs(self, setup_path):
tm.assert_frame_equal(expected, result)
# big selector along the columns
- selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
+ selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
@@ -2914,21 +2908,19 @@ def test_select_iterator_complete_8014(self, setup_path):
# select w/o iterator and where clause, single term, begin
# of range, works
- where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
+ where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
- where = "index <= '{end_dt}'".format(end_dt=end_dt)
+ where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
- where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
- beg_dt=beg_dt, end_dt=end_dt
- )
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
@@ -2948,21 +2940,19 @@ def test_select_iterator_complete_8014(self, setup_path):
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
- where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
+ where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
- where = "index <= '{end_dt}'".format(end_dt=end_dt)
+ where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
- where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
- beg_dt=beg_dt, end_dt=end_dt
- )
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
@@ -2984,23 +2974,21 @@ def test_select_iterator_non_complete_8014(self, setup_path):
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
- where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
+ where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
- where = "index <= '{end_dt}'".format(end_dt=end_dt)
+ where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
- where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
- beg_dt=beg_dt, end_dt=end_dt
- )
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
@@ -3018,7 +3006,7 @@ def test_select_iterator_non_complete_8014(self, setup_path):
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
- where = "index > '{end_dt}'".format(end_dt=end_dt)
+ where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
@@ -3040,14 +3028,14 @@ def test_select_iterator_many_empty_frames(self, setup_path):
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
- where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
+ where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
- where = "index <= '{end_dt}'".format(end_dt=end_dt)
+ where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
@@ -3056,9 +3044,7 @@ def test_select_iterator_many_empty_frames(self, setup_path):
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
- where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
- beg_dt=beg_dt, end_dt=end_dt
- )
+ where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
@@ -3076,9 +3062,7 @@ def test_select_iterator_many_empty_frames(self, setup_path):
# return [] e.g. `for e in []: print True` never prints
# True.
- where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
- beg_dt=beg_dt, end_dt=end_dt
- )
+ where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
@@ -3807,8 +3791,8 @@ def test_start_stop_fixed(self, setup_path):
def test_select_filter_corner(self, setup_path):
df = DataFrame(np.random.randn(50, 100))
- df.index = ["{c:3d}".format(c=c) for c in df.index]
- df.columns = ["{c:3d}".format(c=c) for c in df.columns]
+ df.index = [f"{c:3d}" for c in df.index]
+ df.columns = [f"{c:3d}" for c in df.columns]
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
@@ -4259,7 +4243,7 @@ def test_append_with_diff_col_name_types_raises_value_error(self, setup_path):
df5 = DataFrame({("1", 2, object): np.random.randn(10)})
with ensure_clean_store(setup_path) as store:
- name = "df_{}".format(tm.rands(10))
+ name = f"df_{tm.rands(10)}"
store.append(name, df)
for d in (df2, df3, df4, df5):
@@ -4543,9 +4527,7 @@ def test_to_hdf_with_object_column_names(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df.to_hdf(path, "df", format="table", data_columns=True)
- result = pd.read_hdf(
- path, "df", where="index = [{0}]".format(df.index[0])
- )
+ result = pd.read_hdf(path, "df", where=f"index = [{df.index[0]}]")
assert len(result)
def test_read_hdf_open_store(self, setup_path):
@@ -4678,16 +4660,16 @@ def test_query_long_float_literal(self, setup_path):
store.append("test", df, format="table", data_columns=True)
cutoff = 1000000000.0006
- result = store.select("test", "A < {cutoff:.4f}".format(cutoff=cutoff))
+ result = store.select("test", f"A < {cutoff:.4f}")
assert result.empty
cutoff = 1000000000.0010
- result = store.select("test", "A > {cutoff:.4f}".format(cutoff=cutoff))
+ result = store.select("test", f"A > {cutoff:.4f}")
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
- result = store.select("test", "A == {exact:.4f}".format(exact=exact))
+ result = store.select("test", f"A == {exact:.4f}")
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
@@ -4714,21 +4696,21 @@ def test_query_compare_column_type(self, setup_path):
for op in ["<", ">", "=="]:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp("2014-01-01"), pd.Timedelta(1, "s")]:
- query = "date {op} v".format(op=op)
+ query = f"date {op} v"
with pytest.raises(TypeError):
store.select("test", where=query)
# strings to other columns must be convertible to type
v = "a"
for col in ["int", "float", "real_date"]:
- query = "{col} {op} v".format(op=op, col=col)
+ query = f"{col} {op} v"
with pytest.raises(ValueError):
store.select("test", where=query)
for v, col in zip(
["1", "1.1", "2014-01-01"], ["int", "float", "real_date"]
):
- query = "{col} {op} v".format(op=op, col=col)
+ query = f"{col} {op} v"
result = store.select("test", where=query)
if op == "==":
diff --git a/pandas/tests/io/pytables/test_timezones.py b/pandas/tests/io/pytables/test_timezones.py
index 2bf22d982e5fe..74d5a77f86827 100644
--- a/pandas/tests/io/pytables/test_timezones.py
+++ b/pandas/tests/io/pytables/test_timezones.py
@@ -24,9 +24,7 @@ def _compare_with_tz(a, b):
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
- raise AssertionError(
- "invalid tz comparison [{a_e}] [{b_e}]".format(a_e=a_e, b_e=b_e)
- )
+ raise AssertionError(f"invalid tz comparison [{a_e}] [{b_e}]")
def test_append_with_timezones_dateutil(setup_path):
diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py
index b649e394c780b..3fa7f8a966bda 100644
--- a/pandas/tests/io/test_html.py
+++ b/pandas/tests/io/test_html.py
@@ -39,9 +39,9 @@ def html_encoding_file(request, datapath):
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
- "lists are not of equal size "
- "len(list1) == {0}, "
- "len(list2) == {1}".format(len(list1), len(list2))
+ "lists are not of equal size "
+ f"len(list1) == {len(list1)}, "
+ f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py
index cb2112b481952..1c9da43d4ddd6 100644
--- a/pandas/tests/io/test_stata.py
+++ b/pandas/tests/io/test_stata.py
@@ -1715,7 +1715,7 @@ def test_invalid_file_not_written(self, version):
"'ascii' codec can't decode byte 0xef in position 14: "
r"ordinal not in range\(128\)"
)
- with pytest.raises(UnicodeEncodeError, match=r"{}|{}".format(msg1, msg2)):
+ with pytest.raises(UnicodeEncodeError, match=fr"{msg1}|{msg2}"):
with tm.assert_produces_warning(ResourceWarning):
df.to_stata(path)
diff --git a/pandas/tests/resample/test_period_index.py b/pandas/tests/resample/test_period_index.py
index ff303b808f6f5..f9461860a5eed 100644
--- a/pandas/tests/resample/test_period_index.py
+++ b/pandas/tests/resample/test_period_index.py
@@ -96,9 +96,7 @@ def test_selection(self, index, freq, kind, kwargs):
def test_annual_upsample_cases(
self, targ, conv, meth, month, simple_period_range_series
):
- ts = simple_period_range_series(
- "1/1/1990", "12/31/1991", freq="A-{month}".format(month=month)
- )
+ ts = simple_period_range_series("1/1/1990", "12/31/1991", freq=f"A-{month}")
result = getattr(ts.resample(targ, convention=conv), meth)()
expected = result.to_timestamp(targ, how=conv)
@@ -130,9 +128,9 @@ def test_not_subperiod(self, simple_period_range_series, rule, expected_error_ms
# These are incompatible period rules for resampling
ts = simple_period_range_series("1/1/1990", "6/30/1995", freq="w-wed")
msg = (
- "Frequency cannot be resampled to {}, as they "
- "are not sub or super periods"
- ).format(expected_error_msg)
+ "Frequency cannot be resampled to "
+ f"{expected_error_msg}, as they are not sub or super periods"
+ )
with pytest.raises(IncompatibleFrequency, match=msg):
ts.resample(rule).mean()
@@ -176,7 +174,7 @@ def test_annual_upsample(self, simple_period_range_series):
def test_quarterly_upsample(
self, month, target, convention, simple_period_range_series
):
- freq = "Q-{month}".format(month=month)
+ freq = f"Q-{month}"
ts = simple_period_range_series("1/1/1990", "12/31/1995", freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
@@ -351,7 +349,7 @@ def test_fill_method_and_how_upsample(self):
@pytest.mark.parametrize("target", ["D", "B"])
@pytest.mark.parametrize("convention", ["start", "end"])
def test_weekly_upsample(self, day, target, convention, simple_period_range_series):
- freq = "W-{day}".format(day=day)
+ freq = f"W-{day}"
ts = simple_period_range_series("1/1/1990", "12/31/1995", freq=freq)
result = ts.resample(target, convention=convention).ffill()
expected = result.to_timestamp(target, how=convention)
@@ -367,16 +365,14 @@ def test_resample_to_timestamps(self, simple_period_range_series):
def test_resample_to_quarterly(self, simple_period_range_series):
for month in MONTHS:
- ts = simple_period_range_series(
- "1990", "1992", freq="A-{month}".format(month=month)
- )
- quar_ts = ts.resample("Q-{month}".format(month=month)).ffill()
+ ts = simple_period_range_series("1990", "1992", freq=f"A-{month}")
+ quar_ts = ts.resample(f"Q-{month}").ffill()
stamps = ts.to_timestamp("D", how="start")
qdates = period_range(
ts.index[0].asfreq("D", "start"),
ts.index[-1].asfreq("D", "end"),
- freq="Q-{month}".format(month=month),
+ freq=f"Q-{month}",
)
expected = stamps.reindex(qdates.to_timestamp("D", "s"), method="ffill")
diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py
index 7020d373caf82..7be60590966db 100644
--- a/pandas/tests/reshape/merge/test_join.py
+++ b/pandas/tests/reshape/merge/test_join.py
@@ -262,8 +262,9 @@ def test_join_on_fails_with_wrong_object_type(self, wrong_type):
# Edited test to remove the Series object from test parameters
df = DataFrame({"a": [1, 1]})
- msg = "Can only merge Series or DataFrame objects, a {} was passed".format(
- str(type(wrong_type))
+ msg = (
+ "Can only merge Series or DataFrame objects, "
+ f"a {type(wrong_type)} was passed"
)
with pytest.raises(TypeError, match=msg):
merge(wrong_type, df, left_on="a", right_on="a")
@@ -812,9 +813,7 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix
except KeyError:
if how in ("left", "inner"):
raise AssertionError(
- "key {group_key!s} should not have been in the join".format(
- group_key=group_key
- )
+ f"key {group_key!s} should not have been in the join"
)
_assert_all_na(l_joined, left.columns, join_col)
@@ -826,9 +825,7 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix
except KeyError:
if how in ("right", "inner"):
raise AssertionError(
- "key {group_key!s} should not have been in the join".format(
- group_key=group_key
- )
+ f"key {group_key!s} should not have been in the join"
)
_assert_all_na(r_joined, right.columns, join_col)
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index fd189c7435b29..8fb903cc5acc1 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -710,7 +710,7 @@ def test_other_timedelta_unit(self, unit):
df1 = pd.DataFrame({"entity_id": [101, 102]})
s = pd.Series([None, None], index=[101, 102], name="days")
- dtype = "m8[{}]".format(unit)
+ dtype = f"m8[{unit}]"
df2 = s.astype(dtype).to_frame("days")
assert df2["days"].dtype == "m8[ns]"
@@ -1011,10 +1011,10 @@ def test_indicator(self):
df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
msg = (
- "Cannot use `indicator=True` option when data contains a "
- "column named {}|"
- "Cannot use name of an existing column for indicator column"
- ).format(i)
+ "Cannot use `indicator=True` option when data contains a "
+ f"column named {i}|"
+ "Cannot use name of an existing column for indicator column"
+ )
with pytest.raises(ValueError, match=msg):
merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
with pytest.raises(ValueError, match=msg):
@@ -1555,11 +1555,9 @@ def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
df2 = DataFrame({"A": df2_vals})
msg = (
- "You are trying to merge on {lk_dtype} and "
- "{rk_dtype} columns. If you wish to proceed "
- "you should use pd.concat".format(
- lk_dtype=df1["A"].dtype, rk_dtype=df2["A"].dtype
- )
+ f"You are trying to merge on {df1['A'].dtype} and "
+ f"{df2['A'].dtype} columns. If you wish to proceed "
+ "you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
@@ -1567,11 +1565,9 @@ def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
# Check that error still raised when swapping order of dataframes
msg = (
- "You are trying to merge on {lk_dtype} and "
- "{rk_dtype} columns. If you wish to proceed "
- "you should use pd.concat".format(
- lk_dtype=df2["A"].dtype, rk_dtype=df1["A"].dtype
- )
+ f"You are trying to merge on {df2['A'].dtype} and "
+ f"{df1['A'].dtype} columns. If you wish to proceed "
+ "you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py
index 8037095aff0b9..3275520bff4de 100644
--- a/pandas/tests/reshape/merge/test_merge_asof.py
+++ b/pandas/tests/reshape/merge/test_merge_asof.py
@@ -1198,7 +1198,7 @@ def test_merge_groupby_multiple_column_with_categorical_column(self):
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
- msg = "Merge keys contain null values on {} side".format(side)
+ msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
diff --git a/pandas/tests/reshape/test_melt.py b/pandas/tests/reshape/test_melt.py
index 814325844cb4c..6a670e6c729e9 100644
--- a/pandas/tests/reshape/test_melt.py
+++ b/pandas/tests/reshape/test_melt.py
@@ -364,8 +364,8 @@ def test_pairs(self):
df = DataFrame(data)
spec = {
- "visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 4)],
- "wt": ["wt{i:d}".format(i=i) for i in range(1, 4)],
+ "visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
+ "wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
@@ -557,8 +557,8 @@ def test_pairs(self):
result = lreshape(df, spec, dropna=False, label="foo")
spec = {
- "visitdt": ["visitdt{i:d}".format(i=i) for i in range(1, 3)],
- "wt": ["wt{i:d}".format(i=i) for i in range(1, 4)],
+ "visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
+ "wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py
index fe75aef1ca3d7..bf761b515767a 100644
--- a/pandas/tests/reshape/test_pivot.py
+++ b/pandas/tests/reshape/test_pivot.py
@@ -1161,9 +1161,9 @@ def test_margins_no_values_two_row_two_cols(self):
def test_pivot_table_with_margins_set_margin_name(self, margin_name):
# see gh-3335
msg = (
- r'Conflicting name "{}" in margins|'
- "margins_name argument must be a string"
- ).format(margin_name)
+ fr'Conflicting name "{margin_name}" in margins|'
+ "margins_name argument must be a string"
+ )
with pytest.raises(ValueError, match=msg):
# multi-index index
pivot_table(
diff --git a/pandas/tests/scalar/timedelta/test_constructors.py b/pandas/tests/scalar/timedelta/test_constructors.py
index 25c9fc19981be..d32d1994cac74 100644
--- a/pandas/tests/scalar/timedelta/test_constructors.py
+++ b/pandas/tests/scalar/timedelta/test_constructors.py
@@ -239,7 +239,7 @@ def test_iso_constructor(fmt, exp):
],
)
def test_iso_constructor_raises(fmt):
- msg = "Invalid ISO 8601 Duration format - {}".format(fmt)
+ msg = f"Invalid ISO 8601 Duration format - {fmt}"
with pytest.raises(ValueError, match=msg):
Timedelta(fmt)
diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py
index 737a85faa4c9b..b4a7173da84d0 100644
--- a/pandas/tests/scalar/timestamp/test_constructors.py
+++ b/pandas/tests/scalar/timestamp/test_constructors.py
@@ -314,7 +314,7 @@ def test_constructor_nanosecond(self, result):
def test_constructor_invalid_Z0_isostring(self, z):
# GH 8910
with pytest.raises(ValueError):
- Timestamp("2014-11-02 01:00{}".format(z))
+ Timestamp(f"2014-11-02 01:00{z}")
@pytest.mark.parametrize(
"arg",
@@ -455,9 +455,7 @@ def test_disallow_setting_tz(self, tz):
@pytest.mark.parametrize("offset", ["+0300", "+0200"])
def test_construct_timestamp_near_dst(self, offset):
# GH 20854
- expected = Timestamp(
- "2016-10-30 03:00:00{}".format(offset), tz="Europe/Helsinki"
- )
+ expected = Timestamp(f"2016-10-30 03:00:00{offset}", tz="Europe/Helsinki")
result = Timestamp(expected).tz_convert("Europe/Helsinki")
assert result == expected
diff --git a/pandas/tests/scalar/timestamp/test_rendering.py b/pandas/tests/scalar/timestamp/test_rendering.py
index cab6946bb8d02..a27d233d5ab88 100644
--- a/pandas/tests/scalar/timestamp/test_rendering.py
+++ b/pandas/tests/scalar/timestamp/test_rendering.py
@@ -17,7 +17,7 @@ class TestTimestampRendering:
)
def test_repr(self, date, freq, tz):
# avoid to match with timezone name
- freq_repr = "'{0}'".format(freq)
+ freq_repr = f"'{freq}'"
if tz.startswith("dateutil"):
tz_repr = tz.replace("dateutil", "")
else:
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 65066fd0099ba..726695401cb91 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -233,17 +233,17 @@ def test_round_int64(self, timestamp, freq):
# test floor
result = dt.floor(freq)
- assert result.value % unit == 0, "floor not a {} multiple".format(freq)
+ assert result.value % unit == 0, f"floor not a {freq} multiple"
assert 0 <= dt.value - result.value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
- assert result.value % unit == 0, "ceil not a {} multiple".format(freq)
+ assert result.value % unit == 0, f"ceil not a {freq} multiple"
assert 0 <= result.value - dt.value < unit, "ceil error"
# test round
result = dt.round(freq)
- assert result.value % unit == 0, "round not a {} multiple".format(freq)
+ assert result.value % unit == 0, f"round not a {freq} multiple"
assert abs(result.value - dt.value) <= unit // 2, "round error"
if unit % 2 == 0 and abs(result.value - dt.value) == unit // 2:
# round half to even
diff --git a/pandas/tests/series/methods/test_nlargest.py b/pandas/tests/series/methods/test_nlargest.py
index a029965c7394f..b1aa09f387a13 100644
--- a/pandas/tests/series/methods/test_nlargest.py
+++ b/pandas/tests/series/methods/test_nlargest.py
@@ -98,7 +98,7 @@ class TestSeriesNLargestNSmallest:
)
def test_nlargest_error(self, r):
dt = r.dtype
- msg = "Cannot use method 'n(larg|small)est' with dtype {dt}".format(dt=dt)
+ msg = f"Cannot use method 'n(larg|small)est' with dtype {dt}"
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py
index e6e91b5d4f5f4..1f6330b1a625d 100644
--- a/pandas/tests/series/test_analytics.py
+++ b/pandas/tests/series/test_analytics.py
@@ -169,10 +169,10 @@ def test_validate_any_all_out_keepdims_raises(self, kwargs, func):
name = func.__name__
msg = (
- r"the '{arg}' parameter is not "
- r"supported in the pandas "
- r"implementation of {fname}\(\)"
- ).format(arg=param, fname=name)
+ fr"the '{param}' parameter is not "
+ "supported in the pandas "
+ fr"implementation of {name}\(\)"
+ )
with pytest.raises(ValueError, match=msg):
func(s, **kwargs)
diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py
index f96d6ddfc357e..33706c00c53f4 100644
--- a/pandas/tests/series/test_api.py
+++ b/pandas/tests/series/test_api.py
@@ -136,9 +136,7 @@ def test_constructor_subclass_dict(self, dict_subclass):
def test_constructor_ordereddict(self):
# GH3283
- data = OrderedDict(
- ("col{i}".format(i=i), np.random.random()) for i in range(12)
- )
+ data = OrderedDict((f"col{i}", np.random.random()) for i in range(12))
series = Series(data)
expected = Series(list(data.values()), list(data.keys()))
@@ -258,7 +256,7 @@ def get_dir(s):
tm.makeIntIndex(10),
tm.makeFloatIndex(10),
Index([True, False]),
- Index(["a{}".format(i) for i in range(101)]),
+ Index([f"a{i}" for i in range(101)]),
pd.MultiIndex.from_tuples(zip("ABCD", "EFGH")),
pd.MultiIndex.from_tuples(zip([0, 1, 2, 3], "EFGH")),
],
diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py
index b0d06793dbe13..7f14a388b85c6 100644
--- a/pandas/tests/series/test_constructors.py
+++ b/pandas/tests/series/test_constructors.py
@@ -1351,7 +1351,6 @@ def test_constructor_cant_cast_datetimelike(self, index):
# We don't care whether the error message says
# PeriodIndex or PeriodArray
msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "
-
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
diff --git a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py
index 1fc582156a884..80a024eda7848 100644
--- a/pandas/tests/series/test_dtypes.py
+++ b/pandas/tests/series/test_dtypes.py
@@ -261,7 +261,7 @@ def test_astype_categorical_to_other(self):
value = np.random.RandomState(0).randint(0, 10000, 100)
df = DataFrame({"value": value})
- labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
+ labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
@@ -384,9 +384,9 @@ def test_astype_generic_timestamp_no_frequency(self, dtype):
s = Series(data)
msg = (
- r"The '{dtype}' dtype has no unit\. "
- r"Please pass in '{dtype}\[ns\]' instead."
- ).format(dtype=dtype.__name__)
+ fr"The '{dtype.__name__}' dtype has no unit\. "
+ fr"Please pass in '{dtype.__name__}\[ns\]' instead."
+ )
with pytest.raises(ValueError, match=msg):
s.astype(dtype)
diff --git a/pandas/tests/series/test_ufunc.py b/pandas/tests/series/test_ufunc.py
index ece7f1f21ab23..536f15ea75d69 100644
--- a/pandas/tests/series/test_ufunc.py
+++ b/pandas/tests/series/test_ufunc.py
@@ -287,7 +287,7 @@ def __eq__(self, other) -> bool:
return type(other) is Thing and self.value == other.value
def __repr__(self) -> str:
- return "Thing({})".format(self.value)
+ return f"Thing({self.value})"
s = pd.Series([Thing(1), Thing(2)])
result = np.add(s, Thing(1))
diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py
index 02898988ca8aa..122ef1f47968e 100644
--- a/pandas/tests/test_downstream.py
+++ b/pandas/tests/test_downstream.py
@@ -19,7 +19,7 @@ def import_module(name):
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
- pytest.skip("skipping as {} not available".format(name))
+ pytest.skip(f"skipping as {name} not available")
@pytest.fixture
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index b377ca2869bd3..bd07fd3edc24d 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -895,10 +895,7 @@ def test_stack_unstack_unordered_multiindex(self):
# GH 18265
values = np.arange(5)
data = np.vstack(
- [
- ["b{}".format(x) for x in values], # b0, b1, ..
- ["a{}".format(x) for x in values],
- ]
+ [[f"b{x}" for x in values], [f"a{x}" for x in values]] # b0, b1, ..
) # a0, a1, ..
df = pd.DataFrame(data.T, columns=["b", "a"])
df.columns.name = "first"
diff --git a/pandas/tests/tools/test_numeric.py b/pandas/tests/tools/test_numeric.py
index 2fd39d5a7b703..19385e797467c 100644
--- a/pandas/tests/tools/test_numeric.py
+++ b/pandas/tests/tools/test_numeric.py
@@ -308,7 +308,7 @@ def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors
if errors in (None, "raise"):
index = int(multiple_elts)
- msg = "Integer out of range. at position {index}".format(index=index)
+ msg = f"Integer out of range. at position {index}"
with pytest.raises(ValueError, match=msg):
to_numeric(arr, **kwargs)
diff --git a/pandas/tests/tslibs/test_parse_iso8601.py b/pandas/tests/tslibs/test_parse_iso8601.py
index a58f227c20c7f..1c01e826d9794 100644
--- a/pandas/tests/tslibs/test_parse_iso8601.py
+++ b/pandas/tests/tslibs/test_parse_iso8601.py
@@ -51,7 +51,7 @@ def test_parsers_iso8601(date_str, exp):
],
)
def test_parsers_iso8601_invalid(date_str):
- msg = 'Error parsing datetime string "{s}"'.format(s=date_str)
+ msg = f'Error parsing datetime string "{date_str}"'
with pytest.raises(ValueError, match=msg):
tslib._test_parse_iso8601(date_str)
diff --git a/pandas/tests/window/moments/test_moments_rolling.py b/pandas/tests/window/moments/test_moments_rolling.py
index 83e4ee25558b5..4e70cc8ad5cd8 100644
--- a/pandas/tests/window/moments/test_moments_rolling.py
+++ b/pandas/tests/window/moments/test_moments_rolling.py
@@ -860,7 +860,7 @@ def get_result(obj, window, min_periods=None, center=False):
tm.assert_series_equal(result, expected)
# shifter index
- s = ["x{x:d}".format(x=x) for x in range(12)]
+ s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
@@ -1438,13 +1438,9 @@ def test_rolling_median_memory_error(self):
def test_rolling_min_max_numeric_types(self):
# GH12373
- types_test = [np.dtype("f{}".format(width)) for width in [4, 8]]
+ types_test = [np.dtype(f"f{width}") for width in [4, 8]]
types_test.extend(
- [
- np.dtype("{}{}".format(sign, width))
- for width in [1, 2, 4, 8]
- for sign in "ui"
- ]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"]
)
for data_type in types_test:
# Just testing that these don't throw exceptions and that
|