From cfc6e90e095e00625a569bed3fc65ce4b13c4ff9 Mon Sep 17 00:00:00 2001 From: John Karasinski Date: Fri, 2 Oct 2020 17:01:26 -0700 Subject: [PATCH 1/3] update code style for user guide for #36777 --- doc/source/user_guide/groupby.rst | 463 +++++++++------- doc/source/user_guide/io.rst | 12 +- doc/source/user_guide/missing_data.rst | 137 ++--- doc/source/user_guide/scale.rst | 35 +- doc/source/user_guide/timeseries.rst | 717 +++++++++++++------------ 5 files changed, 719 insertions(+), 645 deletions(-) diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst index 52342de98de79..9696f14f03b56 100644 --- a/doc/source/user_guide/groupby.rst +++ b/doc/source/user_guide/groupby.rst @@ -68,19 +68,23 @@ object (more on what the GroupBy object is later), you may do the following: .. ipython:: python - df = pd.DataFrame([('bird', 'Falconiformes', 389.0), - ('bird', 'Psittaciformes', 24.0), - ('mammal', 'Carnivora', 80.2), - ('mammal', 'Primates', np.nan), - ('mammal', 'Carnivora', 58)], - index=['falcon', 'parrot', 'lion', 'monkey', 'leopard'], - columns=('class', 'order', 'max_speed')) + df = pd.DataFrame( + [ + ("bird", "Falconiformes", 389.0), + ("bird", "Psittaciformes", 24.0), + ("mammal", "Carnivora", 80.2), + ("mammal", "Primates", np.nan), + ("mammal", "Carnivora", 58), + ], + index=["falcon", "parrot", "lion", "monkey", "leopard"], + columns=("class", "order", "max_speed"), + ) df # default is axis=0 - grouped = df.groupby('class') - grouped = df.groupby('order', axis='columns') - grouped = df.groupby(['class', 'order']) + grouped = df.groupby("class") + grouped = df.groupby("order", axis="columns") + grouped = df.groupby(["class", "order"]) The mapping can be specified many different ways: @@ -103,12 +107,14 @@ consider the following ``DataFrame``: .. ipython:: python - df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) + df = pd.DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.randn(8), + "D": np.random.randn(8), + } + ) df On a DataFrame, we obtain a GroupBy object by calling :meth:`~DataFrame.groupby`. @@ -116,8 +122,8 @@ We could naturally group by either the ``A`` or ``B`` columns, or both: .. ipython:: python - grouped = df.groupby('A') - grouped = df.groupby(['A', 'B']) + grouped = df.groupby("A") + grouped = df.groupby(["A", "B"]) .. versionadded:: 0.24 @@ -126,8 +132,8 @@ but the specified columns .. ipython:: python - df2 = df.set_index(['A', 'B']) - grouped = df2.groupby(level=df2.index.names.difference(['B'])) + df2 = df.set_index(["A", "B"]) + grouped = df2.groupby(level=df2.index.names.difference(["B"])) grouped.sum() These will split the DataFrame on its index (rows). We could also split by the @@ -181,9 +187,9 @@ By default the group keys are sorted during the ``groupby`` operation. You may h .. ipython:: python - df2 = pd.DataFrame({'X': ['B', 'B', 'A', 'A'], 'Y': [1, 2, 3, 4]}) - df2.groupby(['X']).sum() - df2.groupby(['X'], sort=False).sum() + df2 = pd.DataFrame({"X": ["B", "B", "A", "A"], "Y": [1, 2, 3, 4]}) + df2.groupby(["X"]).sum() + df2.groupby(["X"], sort=False).sum() Note that ``groupby`` will preserve the order in which *observations* are sorted *within* each group. 
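As a quick illustration (a minimal sketch with a hypothetical ``df_ord``),
``sort=False`` changes only the order of the group *keys*; the rows inside
each group keep their original relative order:

.. ipython:: python

    df_ord = pd.DataFrame({"X": ["B", "A", "B", "A"], "Y": [10, 20, 30, 40]})
    # group keys appear in order of first observation...
    list(df_ord.groupby("X", sort=False).groups)
    # ...while rows within each group keep their original order
    df_ord.groupby("X", sort=False).get_group("B")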
@@ -191,10 +197,10 @@ For example, the groups created by ``groupby()`` below are in the order they app .. ipython:: python - df3 = pd.DataFrame({'X': ['A', 'B', 'A', 'B'], 'Y': [1, 4, 3, 2]}) - df3.groupby(['X']).get_group('A') + df3 = pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}) + df3.groupby(["X"]).get_group("A") - df3.groupby(['X']).get_group('B') + df3.groupby(["X"]).get_group("B") .. _groupby.dropna: @@ -236,7 +242,7 @@ above example we have: .. ipython:: python - df.groupby('A').groups + df.groupby("A").groups df.groupby(get_letter_type, axis=1).groups Calling the standard Python ``len`` function on the GroupBy object just returns @@ -244,7 +250,7 @@ the length of the ``groups`` dict, so it is largely just a convenience: .. ipython:: python - grouped = df.groupby(['A', 'B']) + grouped = df.groupby(["A", "B"]) grouped.groups len(grouped) @@ -259,15 +265,14 @@ the length of the ``groups`` dict, so it is largely just a convenience: n = 10 weight = np.random.normal(166, 20, size=n) height = np.random.normal(60, 10, size=n) - time = pd.date_range('1/1/2000', periods=n) - gender = np.random.choice(['male', 'female'], size=n) - df = pd.DataFrame({'height': height, 'weight': weight, - 'gender': gender}, index=time) + time = pd.date_range("1/1/2000", periods=n) + gender = np.random.choice(["male", "female"], size=n) + df = pd.DataFrame({"height": height, "weight": weight, "gender": gender}, index=time) .. ipython:: python df - gb = df.groupby('gender') + gb = df.groupby("gender") .. ipython:: @@ -291,9 +296,11 @@ Let's create a Series with a two-level ``MultiIndex``. .. ipython:: python - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] - index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second']) + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"]) s = pd.Series(np.random.randn(8), index=index) s @@ -309,7 +316,7 @@ number: .. ipython:: python - s.groupby(level='second').sum() + s.groupby(level="second").sum() The aggregation functions such as ``sum`` will take the level parameter directly. Additionally, the resulting index will be named according to the @@ -317,30 +324,32 @@ chosen level: .. ipython:: python - s.sum(level='second') + s.sum(level="second") Grouping with multiple levels is supported. .. ipython:: python :suppress: - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['doo', 'doo', 'bee', 'bee', 'bop', 'bop', 'bop', 'bop'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["doo", "doo", "bee", "bee", "bop", "bop", "bop", "bop"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] tuples = list(zip(*arrays)) - index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second', 'third']) + index = pd.MultiIndex.from_tuples(tuples, names=["first", "second", "third"]) s = pd.Series(np.random.randn(8), index=index) .. ipython:: python s - s.groupby(level=['first', 'second']).sum() + s.groupby(level=["first", "second"]).sum() Index level names may be supplied as keys. .. ipython:: python - s.groupby(['first', 'second']).sum() + s.groupby(["first", "second"]).sum() More on the ``sum`` function and aggregation later. @@ -352,14 +361,14 @@ objects. .. 
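ipython:: python

    # Hedged, self-contained sketch (``midx`` and its level names are made
    # up): grouping a Series by a named MultiIndex level is equivalent to
    # passing the level name as a plain key.
    midx = pd.MultiIndex.from_tuples(
        [("a", 1), ("a", 2), ("b", 1)], names=["key", "num"]
    )
    sketch = pd.Series([10, 20, 30], index=midx)
    sketch.groupby(level="key").sum()
    sketch.groupby("key").sum()

.. 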
ipython:: python - arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'], - ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']] + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] - index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second']) + index = pd.MultiIndex.from_arrays(arrays, names=["first", "second"]) - df = pd.DataFrame({'A': [1, 1, 1, 1, 2, 2, 3, 3], - 'B': np.arange(8)}, - index=index) + df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 3, 3], "B": np.arange(8)}, index=index) df @@ -368,19 +377,19 @@ the ``A`` column. .. ipython:: python - df.groupby([pd.Grouper(level=1), 'A']).sum() + df.groupby([pd.Grouper(level=1), "A"]).sum() Index levels may also be specified by name. .. ipython:: python - df.groupby([pd.Grouper(level='second'), 'A']).sum() + df.groupby([pd.Grouper(level="second"), "A"]).sum() Index level names may be specified as keys directly to ``groupby``. .. ipython:: python - df.groupby(['second', 'A']).sum() + df.groupby(["second", "A"]).sum() DataFrame column selection in GroupBy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -392,24 +401,26 @@ getting a column from a DataFrame, you can do: .. ipython:: python :suppress: - df = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) + df = pd.DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.randn(8), + "D": np.random.randn(8), + } + ) .. ipython:: python - grouped = df.groupby(['A']) - grouped_C = grouped['C'] - grouped_D = grouped['D'] + grouped = df.groupby(["A"]) + grouped_C = grouped["C"] + grouped_D = grouped["D"] This is mainly syntactic sugar for the alternative and much more verbose: .. ipython:: python - df['C'].groupby(df['A']) + df["C"].groupby(df["A"]) Additionally this method avoids recomputing the internal grouping information derived from the passed key. @@ -450,13 +461,13 @@ A single group can be selected using .. ipython:: python - grouped.get_group('bar') + grouped.get_group("bar") Or for an object grouped on multiple columns: .. ipython:: python - df.groupby(['A', 'B']).get_group(('bar', 'one')) + df.groupby(["A", "B"]).get_group(("bar", "one")) .. _groupby.aggregate: @@ -474,10 +485,10 @@ An obvious one is aggregation via the .. ipython:: python - grouped = df.groupby('A') + grouped = df.groupby("A") grouped.aggregate(np.sum) - grouped = df.groupby(['A', 'B']) + grouped = df.groupby(["A", "B"]) grouped.aggregate(np.sum) As you can see, the result of the aggregation will have the group names as the @@ -487,17 +498,17 @@ changed by using the ``as_index`` option: .. ipython:: python - grouped = df.groupby(['A', 'B'], as_index=False) + grouped = df.groupby(["A", "B"], as_index=False) grouped.aggregate(np.sum) - df.groupby('A', as_index=False).sum() + df.groupby("A", as_index=False).sum() Note that you could use the ``reset_index`` DataFrame function to achieve the same result as the column names are stored in the resulting ``MultiIndex``: .. ipython:: python - df.groupby(['A', 'B']).sum().reset_index() + df.groupby(["A", "B"]).sum().reset_index() Another simple aggregation example is to compute the size of each group. This is included in GroupBy as the ``size`` method. 
It returns a Series whose @@ -559,8 +570,8 @@ aggregation with, outputting a DataFrame: .. ipython:: python - grouped = df.groupby('A') - grouped['C'].agg([np.sum, np.mean, np.std]) + grouped = df.groupby("A") + grouped["C"].agg([np.sum, np.mean, np.std]) On a grouped ``DataFrame``, you can pass a list of functions to apply to each column, which produces an aggregated result with a hierarchical index: @@ -575,19 +586,21 @@ need to rename, then you can add in a chained operation for a ``Series`` like th .. ipython:: python - (grouped['C'].agg([np.sum, np.mean, np.std]) - .rename(columns={'sum': 'foo', - 'mean': 'bar', - 'std': 'baz'})) + ( + grouped["C"] + .agg([np.sum, np.mean, np.std]) + .rename(columns={"sum": "foo", "mean": "bar", "std": "baz"}) + ) For a grouped ``DataFrame``, you can rename in a similar manner: .. ipython:: python - (grouped.agg([np.sum, np.mean, np.std]) - .rename(columns={'sum': 'foo', - 'mean': 'bar', - 'std': 'baz'})) + ( + grouped.agg([np.sum, np.mean, np.std]).rename( + columns={"sum": "foo", "mean": "bar", "std": "baz"} + ) + ) .. note:: @@ -598,7 +611,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner: .. ipython:: python :okexcept: - grouped['C'].agg(['sum', 'sum']) + grouped["C"].agg(["sum", "sum"]) Pandas *does* allow you to provide multiple lambdas. In this case, pandas @@ -607,8 +620,7 @@ For a grouped ``DataFrame``, you can rename in a similar manner: .. ipython:: python - grouped['C'].agg([lambda x: x.max() - x.min(), - lambda x: x.median() - x.mean()]) + grouped["C"].agg([lambda x: x.max() - x.min(), lambda x: x.median() - x.mean()]) @@ -631,15 +643,19 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation", .. ipython:: python - animals = pd.DataFrame({'kind': ['cat', 'dog', 'cat', 'dog'], - 'height': [9.1, 6.0, 9.5, 34.0], - 'weight': [7.9, 7.5, 9.9, 198.0]}) + animals = pd.DataFrame( + { + "kind": ["cat", "dog", "cat", "dog"], + "height": [9.1, 6.0, 9.5, 34.0], + "weight": [7.9, 7.5, 9.9, 198.0], + } + ) animals animals.groupby("kind").agg( - min_height=pd.NamedAgg(column='height', aggfunc='min'), - max_height=pd.NamedAgg(column='height', aggfunc='max'), - average_weight=pd.NamedAgg(column='weight', aggfunc=np.mean), + min_height=pd.NamedAgg(column="height", aggfunc="min"), + max_height=pd.NamedAgg(column="height", aggfunc="max"), + average_weight=pd.NamedAgg(column="weight", aggfunc=np.mean), ) @@ -648,9 +664,9 @@ accepts the special syntax in :meth:`GroupBy.agg`, known as "named aggregation", .. ipython:: python animals.groupby("kind").agg( - min_height=('height', 'min'), - max_height=('height', 'max'), - average_weight=('weight', np.mean), + min_height=("height", "min"), + max_height=("height", "max"), + average_weight=("weight", np.mean), ) @@ -659,9 +675,11 @@ and unpack the keyword arguments .. ipython:: python - animals.groupby("kind").agg(**{ - 'total weight': pd.NamedAgg(column='weight', aggfunc=sum), - }) + animals.groupby("kind").agg( + **{ + "total weight": pd.NamedAgg(column="weight", aggfunc=sum), + } + ) Additional keyword arguments are not passed through to the aggregation functions. Only pairs of ``(column, aggfunc)`` should be passed as ``**kwargs``. If your aggregation functions @@ -680,8 +698,8 @@ no column selection, so the values are just the functions. .. ipython:: python animals.groupby("kind").height.agg( - min_height='min', - max_height='max', + min_height="min", + max_height="max", ) Applying different functions to DataFrame columns @@ -692,8 +710,7 @@ columns of a DataFrame: .. 
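ipython:: python

    # Hedged, self-contained sketch (``sketch_df`` is made up): the dict
    # values passed to ``agg`` may themselves be lists of functions, which
    # yields a hierarchical column index in the result.
    sketch_df = pd.DataFrame(
        {"k": ["a", "a", "b"], "C": [1.0, 2.0, 3.0], "D": [4.0, 5.0, 6.0]}
    )
    sketch_df.groupby("k").agg({"C": ["sum", "mean"], "D": "std"})

.. 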
ipython:: python - grouped.agg({'C': np.sum, - 'D': lambda x: np.std(x, ddof=1)}) + grouped.agg({"C": np.sum, "D": lambda x: np.std(x, ddof=1)}) The function names can also be strings. In order for a string to be valid it must be either implemented on GroupBy or available via :ref:`dispatching @@ -701,7 +718,7 @@ must be either implemented on GroupBy or available via :ref:`dispatching .. ipython:: python - grouped.agg({'C': 'sum', 'D': 'std'}) + grouped.agg({"C": "sum", "D": "std"}) .. _groupby.aggregate.cython: @@ -713,8 +730,8 @@ optimized Cython implementations: .. ipython:: python - df.groupby('A').sum() - df.groupby(['A', 'B']).mean() + df.groupby("A").sum() + df.groupby(["A", "B"]).mean() Of course ``sum`` and ``mean`` are implemented on pandas objects, so the above code would work even without the special versions via dispatching (see below). @@ -743,15 +760,14 @@ For example, suppose we wished to standardize the data within each group: .. ipython:: python - index = pd.date_range('10/1/1999', periods=1100) + index = pd.date_range("10/1/1999", periods=1100) ts = pd.Series(np.random.normal(0.5, 2, 1100), index) ts = ts.rolling(window=100, min_periods=100).mean().dropna() ts.head() ts.tail() - transformed = (ts.groupby(lambda x: x.year) - .transform(lambda x: (x - x.mean()) / x.std())) + transformed = ts.groupby(lambda x: x.year).transform(lambda x: (x - x.mean()) / x.std()) We would expect the result to now have mean 0 and standard deviation 1 within each group, which we can easily check: @@ -772,7 +788,7 @@ We can also visually compare the original and transformed data sets. .. ipython:: python - compare = pd.DataFrame({'Original': ts, 'Transformed': transformed}) + compare = pd.DataFrame({"Original": ts, "Transformed": transformed}) @savefig groupby_transform_plot.png compare.plot() @@ -788,8 +804,8 @@ Alternatively, the built-in methods could be used to produce the same outputs. .. ipython:: python - max = ts.groupby(lambda x: x.year).transform('max') - min = ts.groupby(lambda x: x.year).transform('min') + max = ts.groupby(lambda x: x.year).transform("max") + min = ts.groupby(lambda x: x.year).transform("min") max - min @@ -798,7 +814,7 @@ Another common data transform is to replace missing data with the group mean. .. ipython:: python :suppress: - cols = ['A', 'B', 'C'] + cols = ["A", "B", "C"] values = np.random.randn(1000, 3) values[np.random.randint(0, 1000, 100), 0] = np.nan values[np.random.randint(0, 1000, 50), 1] = np.nan @@ -809,7 +825,7 @@ Another common data transform is to replace missing data with the group mean. data_df - countries = np.array(['US', 'UK', 'GR', 'JP']) + countries = np.array(["US", "UK", "GR", "JP"]) key = countries[np.random.randint(0, 4, 1000)] grouped = data_df.groupby(key) @@ -859,11 +875,10 @@ the column B based on the groups of column A. .. ipython:: python - df_re = pd.DataFrame({'A': [1] * 10 + [5] * 10, - 'B': np.arange(20)}) + df_re = pd.DataFrame({"A": [1] * 10 + [5] * 10, "B": np.arange(20)}) df_re - df_re.groupby('A').rolling(4).B.mean() + df_re.groupby("A").rolling(4).B.mean() The ``expanding()`` method will accumulate a given operation @@ -872,7 +887,7 @@ group. .. ipython:: python - df_re.groupby('A').expanding().sum() + df_re.groupby("A").expanding().sum() Suppose you want to use the ``resample()`` method to get a daily @@ -881,13 +896,16 @@ missing values with the ``ffill()`` method. .. 
ipython:: python - df_re = pd.DataFrame({'date': pd.date_range(start='2016-01-01', periods=4, - freq='W'), - 'group': [1, 1, 2, 2], - 'val': [5, 6, 7, 8]}).set_index('date') + df_re = pd.DataFrame( + { + "date": pd.date_range(start="2016-01-01", periods=4, freq="W"), + "group": [1, 1, 2, 2], + "val": [5, 6, 7, 8], + } + ).set_index("date") df_re - df_re.groupby('group').resample('1D').ffill() + df_re.groupby("group").resample("1D").ffill() .. _groupby.filter: @@ -911,8 +929,8 @@ with only a couple members. .. ipython:: python - dff = pd.DataFrame({'A': np.arange(8), 'B': list('aabbbbcc')}) - dff.groupby('B').filter(lambda x: len(x) > 2) + dff = pd.DataFrame({"A": np.arange(8), "B": list("aabbbbcc")}) + dff.groupby("B").filter(lambda x: len(x) > 2) Alternatively, instead of dropping the offending groups, we can return a like-indexed objects where the groups that do not pass the filter are filled @@ -920,14 +938,14 @@ with NaNs. .. ipython:: python - dff.groupby('B').filter(lambda x: len(x) > 2, dropna=False) + dff.groupby("B").filter(lambda x: len(x) > 2, dropna=False) For DataFrames with multiple columns, filters should explicitly specify a column as the filter criterion. .. ipython:: python - dff['C'] = np.arange(8) - dff.groupby('B').filter(lambda x: len(x['C']) > 2) + dff["C"] = np.arange(8) + dff.groupby("B").filter(lambda x: len(x["C"]) > 2) .. note:: @@ -939,7 +957,7 @@ For DataFrames with multiple columns, filters should explicitly specify a column .. ipython:: python - dff.groupby('B').head(2) + dff.groupby("B").head(2) .. _groupby.dispatch: @@ -953,7 +971,7 @@ functions: .. ipython:: python - grouped = df.groupby('A') + grouped = df.groupby("A") grouped.agg(lambda x: x.std()) But, it's rather verbose and can be untidy if you need to pass additional @@ -973,12 +991,14 @@ next). This enables some operations to be carried out rather succinctly: .. ipython:: python - tsdf = pd.DataFrame(np.random.randn(1000, 3), - index=pd.date_range('1/1/2000', periods=1000), - columns=['A', 'B', 'C']) + tsdf = pd.DataFrame( + np.random.randn(1000, 3), + index=pd.date_range("1/1/2000", periods=1000), + columns=["A", "B", "C"], + ) tsdf.iloc[::2] = np.nan grouped = tsdf.groupby(lambda x: x.year) - grouped.fillna(method='pad') + grouped.fillna(method="pad") In this example, we chopped the collection of time series into yearly chunks then independently called :ref:`fillna ` on the @@ -989,7 +1009,7 @@ The ``nlargest`` and ``nsmallest`` methods work on ``Series`` style groupbys: .. ipython:: python s = pd.Series([9, 8, 7, 5, 19, 1, 4.2, 3.3]) - g = pd.Series(list('abababab')) + g = pd.Series(list("abababab")) gb = s.groupby(g) gb.nlargest(3) gb.nsmallest(3) @@ -1008,10 +1028,10 @@ for both ``aggregate`` and ``transform`` in many standard use cases. However, .. ipython:: python df - grouped = df.groupby('A') + grouped = df.groupby("A") # could also just call .describe() - grouped['C'].apply(lambda x: x.describe()) + grouped["C"].apply(lambda x: x.describe()) The dimension of the returned result can also change: @@ -1032,7 +1052,8 @@ that is itself a series, and possibly upcast the result to a DataFrame: .. ipython:: python def f(x): - return pd.Series([x, x ** 2], index=['x', 'x^2']) + return pd.Series([x, x ** 2], index=["x", "x^2"]) + s = pd.Series(np.random.rand(5)) s @@ -1133,7 +1154,7 @@ will be (silently) dropped. Thus, this does not pose any problems: .. 
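ipython:: python

    # Hedged, self-contained sketch (``nuisance_df`` is made up): a
    # non-numeric "nuisance" column is silently dropped by a numeric
    # aggregation such as ``mean``.
    nuisance_df = pd.DataFrame(
        {"g": ["a", "a", "b"], "label": ["x", "y", "z"], "v": [1.0, 2.0, 3.0]}
    )
    nuisance_df.groupby("g").mean()

.. 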
ipython:: python - df.groupby('A').std() + df.groupby("A").std() Note that ``df.groupby('A').colname.std().`` is more efficient than ``df.groupby('A').std().colname``, so if the result of an aggregation function @@ -1151,23 +1172,29 @@ is only interesting over one column (here ``colname``), it may be filtered .. ipython:: python from decimal import Decimal + df_dec = pd.DataFrame( - {'id': [1, 2, 1, 2], - 'int_column': [1, 2, 3, 4], - 'dec_column': [Decimal('0.50'), Decimal('0.15'), - Decimal('0.25'), Decimal('0.40')] - } + { + "id": [1, 2, 1, 2], + "int_column": [1, 2, 3, 4], + "dec_column": [ + Decimal("0.50"), + Decimal("0.15"), + Decimal("0.25"), + Decimal("0.40"), + ], + } ) # Decimal columns can be sum'd explicitly by themselves... - df_dec.groupby(['id'])[['dec_column']].sum() + df_dec.groupby(["id"])[["dec_column"]].sum() # ...but cannot be combined with standard data types or they will be excluded - df_dec.groupby(['id'])[['int_column', 'dec_column']].sum() + df_dec.groupby(["id"])[["int_column", "dec_column"]].sum() # Use .agg function to aggregate over standard and "nuisance" data types # at the same time - df_dec.groupby(['id']).agg({'int_column': 'sum', 'dec_column': 'sum'}) + df_dec.groupby(["id"]).agg({"int_column": "sum", "dec_column": "sum"}) .. _groupby.observed: @@ -1182,25 +1209,27 @@ Show all values: .. ipython:: python - pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], - categories=['a', 'b']), - observed=False).count() + pd.Series([1, 1, 1]).groupby( + pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False + ).count() Show only the observed values: .. ipython:: python - pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], - categories=['a', 'b']), - observed=True).count() + pd.Series([1, 1, 1]).groupby( + pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=True + ).count() The returned dtype of the grouped will *always* include *all* of the categories that were grouped. .. ipython:: python - s = pd.Series([1, 1, 1]).groupby(pd.Categorical(['a', 'a', 'a'], - categories=['a', 'b']), - observed=False).count() + s = ( + pd.Series([1, 1, 1]) + .groupby(pd.Categorical(["a", "a", "a"], categories=["a", "b"]), observed=False) + .count() + ) s.index.dtype .. _groupby.missing: @@ -1224,7 +1253,7 @@ can be used as group keys. If so, the order of the levels will be preserved: data = pd.Series(np.random.randn(100)) - factor = pd.qcut(data, [0, .25, .5, .75, 1.]) + factor = pd.qcut(data, [0, 0.25, 0.5, 0.75, 1.0]) data.groupby(factor).mean() @@ -1240,19 +1269,23 @@ use the ``pd.Grouper`` to provide this local control. 
import datetime - df = pd.DataFrame({'Branch': 'A A A A A A A B'.split(), - 'Buyer': 'Carl Mark Carl Carl Joe Joe Joe Carl'.split(), - 'Quantity': [1, 3, 5, 1, 8, 1, 9, 3], - 'Date': [ - datetime.datetime(2013, 1, 1, 13, 0), - datetime.datetime(2013, 1, 1, 13, 5), - datetime.datetime(2013, 10, 1, 20, 0), - datetime.datetime(2013, 10, 2, 10, 0), - datetime.datetime(2013, 10, 1, 20, 0), - datetime.datetime(2013, 10, 2, 10, 0), - datetime.datetime(2013, 12, 2, 12, 0), - datetime.datetime(2013, 12, 2, 14, 0)] - }) + df = pd.DataFrame( + { + "Branch": "A A A A A A A B".split(), + "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], + "Date": [ + datetime.datetime(2013, 1, 1, 13, 0), + datetime.datetime(2013, 1, 1, 13, 5), + datetime.datetime(2013, 10, 1, 20, 0), + datetime.datetime(2013, 10, 2, 10, 0), + datetime.datetime(2013, 10, 1, 20, 0), + datetime.datetime(2013, 10, 2, 10, 0), + datetime.datetime(2013, 12, 2, 12, 0), + datetime.datetime(2013, 12, 2, 14, 0), + ], + } + ) df @@ -1260,18 +1293,18 @@ Groupby a specific column with the desired frequency. This is like resampling. .. ipython:: python - df.groupby([pd.Grouper(freq='1M', key='Date'), 'Buyer']).sum() + df.groupby([pd.Grouper(freq="1M", key="Date"), "Buyer"]).sum() You have an ambiguous specification in that you have a named index and a column that could be potential groupers. .. ipython:: python - df = df.set_index('Date') - df['Date'] = df.index + pd.offsets.MonthEnd(2) - df.groupby([pd.Grouper(freq='6M', key='Date'), 'Buyer']).sum() + df = df.set_index("Date") + df["Date"] = df.index + pd.offsets.MonthEnd(2) + df.groupby([pd.Grouper(freq="6M", key="Date"), "Buyer"]).sum() - df.groupby([pd.Grouper(freq='6M', level='Date'), 'Buyer']).sum() + df.groupby([pd.Grouper(freq="6M", level="Date"), "Buyer"]).sum() Taking the first rows of each group @@ -1281,10 +1314,10 @@ Just like for a DataFrame or Series you can call head and tail on a groupby: .. ipython:: python - df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) + df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=["A", "B"]) df - g = df.groupby('A') + g = df.groupby("A") g.head(1) g.tail(1) @@ -1302,8 +1335,8 @@ will return a single row (or no row) per group if you pass an int for n: .. ipython:: python - df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) - g = df.groupby('A') + df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A") g.nth(0) g.nth(-1) @@ -1314,21 +1347,21 @@ If you want to select the nth not-null item, use the ``dropna`` kwarg. For a Dat .. ipython:: python # nth(0) is the same as g.first() - g.nth(0, dropna='any') + g.nth(0, dropna="any") g.first() # nth(-1) is the same as g.last() - g.nth(-1, dropna='any') # NaNs denote group exhausted when using dropna + g.nth(-1, dropna="any") # NaNs denote group exhausted when using dropna g.last() - g.B.nth(0, dropna='all') + g.B.nth(0, dropna="all") As with other methods, passing ``as_index=False``, will achieve a filtration, which returns the grouped row. .. ipython:: python - df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B']) - g = df.groupby('A', as_index=False) + df = pd.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) + g = df.groupby("A", as_index=False) g.nth(0) g.nth(-1) @@ -1337,8 +1370,8 @@ You can also select multiple rows from each group by specifying multiple nth val .. 
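ipython:: python

    # Hedged, self-contained sketch (``nth_df`` is made up): with a list
    # of ints, ``nth`` simply skips positions that do not exist in a
    # group (group "b" below has no second row).
    nth_df = pd.DataFrame({"k": ["a", "a", "b"], "v": [1, 2, 3]})
    nth_df.groupby("k").nth([0, 1])

.. 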
ipython:: python - business_dates = pd.date_range(start='4/1/2014', end='6/30/2014', freq='B') - df = pd.DataFrame(1, index=business_dates, columns=['a', 'b']) + business_dates = pd.date_range(start="4/1/2014", end="6/30/2014", freq="B") + df = pd.DataFrame(1, index=business_dates, columns=["a", "b"]) # get the first, 4th, and last date index for each month df.groupby([df.index.year, df.index.month]).nth([0, 3, -1]) @@ -1350,12 +1383,12 @@ To see the order in which each row appears within its group, use the .. ipython:: python - dfg = pd.DataFrame(list('aaabba'), columns=['A']) + dfg = pd.DataFrame(list("aaabba"), columns=["A"]) dfg - dfg.groupby('A').cumcount() + dfg.groupby("A").cumcount() - dfg.groupby('A').cumcount(ascending=False) + dfg.groupby("A").cumcount(ascending=False) .. _groupby.ngroup: @@ -1374,12 +1407,12 @@ order they are first observed. .. ipython:: python - dfg = pd.DataFrame(list('aaabba'), columns=['A']) + dfg = pd.DataFrame(list("aaabba"), columns=["A"]) dfg - dfg.groupby('A').ngroup() + dfg.groupby("A").ngroup() - dfg.groupby('A').ngroup(ascending=False) + dfg.groupby("A").ngroup(ascending=False) Plotting ~~~~~~~~ @@ -1392,8 +1425,8 @@ the values in column 1 where the group is "B" are 3 higher on average. np.random.seed(1234) df = pd.DataFrame(np.random.randn(50, 2)) - df['g'] = np.random.choice(['A', 'B'], size=50) - df.loc[df['g'] == 'B', 1] += 3 + df["g"] = np.random.choice(["A", "B"], size=50) + df.loc[df["g"] == "B", 1] += 3 We can easily visualize this with a boxplot: @@ -1401,7 +1434,7 @@ We can easily visualize this with a boxplot: :okwarning: @savefig groupby_boxplot.png - df.groupby('g').boxplot() + df.groupby("g").boxplot() The result of calling ``boxplot`` is a dictionary whose keys are the values of our grouping column ``g`` ("A" and "B"). The values of the resulting dictionary @@ -1436,20 +1469,26 @@ code more readable. First we set the data: .. ipython:: python n = 1000 - df = pd.DataFrame({'Store': np.random.choice(['Store_1', 'Store_2'], n), - 'Product': np.random.choice(['Product_1', - 'Product_2'], n), - 'Revenue': (np.random.random(n) * 50 + 10).round(2), - 'Quantity': np.random.randint(1, 10, size=n)}) + df = pd.DataFrame( + { + "Store": np.random.choice(["Store_1", "Store_2"], n), + "Product": np.random.choice(["Product_1", "Product_2"], n), + "Revenue": (np.random.random(n) * 50 + 10).round(2), + "Quantity": np.random.randint(1, 10, size=n), + } + ) df.head(2) Now, to find prices per store/product, we can simply do: .. ipython:: python - (df.groupby(['Store', 'Product']) - .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum()) - .unstack().round(2)) + ( + df.groupby(["Store", "Product"]) + .pipe(lambda grp: grp.Revenue.sum() / grp.Quantity.sum()) + .unstack() + .round(2) + ) Piping can also be expressive when you want to deliver a grouped object to some arbitrary function, for example: @@ -1459,7 +1498,8 @@ arbitrary function, for example: def mean(groupby): return groupby.mean() - df.groupby(['Store', 'Product']).pipe(mean) + + df.groupby(["Store", "Product"]).pipe(mean) where ``mean`` takes a GroupBy object and finds the mean of the Revenue and Quantity columns respectively for each Store-Product combination. The ``mean`` function can @@ -1476,8 +1516,7 @@ Regroup columns of a DataFrame according to their sum, and sum the aggregated on .. 
ipython:: python - df = pd.DataFrame({'a': [1, 0, 0], 'b': [0, 1, 0], - 'c': [1, 0, 0], 'd': [2, 3, 4]}) + df = pd.DataFrame({"a": [1, 0, 0], "b": [0, 1, 0], "c": [1, 0, 0], "d": [2, 3, 4]}) df df.groupby(df.sum(), axis=1).sum() @@ -1536,16 +1575,22 @@ column index name will be used as the name of the inserted column: .. ipython:: python - df = pd.DataFrame({'a': [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], - 'b': [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], - 'c': [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], - 'd': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]}) + df = pd.DataFrame( + { + "a": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2], + "b": [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1], + "c": [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0], + "d": [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], + } + ) + def compute_metrics(x): - result = {'b_sum': x['b'].sum(), 'c_mean': x['c'].mean()} - return pd.Series(result, name='metrics') + result = {"b_sum": x["b"].sum(), "c_mean": x["c"].mean()} + return pd.Series(result, name="metrics") + - result = df.groupby('a').apply(compute_metrics) + result = df.groupby("a").apply(compute_metrics) result diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index e483cebf71614..11e8df55fcad5 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3310,10 +3310,10 @@ applications (CTRL-V on many operating systems). Here we illustrate writing a .. code-block:: python - >>> df = pd.DataFrame({'A': [1, 2, 3], - ... 'B': [4, 5, 6], - ... 'C': ['p', 'q', 'r']}, - ... index=['x', 'y', 'z']) + >>> df = pd.DataFrame({"A": [1, 2, 3], + ... "B": [4, 5, 6], + ... "C": ["p", "q", "r"]}, + ... index=["x", "y", "z"]) >>> df A B C x 1 4 p @@ -3607,8 +3607,8 @@ This format is specified by default when using ``put`` or ``to_hdf`` or by ``for .. code-block:: python - >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf('test_fixed.h5', 'df') - >>> pd.read_hdf('test_fixed.h5', 'df', where='index>5') + >>> pd.DataFrame(np.random.randn(10, 2)).to_hdf("test_fixed.h5", "df") + >>> pd.read_hdf("test_fixed.h5", "df", where="index>5") TypeError: cannot pass a where specification when reading a fixed format. this store must be selected in its entirety diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst index 9294897686d46..3c97cc7da6edb 100644 --- a/doc/source/user_guide/missing_data.rst +++ b/doc/source/user_guide/missing_data.rst @@ -38,12 +38,15 @@ arise and we wish to also consider that "missing" or "not available" or "NA". .. ipython:: python - df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'], - columns=['one', 'two', 'three']) - df['four'] = 'bar' - df['five'] = df['one'] > 0 + df = pd.DataFrame( + np.random.randn(5, 3), + index=["a", "c", "e", "f", "h"], + columns=["one", "two", "three"], + ) + df["four"] = "bar" + df["five"] = df["one"] > 0 df - df2 = df.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']) + df2 = df.reindex(["a", "b", "c", "d", "e", "f", "g", "h"]) df2 To make detecting missing values easier (and across different array dtypes), @@ -53,9 +56,9 @@ Series and DataFrame objects: .. ipython:: python - df2['one'] - pd.isna(df2['one']) - df2['four'].notna() + df2["one"] + pd.isna(df2["one"]) + df2["four"].notna() df2.isna() .. warning:: @@ -65,14 +68,14 @@ Series and DataFrame objects: .. ipython:: python - None == None # noqa: E711 + None == None # noqa: E711 np.nan == np.nan So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information. .. 
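ipython:: python

    # Hedged aside (tiny sketch): since ``np.nan != np.nan``, detect
    # missing values with ``pd.isna``/``notna`` rather than with equality.
    pd.isna(pd.Series([1.0, np.nan]))

.. 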
ipython:: python - df2['one'] == np.nan + df2["one"] == np.nan Integer dtypes and missing data ------------------------------- @@ -101,9 +104,9 @@ pandas objects provide compatibility between ``NaT`` and ``NaN``. .. ipython:: python df2 = df.copy() - df2['timestamp'] = pd.Timestamp('20120101') + df2["timestamp"] = pd.Timestamp("20120101") df2 - df2.loc[['a', 'c', 'h'], ['one', 'timestamp']] = np.nan + df2.loc[["a", "c", "h"], ["one", "timestamp"]] = np.nan df2 df2.dtypes.value_counts() @@ -146,9 +149,9 @@ objects. .. ipython:: python :suppress: - df = df2.loc[:, ['one', 'two', 'three']] - a = df2.loc[df2.index[:5], ['one', 'two']].fillna(method='pad') - b = df2.loc[df2.index[:5], ['one', 'two', 'three']] + df = df2.loc[:, ["one", "two", "three"]] + a = df2.loc[df2.index[:5], ["one", "two"]].fillna(method="pad") + b = df2.loc[df2.index[:5], ["one", "two", "three"]] .. ipython:: python @@ -168,7 +171,7 @@ account for missing data. For example: .. ipython:: python df - df['one'].sum() + df["one"].sum() df.mean(1) df.cumsum() df.cumsum(skipna=False) @@ -210,7 +213,7 @@ with R, for example: .. ipython:: python df - df.groupby('one').mean() + df.groupby("one").mean() See the groupby section :ref:`here ` for more information. @@ -234,7 +237,7 @@ of ways, which we illustrate: df2 df2.fillna(0) - df2['one'].fillna('missing') + df2["one"].fillna("missing") **Fill gaps forward or backward** @@ -244,7 +247,7 @@ can propagate non-NA values forward or backward: .. ipython:: python df - df.fillna(method='pad') + df.fillna(method="pad") .. _missing_data.fillna.limit: @@ -261,7 +264,7 @@ we can use the ``limit`` keyword: .. ipython:: python df - df.fillna(method='pad', limit=1) + df.fillna(method="pad", limit=1) To remind you, these are the available filling methods: @@ -289,21 +292,21 @@ use case of this is to fill a DataFrame with the mean of that column. .. ipython:: python - dff = pd.DataFrame(np.random.randn(10, 3), columns=list('ABC')) + dff = pd.DataFrame(np.random.randn(10, 3), columns=list("ABC")) dff.iloc[3:5, 0] = np.nan dff.iloc[4:6, 1] = np.nan dff.iloc[5:8, 2] = np.nan dff dff.fillna(dff.mean()) - dff.fillna(dff.mean()['B':'C']) + dff.fillna(dff.mean()["B":"C"]) Same result as above, but is aligning the 'fill' value which is a Series in this case. .. ipython:: python - dff.where(pd.notna(dff), dff.mean(), axis='columns') + dff.where(pd.notna(dff), dff.mean(), axis="columns") .. _missing_data.dropna: @@ -317,15 +320,15 @@ data. To do this, use :meth:`~DataFrame.dropna`: .. ipython:: python :suppress: - df['two'] = df['two'].fillna(0) - df['three'] = df['three'].fillna(0) + df["two"] = df["two"].fillna(0) + df["three"] = df["three"].fillna(0) .. ipython:: python df df.dropna(axis=0) df.dropna(axis=1) - df['one'].dropna() + df["one"].dropna() An equivalent :meth:`~Series.dropna` is available for Series. DataFrame.dropna has considerably more options than Series.dropna, which can be @@ -343,7 +346,7 @@ that, by default, performs linear interpolation at missing data points. :suppress: np.random.seed(123456) - idx = pd.date_range('1/1/2000', periods=100, freq='BM') + idx = pd.date_range("1/1/2000", periods=100, freq="BM") ts = pd.Series(np.random.randn(100), index=idx) ts[1:5] = np.nan ts[20:30] = np.nan @@ -376,28 +379,29 @@ Index aware interpolation is available via the ``method`` keyword: ts2 ts2.interpolate() - ts2.interpolate(method='time') + ts2.interpolate(method="time") For a floating-point index, use ``method='values'``: .. ipython:: python :suppress: - idx = [0., 1., 10.] 
- ser = pd.Series([0., np.nan, 10.], idx) + idx = [0.0, 1.0, 10.0] + ser = pd.Series([0.0, np.nan, 10.0], idx) .. ipython:: python ser ser.interpolate() - ser.interpolate(method='values') + ser.interpolate(method="values") You can also interpolate with a DataFrame: .. ipython:: python - df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8], - 'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]}) + df = pd.DataFrame( + {"A": [1, 2.1, np.nan, 4.7, 5.6, 6.8], "B": [0.25, np.nan, np.nan, 4, 12.2, 14.4]} + ) df df.interpolate() @@ -418,20 +422,20 @@ The appropriate interpolation method will depend on the type of data you are wor .. ipython:: python - df.interpolate(method='barycentric') + df.interpolate(method="barycentric") - df.interpolate(method='pchip') + df.interpolate(method="pchip") - df.interpolate(method='akima') + df.interpolate(method="akima") When interpolating via a polynomial or spline approximation, you must also specify the degree or order of the approximation: .. ipython:: python - df.interpolate(method='spline', order=2) + df.interpolate(method="spline", order=2) - df.interpolate(method='polynomial', order=2) + df.interpolate(method="polynomial", order=2) Compare several methods: @@ -439,10 +443,10 @@ Compare several methods: np.random.seed(2) - ser = pd.Series(np.arange(1, 10.1, .25) ** 2 + np.random.randn(37)) + ser = pd.Series(np.arange(1, 10.1, 0.25) ** 2 + np.random.randn(37)) missing = np.array([4, 13, 14, 15, 16, 17, 18, 20, 29]) ser[missing] = np.nan - methods = ['linear', 'quadratic', 'cubic'] + methods = ["linear", "quadratic", "cubic"] df = pd.DataFrame({m: ser.interpolate(method=m) for m in methods}) @savefig compare_interpolations.png @@ -460,7 +464,7 @@ at the new values. # interpolate at new_index new_index = ser.index | pd.Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75]) - interp_s = ser.reindex(new_index).interpolate(method='pchip') + interp_s = ser.reindex(new_index).interpolate(method="pchip") interp_s[49:51] .. _scipy: https://www.scipy.org @@ -478,8 +482,7 @@ filled since the last valid observation: .. ipython:: python - ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, - np.nan, 13, np.nan, np.nan]) + ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) ser # fill all consecutive values in a forward direction @@ -494,13 +497,13 @@ By default, ``NaN`` values are filled in a ``forward`` direction. Use .. ipython:: python # fill one consecutive value backwards - ser.interpolate(limit=1, limit_direction='backward') + ser.interpolate(limit=1, limit_direction="backward") # fill one consecutive value in both directions - ser.interpolate(limit=1, limit_direction='both') + ser.interpolate(limit=1, limit_direction="both") # fill all consecutive values in both directions - ser.interpolate(limit_direction='both') + ser.interpolate(limit_direction="both") By default, ``NaN`` values are filled whether they are inside (surrounded by) existing valid values, or outside existing valid values. The ``limit_area`` @@ -509,13 +512,13 @@ parameter restricts filling to either inside or outside values. .. 
ipython:: python # fill one consecutive inside value in both directions - ser.interpolate(limit_direction='both', limit_area='inside', limit=1) + ser.interpolate(limit_direction="both", limit_area="inside", limit=1) # fill all consecutive outside values backward - ser.interpolate(limit_direction='backward', limit_area='outside') + ser.interpolate(limit_direction="backward", limit_area="outside") # fill all consecutive outside values in both directions - ser.interpolate(limit_direction='both', limit_area='outside') + ser.interpolate(limit_direction="both", limit_area="outside") .. _missing_data.replace: @@ -531,7 +534,7 @@ value: .. ipython:: python - ser = pd.Series([0., 1., 2., 3., 4.]) + ser = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0]) ser.replace(0, 5) @@ -551,16 +554,16 @@ For a DataFrame, you can specify individual values by column: .. ipython:: python - df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]}) + df = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": [5, 6, 7, 8, 9]}) - df.replace({'a': 0, 'b': 5}, 100) + df.replace({"a": 0, "b": 5}, 100) Instead of replacing with specified values, you can treat all given values as missing and interpolate over them: .. ipython:: python - ser.replace([1, 2, 3], method='pad') + ser.replace([1, 2, 3], method="pad") .. _missing_data.replace_expression: @@ -581,67 +584,67 @@ Replace the '.' with ``NaN`` (str -> str): .. ipython:: python - d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']} + d = {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]} df = pd.DataFrame(d) - df.replace('.', np.nan) + df.replace(".", np.nan) Now do it with a regular expression that removes surrounding whitespace (regex -> regex): .. ipython:: python - df.replace(r'\s*\.\s*', np.nan, regex=True) + df.replace(r"\s*\.\s*", np.nan, regex=True) Replace a few different values (list -> list): .. ipython:: python - df.replace(['a', '.'], ['b', np.nan]) + df.replace(["a", "."], ["b", np.nan]) list of regex -> list of regex: .. ipython:: python - df.replace([r'\.', r'(a)'], ['dot', r'\1stuff'], regex=True) + df.replace([r"\.", r"(a)"], ["dot", r"\1stuff"], regex=True) Only search in column ``'b'`` (dict -> dict): .. ipython:: python - df.replace({'b': '.'}, {'b': np.nan}) + df.replace({"b": "."}, {"b": np.nan}) Same as the previous example, but use a regular expression for searching instead (dict of regex -> dict): .. ipython:: python - df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True) + df.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True) You can pass nested dictionaries of regular expressions that use ``regex=True``: .. ipython:: python - df.replace({'b': {'b': r''}}, regex=True) + df.replace({"b": {"b": r""}}, regex=True) Alternatively, you can pass the nested dictionary like so: .. ipython:: python - df.replace(regex={'b': {r'\s*\.\s*': np.nan}}) + df.replace(regex={"b": {r"\s*\.\s*": np.nan}}) You can also use the group of a regular expression match when replacing (dict of regex -> dict of regex), this works for lists as well. .. ipython:: python - df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True) + df.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True) You can pass a list of regular expressions, of which those that match will be replaced with a scalar (list of regex -> regex). .. 
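ipython:: python

    # Hedged, self-contained sketch: independent of the regex forms shown
    # here, ``replace`` also accepts a plain mapping of old -> new values.
    pd.Series(["a", "b", "."]).replace({".": np.nan, "a": "A"})

.. 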
ipython:: python - df.replace([r'\s*\.\s*', r'a|b'], np.nan, regex=True) + df.replace([r"\s*\.\s*", r"a|b"], np.nan, regex=True) All of the regular expression examples can also be passed with the ``to_replace`` argument as the ``regex`` argument. In this case the ``value`` @@ -650,7 +653,7 @@ dictionary. The previous example, in this case, would then be: .. ipython:: python - df.replace(regex=[r'\s*\.\s*', r'a|b'], value=np.nan) + df.replace(regex=[r"\s*\.\s*", r"a|b"], value=np.nan) This can be convenient if you do not want to pass ``regex=True`` every time you want to use a regular expression. @@ -676,7 +679,7 @@ Replacing more than one value is possible by passing a list. .. ipython:: python df00 = df.iloc[0, 0] - df.replace([1.5, df00], [np.nan, 'a']) + df.replace([1.5, df00], [np.nan, "a"]) df[1].dtype You can also operate on the DataFrame in place: @@ -932,7 +935,7 @@ the first 10 columns. .. ipython:: python - bb = pd.read_csv('data/baseball.csv', index_col='id') + bb = pd.read_csv("data/baseball.csv", index_col="id") bb[bb.columns[:10]].dtypes .. ipython:: python diff --git a/doc/source/user_guide/scale.rst b/doc/source/user_guide/scale.rst index 206d8dd0f4739..f36f27269a996 100644 --- a/doc/source/user_guide/scale.rst +++ b/doc/source/user_guide/scale.rst @@ -72,7 +72,7 @@ Option 1 loads in all the data and then filters to what we need. .. ipython:: python - columns = ['id_0', 'name_0', 'x_0', 'y_0'] + columns = ["id_0", "name_0", "x_0", "y_0"] pd.read_parquet("timeseries_wide.parquet")[columns] @@ -123,7 +123,7 @@ space-efficient integers to know which specific name is used in each row. .. ipython:: python ts2 = ts.copy() - ts2['name'] = ts2['name'].astype('category') + ts2["name"] = ts2["name"].astype("category") ts2.memory_usage(deep=True) We can go a bit further and downcast the numeric columns to their smallest types @@ -131,8 +131,8 @@ using :func:`pandas.to_numeric`. .. ipython:: python - ts2['id'] = pd.to_numeric(ts2['id'], downcast='unsigned') - ts2[['x', 'y']] = ts2[['x', 'y']].apply(pd.to_numeric, downcast='float') + ts2["id"] = pd.to_numeric(ts2["id"], downcast="unsigned") + ts2[["x", "y"]] = ts2[["x", "y"]].apply(pd.to_numeric, downcast="float") ts2.dtypes .. ipython:: python @@ -141,8 +141,7 @@ using :func:`pandas.to_numeric`. .. ipython:: python - reduction = (ts2.memory_usage(deep=True).sum() - / ts.memory_usage(deep=True).sum()) + reduction = ts2.memory_usage(deep=True).sum() / ts.memory_usage(deep=True).sum() print(f"{reduction:0.2f}") In all, we've reduced the in-memory footprint of this dataset to 1/5 of its @@ -174,13 +173,13 @@ files. Each file in the directory represents a different year of the entire data import pathlib N = 12 - starts = [f'20{i:>02d}-01-01' for i in range(N)] - ends = [f'20{i:>02d}-12-13' for i in range(N)] + starts = [f"20{i:>02d}-01-01" for i in range(N)] + ends = [f"20{i:>02d}-12-13" for i in range(N)] pathlib.Path("data/timeseries").mkdir(exist_ok=True) for i, (start, end) in enumerate(zip(starts, ends)): - ts = _make_timeseries(start=start, end=end, freq='1T', seed=i) + ts = _make_timeseries(start=start, end=end, freq="1T", seed=i) ts.to_parquet(f"data/timeseries/ts-{i:0>2d}.parquet") @@ -215,7 +214,7 @@ work for arbitrary-sized datasets. # Only one dataframe is in memory at a time... df = pd.read_parquet(path) # ... plus a small Series ``counts``, which is updated. 
- counts = counts.add(df['name'].value_counts(), fill_value=0) + counts = counts.add(df["name"].value_counts(), fill_value=0) counts.astype(int) Some readers, like :meth:`pandas.read_csv`, offer parameters to control the @@ -278,8 +277,8 @@ Rather than executing immediately, doing operations build up a **task graph**. .. ipython:: python ddf - ddf['name'] - ddf['name'].value_counts() + ddf["name"] + ddf["name"].value_counts() Each of these calls is instant because the result isn't being computed yet. We're just building up a list of computation to do when someone needs the @@ -291,7 +290,7 @@ To get the actual result you can call ``.compute()``. .. ipython:: python - %time ddf['name'].value_counts().compute() + %time ddf["name"].value_counts().compute() At that point, you get back the same thing you'd get with pandas, in this case a concrete pandas Series with the count of each ``name``. @@ -324,7 +323,7 @@ a familiar groupby aggregation. .. ipython:: python - %time ddf.groupby('name')[['x', 'y']].mean().compute().head() + %time ddf.groupby("name")[["x", "y"]].mean().compute().head() The grouping and aggregation is done out-of-core and in parallel. @@ -336,8 +335,8 @@ we need to supply the divisions manually. .. ipython:: python N = 12 - starts = [f'20{i:>02d}-01-01' for i in range(N)] - ends = [f'20{i:>02d}-12-13' for i in range(N)] + starts = [f"20{i:>02d}-01-01" for i in range(N)] + ends = [f"20{i:>02d}-12-13" for i in range(N)] divisions = tuple(pd.to_datetime(starts)) + (pd.Timestamp(ends[-1]),) ddf.divisions = divisions @@ -347,7 +346,7 @@ Now we can do things like fast random access with ``.loc``. .. ipython:: python - ddf.loc['2002-01-01 12:01':'2002-01-01 12:05'].compute() + ddf.loc["2002-01-01 12:01":"2002-01-01 12:05"].compute() Dask knows to just look in the 3rd partition for selecting values in 2002. It doesn't need to look at any other data. @@ -362,7 +361,7 @@ out of memory. At that point it's just a regular pandas object. :okwarning: @savefig dask_resample.png - ddf[['x', 'y']].resample("1D").mean().cumsum().compute().plot() + ddf[["x", "y"]].resample("1D").mean().cumsum().compute().plot() These Dask examples have all be done using multiple processes on a single machine. Dask can be `deployed on a cluster diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 61902b4a41b7c..4d0ddd2185669 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -19,42 +19,43 @@ Parsing time series information from various sources and formats import datetime - dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'), - datetime.datetime(2018, 1, 1)]) + dti = pd.to_datetime( + ["1/1/2018", np.datetime64("2018-01-01"), datetime.datetime(2018, 1, 1)] + ) dti Generate sequences of fixed-frequency dates and time spans .. ipython:: python - dti = pd.date_range('2018-01-01', periods=3, freq='H') + dti = pd.date_range("2018-01-01", periods=3, freq="H") dti Manipulating and converting date times with timezone information .. ipython:: python - dti = dti.tz_localize('UTC') + dti = dti.tz_localize("UTC") dti - dti.tz_convert('US/Pacific') + dti.tz_convert("US/Pacific") Resampling or converting a time series to a particular frequency .. 
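ipython:: python

    # Hedged aside (made-up hourly series): ``resample`` supports other
    # reductions besides ``mean``, e.g. ``sum`` over 2-hour bins.
    hourly = pd.Series(
        range(5), index=pd.date_range("2018-01-01", periods=5, freq="H")
    )
    hourly.resample("2H").sum()

.. 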
ipython:: python - idx = pd.date_range('2018-01-01', periods=5, freq='H') + idx = pd.date_range("2018-01-01", periods=5, freq="H") ts = pd.Series(range(len(idx)), index=idx) ts - ts.resample('2H').mean() + ts.resample("2H").mean() Performing date and time arithmetic with absolute or relative time increments .. ipython:: python - friday = pd.Timestamp('2018-01-05') + friday = pd.Timestamp("2018-01-05") friday.day_name() # Add 1 day - saturday = friday + pd.Timedelta('1 day') + saturday = friday + pd.Timedelta("1 day") saturday.day_name() # Add 1 business day (Friday --> Monday) monday = friday + pd.offsets.BDay() @@ -90,13 +91,13 @@ so manipulations can be performed with respect to the time element. .. ipython:: python - pd.Series(range(3), index=pd.date_range('2000', freq='D', periods=3)) + pd.Series(range(3), index=pd.date_range("2000", freq="D", periods=3)) However, :class:`Series` and :class:`DataFrame` can directly also support the time component as data itself. .. ipython:: python - pd.Series(pd.date_range('2000', freq='D', periods=3)) + pd.Series(pd.date_range("2000", freq="D", periods=3)) :class:`Series` and :class:`DataFrame` have extended data type support and functionality for ``datetime``, ``timedelta`` and ``Period`` data when passed into those constructors. ``DateOffset`` @@ -104,9 +105,9 @@ data however will be stored as ``object`` data. .. ipython:: python - pd.Series(pd.period_range('1/1/2011', freq='M', periods=3)) + pd.Series(pd.period_range("1/1/2011", freq="M", periods=3)) pd.Series([pd.DateOffset(1), pd.DateOffset(2)]) - pd.Series(pd.date_range('1/1/2011', freq='M', periods=3)) + pd.Series(pd.date_range("1/1/2011", freq="M", periods=3)) Lastly, pandas represents null date times, time deltas, and time spans as ``NaT`` which is useful for representing missing or null date like values and behaves similar @@ -132,7 +133,7 @@ time. .. ipython:: python pd.Timestamp(datetime.datetime(2012, 5, 1)) - pd.Timestamp('2012-05-01') + pd.Timestamp("2012-05-01") pd.Timestamp(2012, 5, 1) However, in many cases it is more natural to associate things like change @@ -143,9 +144,9 @@ For example: .. ipython:: python - pd.Period('2011-01') + pd.Period("2011-01") - pd.Period('2012-05', freq='D') + pd.Period("2012-05", freq="D") :class:`Timestamp` and :class:`Period` can serve as an index. Lists of ``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex` @@ -153,9 +154,11 @@ and :class:`PeriodIndex` respectively. .. ipython:: python - dates = [pd.Timestamp('2012-05-01'), - pd.Timestamp('2012-05-02'), - pd.Timestamp('2012-05-03')] + dates = [ + pd.Timestamp("2012-05-01"), + pd.Timestamp("2012-05-02"), + pd.Timestamp("2012-05-03"), + ] ts = pd.Series(np.random.randn(3), dates) type(ts.index) @@ -163,7 +166,7 @@ and :class:`PeriodIndex` respectively. ts - periods = [pd.Period('2012-01'), pd.Period('2012-02'), pd.Period('2012-03')] + periods = [pd.Period("2012-01"), pd.Period("2012-02"), pd.Period("2012-03")] ts = pd.Series(np.random.randn(3), periods) @@ -193,18 +196,18 @@ is converted to a ``DatetimeIndex``: .. ipython:: python - pd.to_datetime(pd.Series(['Jul 31, 2009', '2010-01-10', None])) + pd.to_datetime(pd.Series(["Jul 31, 2009", "2010-01-10", None])) - pd.to_datetime(['2005/11/23', '2010.12.31']) + pd.to_datetime(["2005/11/23", "2010.12.31"]) If you use dates which start with the day first (i.e. European style), you can pass the ``dayfirst`` flag: .. 
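ipython:: python

    # Hedged aside (sketch): an explicit ``format`` string removes the
    # day-first ambiguity altogether.
    pd.to_datetime("04-01-2012", format="%d-%m-%Y")

.. 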
ipython:: python - pd.to_datetime(['04-01-2012 10:00'], dayfirst=True) + pd.to_datetime(["04-01-2012 10:00"], dayfirst=True) - pd.to_datetime(['14-01-2012', '01-14-2012'], dayfirst=True) + pd.to_datetime(["14-01-2012", "01-14-2012"], dayfirst=True) .. warning:: @@ -218,22 +221,22 @@ options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are req .. ipython:: python - pd.to_datetime('2010/11/12') + pd.to_datetime("2010/11/12") - pd.Timestamp('2010/11/12') + pd.Timestamp("2010/11/12") You can also use the ``DatetimeIndex`` constructor directly: .. ipython:: python - pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05']) + pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"]) The string 'infer' can be passed in order to set the frequency of the index as the inferred frequency upon creation: .. ipython:: python - pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'], freq='infer') + pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer") .. _timeseries.converting.format: @@ -245,9 +248,9 @@ This could also potentially speed up the conversion considerably. .. ipython:: python - pd.to_datetime('2010/11/12', format='%Y/%m/%d') + pd.to_datetime("2010/11/12", format="%Y/%m/%d") - pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M') + pd.to_datetime("12-11-2010 00:00", format="%d-%m-%Y %H:%M") For more information on the choices available when specifying the ``format`` option, see the Python `datetime documentation`_. @@ -261,10 +264,9 @@ You can also pass a ``DataFrame`` of integer or string columns to assemble into .. ipython:: python - df = pd.DataFrame({'year': [2015, 2016], - 'month': [2, 3], - 'day': [4, 5], - 'hour': [2, 3]}) + df = pd.DataFrame( + {"year": [2015, 2016], "month": [2, 3], "day": [4, 5], "hour": [2, 3]} + ) pd.to_datetime(df) @@ -272,7 +274,7 @@ You can pass only the columns that you need to assemble. .. ipython:: python - pd.to_datetime(df[['year', 'month', 'day']]) + pd.to_datetime(df[["year", "month", "day"]]) ``pd.to_datetime`` looks for standard designations of the datetime component in the column names, including: @@ -293,13 +295,13 @@ Pass ``errors='ignore'`` to return the original input when unparsable: .. ipython:: python - pd.to_datetime(['2009/07/31', 'asd'], errors='ignore') + pd.to_datetime(["2009/07/31", "asd"], errors="ignore") Pass ``errors='coerce'`` to convert unparsable data to ``NaT`` (not a time): .. ipython:: python - pd.to_datetime(['2009/07/31', 'asd'], errors='coerce') + pd.to_datetime(["2009/07/31", "asd"], errors="coerce") .. _timeseries.converting.epoch: @@ -315,11 +317,12 @@ which can be specified. These are computed from the starting point specified by .. ipython:: python - pd.to_datetime([1349720105, 1349806505, 1349892905, - 1349979305, 1350065705], unit='s') + pd.to_datetime([1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit="s") - pd.to_datetime([1349720105100, 1349720105200, 1349720105300, - 1349720105400, 1349720105500], unit='ms') + pd.to_datetime( + [1349720105100, 1349720105200, 1349720105300, 1349720105400, 1349720105500], + unit="ms", + ) .. note:: @@ -336,8 +339,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone: .. ipython:: python - pd.Timestamp(1262347200000000000).tz_localize('US/Pacific') - pd.DatetimeIndex([1262347200000000000]).tz_localize('US/Pacific') + pd.Timestamp(1262347200000000000).tz_localize("US/Pacific") + pd.DatetimeIndex([1262347200000000000]).tz_localize("US/Pacific") .. 
note:: @@ -353,8 +356,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone: .. ipython:: python - pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s') - pd.to_datetime(1490195805433502912, unit='ns') + pd.to_datetime([1490195805.433, 1490195805.433502912], unit="s") + pd.to_datetime(1490195805433502912, unit="ns") .. seealso:: @@ -369,7 +372,7 @@ To invert the operation from above, namely, to convert from a ``Timestamp`` to a .. ipython:: python - stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D') + stamps = pd.date_range("2012-10-08 18:15:05", periods=4, freq="D") stamps We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the @@ -377,7 +380,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by .. ipython:: python - (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s') + (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s") .. _timeseries.origin: @@ -389,14 +392,14 @@ of a ``DatetimeIndex``. For example, to use 1960-01-01 as the starting date: .. ipython:: python - pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) + pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01")) The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``. Commonly called 'unix epoch' or POSIX time. .. ipython:: python - pd.to_datetime([1, 2, 3], unit='D') + pd.to_datetime([1, 2, 3], unit="D") .. _timeseries.daterange: @@ -408,9 +411,11 @@ To generate an index with timestamps, you can use either the ``DatetimeIndex`` o .. ipython:: python - dates = [datetime.datetime(2012, 5, 1), - datetime.datetime(2012, 5, 2), - datetime.datetime(2012, 5, 3)] + dates = [ + datetime.datetime(2012, 5, 1), + datetime.datetime(2012, 5, 2), + datetime.datetime(2012, 5, 3), + ] # Note the frequency information index = pd.DatetimeIndex(dates) @@ -442,9 +447,9 @@ variety of :ref:`frequency aliases `: .. ipython:: python - pd.date_range(start, periods=1000, freq='M') + pd.date_range(start, periods=1000, freq="M") - pd.bdate_range(start, periods=250, freq='BQS') + pd.bdate_range(start, periods=250, freq="BQS") ``date_range`` and ``bdate_range`` make it easy to generate a range of dates using various combinations of parameters like ``start``, ``end``, ``periods``, @@ -453,9 +458,9 @@ of those specified will not be generated: .. ipython:: python - pd.date_range(start, end, freq='BM') + pd.date_range(start, end, freq="BM") - pd.date_range(start, end, freq='W') + pd.date_range(start, end, freq="W") pd.bdate_range(end=end, periods=20) @@ -467,9 +472,9 @@ resulting ``DatetimeIndex``: .. ipython:: python - pd.date_range('2018-01-01', '2018-01-05', periods=5) + pd.date_range("2018-01-01", "2018-01-05", periods=5) - pd.date_range('2018-01-01', '2018-01-05', periods=10) + pd.date_range("2018-01-01", "2018-01-05", periods=10) .. _timeseries.custom-freq-ranges: @@ -482,13 +487,13 @@ used if a custom frequency string is passed. .. ipython:: python - weekmask = 'Mon Wed Fri' + weekmask = "Mon Wed Fri" holidays = [datetime.datetime(2011, 1, 5), datetime.datetime(2011, 3, 14)] - pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays) + pd.bdate_range(start, end, freq="C", weekmask=weekmask, holidays=holidays) - pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask) + pd.bdate_range(start, end, freq="CBMS", weekmask=weekmask) .. seealso:: @@ -545,7 +550,7 @@ intelligent functionality like selection, slicing, etc. .. 
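ipython:: python

    # An added sketch, not part of the patch: datetime fields such as the
    # month or the weekday name are available directly on a DatetimeIndex
    idx = pd.date_range("2011-01-31", periods=3, freq="BM")
    idx.month
    idx.day_name()

Such an index is typically used as the index of a ``Series`` or ``DataFrame``:

..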
ipython:: python - rng = pd.date_range(start, end, freq='BM') + rng = pd.date_range(start, end, freq="BM") ts = pd.Series(np.random.randn(len(rng)), index=rng) ts.index ts[:5].index @@ -560,20 +565,20 @@ Dates and strings that parse to timestamps can be passed as indexing parameters: .. ipython:: python - ts['1/31/2011'] + ts["1/31/2011"] - ts[datetime.datetime(2011, 12, 25):] + ts[datetime.datetime(2011, 12, 25) :] - ts['10/31/2011':'12/31/2011'] + ts["10/31/2011":"12/31/2011"] To provide convenience for accessing longer time series, you can also pass in the year or year and month as strings: .. ipython:: python - ts['2011'] + ts["2011"] - ts['2011-6'] + ts["2011-6"] This type of slicing will work on a ``DataFrame`` with a ``DatetimeIndex`` as well. Since the partial string selection is a form of label slicing, the endpoints **will be** included. This @@ -586,10 +591,13 @@ would include matching times on an included date: .. ipython:: python :okwarning: - dft = pd.DataFrame(np.random.randn(100000, 1), columns=['A'], - index=pd.date_range('20130101', periods=100000, freq='T')) + dft = pd.DataFrame( + np.random.randn(100000, 1), + columns=["A"], + index=pd.date_range("20130101", periods=100000, freq="T"), + ) dft - dft['2013'] + dft["2013"] This starts on the very first time in the month, and includes the last date and time for the month: @@ -597,43 +605,45 @@ time for the month: .. ipython:: python :okwarning: - dft['2013-1':'2013-2'] + dft["2013-1":"2013-2"] This specifies a stop time **that includes all of the times on the last day**: .. ipython:: python :okwarning: - dft['2013-1':'2013-2-28'] + dft["2013-1":"2013-2-28"] This specifies an **exact** stop time (and is not the same as the above): .. ipython:: python :okwarning: - dft['2013-1':'2013-2-28 00:00:00'] + dft["2013-1":"2013-2-28 00:00:00"] We are stopping on the included end-point as it is part of the index: .. ipython:: python :okwarning: - dft['2013-1-15':'2013-1-15 12:30:00'] + dft["2013-1-15":"2013-1-15 12:30:00"] ``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``: .. ipython:: python - dft2 = pd.DataFrame(np.random.randn(20, 1), - columns=['A'], - index=pd.MultiIndex.from_product( - [pd.date_range('20130101', periods=10, freq='12H'), - ['a', 'b']])) + dft2 = pd.DataFrame( + np.random.randn(20, 1), + columns=["A"], + index=pd.MultiIndex.from_product( + [pd.date_range("20130101", periods=10, freq="12H"), ["a", "b"]] + ), + ) dft2 - dft2.loc['2013-01-05'] + dft2.loc["2013-01-05"] idx = pd.IndexSlice dft2 = dft2.swaplevel(0, 1).sort_index() - dft2.loc[idx[:, '2013-01-05'], :] + dft2.loc[idx[:, "2013-01-05"], :] .. versionadded:: 0.25.0 @@ -642,9 +652,9 @@ Slicing with string indexing also honors UTC offset. .. ipython:: python :okwarning: - df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df = pd.DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific")) df - df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] + df["2019-01-01 12:00:00+04:00":"2019-01-01 13:00:00+04:00"] .. _timeseries.slice_vs_exact_match: @@ -657,45 +667,48 @@ Consider a ``Series`` object with a minute resolution index: .. 
ipython:: python - series_minute = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12-31 23:59:00', - '2012-01-01 00:00:00', - '2012-01-01 00:02:00'])) + series_minute = pd.Series( + [1, 2, 3], + pd.DatetimeIndex( + ["2011-12-31 23:59:00", "2012-01-01 00:00:00", "2012-01-01 00:02:00"] + ), + ) series_minute.index.resolution A timestamp string less accurate than a minute gives a ``Series`` object. .. ipython:: python - series_minute['2011-12-31 23'] + series_minute["2011-12-31 23"] A timestamp string with minute resolution (or more accurate), gives a scalar instead, i.e. it is not casted to a slice. .. ipython:: python - series_minute['2011-12-31 23:59'] - series_minute['2011-12-31 23:59:00'] + series_minute["2011-12-31 23:59"] + series_minute["2011-12-31 23:59:00"] If index resolution is second, then the minute-accurate timestamp gives a ``Series``. .. ipython:: python - series_second = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12-31 23:59:59', - '2012-01-01 00:00:00', - '2012-01-01 00:00:01'])) + series_second = pd.Series( + [1, 2, 3], + pd.DatetimeIndex( + ["2011-12-31 23:59:59", "2012-01-01 00:00:00", "2012-01-01 00:00:01"] + ), + ) series_second.index.resolution - series_second['2011-12-31 23:59'] + series_second["2011-12-31 23:59"] If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``[]`` as well. .. ipython:: python :okwarning: - dft_minute = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, - index=series_minute.index) - dft_minute['2011-12-31 23'] + dft_minute = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index) + dft_minute["2011-12-31 23"] .. warning:: @@ -706,16 +719,17 @@ If the timestamp string is treated as a slice, it can be used to index ``DataFra .. ipython:: python - dft_minute.loc['2011-12-31 23:59'] + dft_minute.loc["2011-12-31 23:59"] Note also that ``DatetimeIndex`` resolution cannot be less precise than day. .. ipython:: python - series_monthly = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12', '2012-01', '2012-02'])) + series_monthly = pd.Series( + [1, 2, 3], pd.DatetimeIndex(["2011-12", "2012-01", "2012-02"]) + ) series_monthly.index.resolution - series_monthly['2011-12'] # returns Series + series_monthly["2011-12"] # returns Series Exact indexing @@ -727,14 +741,15 @@ These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and .. ipython:: python - dft[datetime.datetime(2013, 1, 1):datetime.datetime(2013, 2, 28)] + dft[datetime.datetime(2013, 1, 1) : datetime.datetime(2013, 2, 28)] With no defaults. .. ipython:: python - dft[datetime.datetime(2013, 1, 1, 10, 12, 0): - datetime.datetime(2013, 2, 28, 10, 12, 0)] + dft[ + datetime.datetime(2013, 1, 1, 10, 12, 0) : datetime.datetime(2013, 2, 28, 10, 12, 0) + ] Truncating & fancy indexing @@ -747,11 +762,11 @@ partially matching dates: .. ipython:: python - rng2 = pd.date_range('2011-01-01', '2012-01-01', freq='W') + rng2 = pd.date_range("2011-01-01", "2012-01-01", freq="W") ts2 = pd.Series(np.random.randn(len(rng2)), index=rng2) - ts2.truncate(before='2011-11', after='2011-12') - ts2['2011-11':'2011-12'] + ts2.truncate(before="2011-11", after="2011-12") + ts2["2011-11":"2011-12"] Even complicated fancy indexing that breaks the ``DatetimeIndex`` frequency regularity will result in a ``DatetimeIndex``, although frequency is lost: @@ -807,7 +822,7 @@ You may obtain the year, week and day components of the ISO year from the ISO 86 .. 
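ipython:: python

    # An added sketch, not part of the patch: a single Timestamp exposes
    # the same ISO 8601 year/week/day components
    pd.Timestamp("2020-01-01").isocalendar()

On a ``DatetimeIndex`` the computation is vectorized:

..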
ipython:: python - idx = pd.date_range(start='2019-12-29', freq='D', periods=4) + idx = pd.date_range(start="2019-12-29", freq="D", periods=4) idx.isocalendar() idx.to_series().dt.isocalendar() @@ -837,12 +852,12 @@ arithmetic operator (``+``) or the ``apply`` method can be used to perform the s .. ipython:: python # This particular day contains a day light savings time transition - ts = pd.Timestamp('2016-10-30 00:00:00', tz='Europe/Helsinki') + ts = pd.Timestamp("2016-10-30 00:00:00", tz="Europe/Helsinki") # Respects absolute time ts + pd.Timedelta(days=1) # Respects calendar time ts + pd.DateOffset(days=1) - friday = pd.Timestamp('2018-01-05') + friday = pd.Timestamp("2018-01-05") friday.day_name() # Add 2 business days (Friday --> Tuesday) two_business_days = 2 * pd.offsets.BDay() @@ -900,10 +915,10 @@ business offsets operate on the weekdays. .. ipython:: python - ts = pd.Timestamp('2018-01-06 00:00:00') + ts = pd.Timestamp("2018-01-06 00:00:00") ts.day_name() # BusinessHour's valid offset dates are Monday through Friday - offset = pd.offsets.BusinessHour(start='09:00') + offset = pd.offsets.BusinessHour(start="09:00") # Bring the date to the closest offset date (Monday) offset.rollforward(ts) # Date is brought to the closest offset date first and then the hour is added @@ -916,12 +931,12 @@ in the operation). .. ipython:: python - ts = pd.Timestamp('2014-01-01 09:00') + ts = pd.Timestamp("2014-01-01 09:00") day = pd.offsets.Day() day.apply(ts) day.apply(ts).normalize() - ts = pd.Timestamp('2014-01-01 22:00') + ts = pd.Timestamp("2014-01-01 22:00") hour = pd.offsets.Hour() hour.apply(ts) hour.apply(ts).normalize() @@ -974,7 +989,7 @@ apply the offset to each element. .. ipython:: python - rng = pd.date_range('2012-01-01', '2012-01-03') + rng = pd.date_range("2012-01-01", "2012-01-03") s = pd.Series(rng) rng rng + pd.DateOffset(months=2) @@ -989,7 +1004,7 @@ used exactly like a ``Timedelta`` - see the .. ipython:: python s - pd.offsets.Day(2) - td = s - pd.Series(pd.date_range('2011-12-29', '2011-12-31')) + td = s - pd.Series(pd.date_range("2011-12-29", "2011-12-31")) td td + pd.offsets.Minute(15) @@ -1016,16 +1031,13 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i .. ipython:: python - weekmask_egypt = 'Sun Mon Tue Wed Thu' + weekmask_egypt = "Sun Mon Tue Wed Thu" # They also observe International Workers' Day so let's # add that for a couple of years - holidays = ['2012-05-01', - datetime.datetime(2013, 5, 1), - np.datetime64('2014-05-01')] - bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, - weekmask=weekmask_egypt) + holidays = ["2012-05-01", datetime.datetime(2013, 5, 1), np.datetime64("2014-05-01")] + bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt) dt = datetime.datetime(2013, 4, 30) dt + 2 * bday_egypt @@ -1035,8 +1047,7 @@ Let's map to the weekday names: dts = pd.date_range(dt, periods=5, freq=bday_egypt) - pd.Series(dts.weekday, dts).map( - pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split())) + pd.Series(dts.weekday, dts).map(pd.Series("Mon Tue Wed Thu Fri Sat Sun".split())) Holiday calendars can be used to provide the list of holidays. See the :ref:`holiday calendar` section for more information. @@ -1058,15 +1069,14 @@ in the usual way. .. 
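ipython:: python

    # An added sketch, not part of the patch, using an ad-hoc holiday list
    # chosen purely for illustration: the listed date is skipped when
    # rolling to the first business day of the month
    bmth = pd.offsets.CustomBusinessMonthBegin(holidays=["2013-12-02"])
    pd.Timestamp("2013-11-17") + bmth

A holiday calendar can supply the holidays instead:

..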
ipython:: python

-    bmth_us = pd.offsets.CustomBusinessMonthBegin(
-        calendar=USFederalHolidayCalendar())
+    bmth_us = pd.offsets.CustomBusinessMonthBegin(calendar=USFederalHolidayCalendar())

     # Skip new years
     dt = datetime.datetime(2013, 12, 17)
     dt + bmth_us

     # Define date index with custom offset
-    pd.date_range(start='20100101', end='20120101', freq=bmth_us)
+    pd.date_range(start="20100101", end="20120101", freq=bmth_us)

 .. note::
@@ -1097,23 +1107,23 @@ hours are added to the next business day.

     bh

     # 2014-08-01 is Friday
-    pd.Timestamp('2014-08-01 10:00').weekday()
-    pd.Timestamp('2014-08-01 10:00') + bh
+    pd.Timestamp("2014-08-01 10:00").weekday()
+    pd.Timestamp("2014-08-01 10:00") + bh

     # Below example is the same as: pd.Timestamp('2014-08-01 09:00') + bh
-    pd.Timestamp('2014-08-01 08:00') + bh
+    pd.Timestamp("2014-08-01 08:00") + bh

     # If the result is on the end time, move to the next business day
-    pd.Timestamp('2014-08-01 16:00') + bh
+    pd.Timestamp("2014-08-01 16:00") + bh

     # Remaining business hours are added to the next day
-    pd.Timestamp('2014-08-01 16:30') + bh
+    pd.Timestamp("2014-08-01 16:30") + bh

     # Adding 2 business hours
-    pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2)
+    pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(2)

     # Subtracting 3 business hours
-    pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3)
+    pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(-3)

 You can also specify ``start`` and ``end`` time by keywords. The argument must
 be a ``str`` with an ``hour:minute`` representation or a ``datetime.time``
@@ -1122,12 +1132,12 @@ results in ``ValueError``.

 .. ipython:: python

-    bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0))
+    bh = pd.offsets.BusinessHour(start="11:00", end=datetime.time(20, 0))
     bh

-    pd.Timestamp('2014-08-01 13:00') + bh
-    pd.Timestamp('2014-08-01 09:00') + bh
-    pd.Timestamp('2014-08-01 18:00') + bh
+    pd.Timestamp("2014-08-01 13:00") + bh
+    pd.Timestamp("2014-08-01 09:00") + bh
+    pd.Timestamp("2014-08-01 18:00") + bh

 Passing ``start`` time later than ``end`` represents midnight business hour.
 In this case, the business hour exceeds midnight and overlaps into the next day.
 Valid business hours are distinguished by whether it started from valid ``Busine
@@ -1135,19 +1145,19 @@

 .. ipython:: python

-    bh = pd.offsets.BusinessHour(start='17:00', end='09:00')
+    bh = pd.offsets.BusinessHour(start="17:00", end="09:00")
     bh

-    pd.Timestamp('2014-08-01 17:00') + bh
-    pd.Timestamp('2014-08-01 23:00') + bh
+    pd.Timestamp("2014-08-01 17:00") + bh
+    pd.Timestamp("2014-08-01 23:00") + bh

     # Although 2014-08-02 is Saturday,
     # it is valid because it starts from 08-01 (Friday).
-    pd.Timestamp('2014-08-02 04:00') + bh
+    pd.Timestamp("2014-08-02 04:00") + bh

     # Although 2014-08-04 is Monday,
     # it is out of business hours because it starts from 08-03 (Sunday).
-    pd.Timestamp('2014-08-04 04:00') + bh
+    pd.Timestamp("2014-08-04 04:00") + bh

 Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in
 the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward``
@@ -1160,19 +1170,19 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet

 ..
ipython:: python

     # This adjusts a Timestamp to business hour edge
-    pd.offsets.BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00'))
-    pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00'))
+    pd.offsets.BusinessHour().rollback(pd.Timestamp("2014-08-02 15:00"))
+    pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02 15:00"))

     # It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')).
     # And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00'))
-    pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02 15:00'))
+    pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02 15:00"))

     # BusinessDay results (for reference)
-    pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02'))
+    pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02"))

     # It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01'))
     # The result is the same as rollforward because BusinessDay never overlaps.
-    pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02'))
+    pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02"))

 ``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary
 holidays, you can use ``CustomBusinessHour`` offset, as explained in the
@@ -1190,6 +1200,7 @@ as ``BusinessHour`` except that it skips specified custom holidays.

 .. ipython:: python

     from pandas.tseries.holiday import USFederalHolidayCalendar
+
     bhour_us = pd.offsets.CustomBusinessHour(calendar=USFederalHolidayCalendar())
     # Friday before MLK Day
     dt = datetime.datetime(2014, 1, 17, 15)

     dt + bhour_us

     # Tuesday after MLK Day (Monday is skipped because it's a holiday)
     dt + bhour_us * 2

 You can use keyword arguments supported by both ``BusinessHour`` and ``CustomB
@@ -1203,8 +1214,7 @@

 .. ipython:: python

-    bhour_mon = pd.offsets.CustomBusinessHour(start='10:00',
-                                              weekmask='Tue Wed Thu Fri')
+    bhour_mon = pd.offsets.CustomBusinessHour(start="10:00", weekmask="Tue Wed Thu Fri")

     # Monday is skipped because it's a holiday, business hour starts from 10:00
     dt + bhour_mon * 2

@@ -1257,7 +1267,7 @@ most functions:

-    pd.date_range(start, periods=5, freq='B')
+    pd.date_range(start, periods=5, freq="B")

     pd.date_range(start, periods=5, freq=pd.offsets.BDay())

 You can combine together day and intraday offsets:

 .. ipython:: python

-    pd.date_range(start, periods=10, freq='2h20min')
+    pd.date_range(start, periods=10, freq="2h20min")

-    pd.date_range(start, periods=10, freq='1D10U')
+    pd.date_range(start, periods=10, freq="1D10U")

 Anchored offsets
 ~~~~~~~~~~~~~~~~
@@ -1326,39 +1336,39 @@ anchor point, and moved ``|n|-1`` additional steps forwards or backwards.

 .. ipython:: python

-    pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=1)
-    pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=1)
+    pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=1)
+    pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=1)

-    pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=1)
-    pd.Timestamp('2014-01-02') - pd.offsets.MonthEnd(n=1)
+    pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=1)
+    pd.Timestamp("2014-01-02") - pd.offsets.MonthEnd(n=1)

-    pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=4)
-    pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=4)
+    pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=4)
+    pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=4)

 If the given date *is* on an anchor point, it is moved ``|n|`` points forwards
 or backwards.

 ..
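ipython:: python

    # An added sketch, not part of the patch: 2014-02-01 is already a
    # month-begin anchor, so n=1 moves one full step to the next anchor
    pd.Timestamp("2014-02-01") + pd.offsets.MonthBegin(n=1)

The same rule applies to ``MonthEnd`` and to subtraction:

..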
ipython:: python

-    pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=1)
-    pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=1)
+    pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=1)
+    pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=1)

-    pd.Timestamp('2014-01-01') - pd.offsets.MonthBegin(n=1)
-    pd.Timestamp('2014-01-31') - pd.offsets.MonthEnd(n=1)
+    pd.Timestamp("2014-01-01") - pd.offsets.MonthBegin(n=1)
+    pd.Timestamp("2014-01-31") - pd.offsets.MonthEnd(n=1)

-    pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=4)
-    pd.Timestamp('2014-01-31') - pd.offsets.MonthBegin(n=4)
+    pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=4)
+    pd.Timestamp("2014-01-31") - pd.offsets.MonthBegin(n=4)

 For the case when ``n=0``, the date is not moved if on an anchor point, otherwise
 it is rolled forward to the next anchor point.

 .. ipython:: python

-    pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=0)
-    pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=0)
+    pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=0)
+    pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=0)

-    pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=0)
-    pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=0)
+    pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=0)
+    pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=0)

 .. _timeseries.holiday:

@@ -1394,14 +1404,22 @@ An example of how holidays and holiday calendars are defined:

 .. ipython:: python

-    from pandas.tseries.holiday import Holiday, USMemorialDay,\
-        AbstractHolidayCalendar, nearest_workday, MO
+    from pandas.tseries.holiday import (
+        Holiday,
+        USMemorialDay,
+        AbstractHolidayCalendar,
+        nearest_workday,
+        MO,
+    )
+
+
     class ExampleCalendar(AbstractHolidayCalendar):
         rules = [
             USMemorialDay,
-            Holiday('July 4th', month=7, day=4, observance=nearest_workday),
-            Holiday('Columbus Day', month=10, day=1,
-                    offset=pd.DateOffset(weekday=MO(2)))]
+            Holiday("July 4th", month=7, day=4, observance=nearest_workday),
+            Holiday("Columbus Day", month=10, day=1, offset=pd.DateOffset(weekday=MO(2))),
+        ]
+
+
     cal = ExampleCalendar()
     cal.holidays(datetime.datetime(2012, 1, 1), datetime.datetime(2012, 12, 31))

@@ -1417,8 +1435,9 @@ or ``Timestamp`` objects.

 .. ipython:: python

-    pd.date_range(start='7/1/2012', end='7/10/2012',
-                  freq=pd.offsets.CDay(calendar=cal)).to_pydatetime()
+    pd.date_range(
+        start="7/1/2012", end="7/10/2012", freq=pd.offsets.CDay(calendar=cal)
+    ).to_pydatetime()
     offset = pd.offsets.CustomBusinessDay(calendar=cal)
     datetime.datetime(2012, 5, 25) + offset
     datetime.datetime(2012, 7, 3) + offset
@@ -1450,11 +1469,11 @@ or calendars with additional rules.

 .. ipython:: python

-    from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory,\
-        USLaborDay
-    cal = get_calendar('ExampleCalendar')
+    from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory, USLaborDay
+
+    cal = get_calendar("ExampleCalendar")
     cal.rules
-    new_cal = HolidayCalendarFactory('NewExampleCalendar', cal, USLaborDay)
+    new_cal = HolidayCalendarFactory("NewExampleCalendar", cal, USLaborDay)
     new_cal.rules

 .. _timeseries.advanced_datetime:
@@ -1484,9 +1503,9 @@ rather than changing the alignment of the data and the index:

 .. ipython:: python

-    ts.shift(5, freq='D')
+    ts.shift(5, freq="D")
     ts.shift(5, freq=pd.offsets.BDay())
-    ts.shift(5, freq='BM')
+    ts.shift(5, freq="BM")

 Note that when ``freq`` is specified, the leading entry is no longer NaN
 because the data is not being realigned.
@@ -1501,7 +1520,7 @@ calls ``reindex``.

 ..
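ipython:: python

    # An added minimal sketch, not part of the patch: asfreq here is
    # equivalent to reindexing onto a daily date_range over the same span
    ser = pd.Series([1.0, 2.0], index=pd.to_datetime(["2010-01-01", "2010-01-03"]))
    ser.asfreq("D")

For example:

..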
ipython:: python - dr = pd.date_range('1/1/2010', periods=3, freq=3 * pd.offsets.BDay()) + dr = pd.date_range("1/1/2010", periods=3, freq=3 * pd.offsets.BDay()) ts = pd.Series(np.random.randn(3), index=dr) ts ts.asfreq(pd.offsets.BDay()) @@ -1511,7 +1530,7 @@ method for any gaps that may appear after the frequency conversion. .. ipython:: python - ts.asfreq(pd.offsets.BDay(), method='pad') + ts.asfreq(pd.offsets.BDay(), method="pad") Filling forward / backward ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1552,11 +1571,11 @@ Basics .. ipython:: python - rng = pd.date_range('1/1/2012', periods=100, freq='S') + rng = pd.date_range("1/1/2012", periods=100, freq="S") ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng) - ts.resample('5Min').sum() + ts.resample("5Min").sum() The ``resample`` function is very flexible and allows you to specify many different parameters to control the frequency conversion and resampling @@ -1568,11 +1587,11 @@ a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``, .. ipython:: python - ts.resample('5Min').mean() + ts.resample("5Min").mean() - ts.resample('5Min').ohlc() + ts.resample("5Min").ohlc() - ts.resample('5Min').max() + ts.resample("5Min").max() For downsampling, ``closed`` can be set to 'left' or 'right' to specify which @@ -1580,9 +1599,9 @@ end of the interval is closed: .. ipython:: python - ts.resample('5Min', closed='right').mean() + ts.resample("5Min", closed="right").mean() - ts.resample('5Min', closed='left').mean() + ts.resample("5Min", closed="left").mean() Parameters like ``label`` are used to manipulate the resulting labels. ``label`` specifies whether the result is labeled with the beginning or @@ -1590,9 +1609,9 @@ the end of the interval. .. ipython:: python - ts.resample('5Min').mean() # by default label='left' + ts.resample("5Min").mean() # by default label='left' - ts.resample('5Min', label='left').mean() + ts.resample("5Min", label="left").mean() .. warning:: @@ -1606,12 +1625,12 @@ the end of the interval. .. ipython:: python - s = pd.date_range('2000-01-01', '2000-01-05').to_series() + s = pd.date_range("2000-01-01", "2000-01-05").to_series() s.iloc[2] = pd.NaT s.dt.day_name() # default: label='left', closed='left' - s.resample('B').last().dt.day_name() + s.resample("B").last().dt.day_name() Notice how the value for Sunday got pulled back to the previous Friday. To get the behavior where the value for Sunday is pushed to Monday, use @@ -1619,7 +1638,7 @@ the end of the interval. .. ipython:: python - s.resample('B', label='right', closed='right').last().dt.day_name() + s.resample("B", label="right", closed="right").last().dt.day_name() The ``axis`` parameter can be set to 0 or 1 and allows you to resample the specified axis for a ``DataFrame``. @@ -1642,11 +1661,11 @@ For upsampling, you can specify a way to upsample and the ``limit`` parameter to # from secondly to every 250 milliseconds - ts[:2].resample('250L').asfreq() + ts[:2].resample("250L").asfreq() - ts[:2].resample('250L').ffill() + ts[:2].resample("250L").ffill() - ts[:2].resample('250L').ffill(limit=2) + ts[:2].resample("250L").ffill(limit=2) Sparse resampling ~~~~~~~~~~~~~~~~~ @@ -1662,14 +1681,14 @@ resample only the groups that are not all ``NaN``. .. ipython:: python - rng = pd.date_range('2014-1-1', periods=100, freq='D') + pd.Timedelta('1s') + rng = pd.date_range("2014-1-1", periods=100, freq="D") + pd.Timedelta("1s") ts = pd.Series(range(100), index=rng) If we want to resample to the full range of the series: .. 
ipython:: python

-    ts.resample('3T').sum()
+    ts.resample("3T").sum()

 We can instead only resample those groups where we have points as follows:

 .. ipython:: python

     from functools import partial
     from pandas.tseries.frequencies import to_offset

+
     def round(t, freq):
         # round a Timestamp to a specified freq
         freq = to_offset(freq)
         return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value)

-    ts.groupby(partial(round, freq='3T')).sum()
+
+    ts.groupby(partial(round, freq="3T")).sum()

 .. _timeseries.aggregate:
@@ -1697,25 +1718,27 @@ Resampling a ``DataFrame``, the default will be to act on all columns with the s

 .. ipython:: python

-    df = pd.DataFrame(np.random.randn(1000, 3),
-                      index=pd.date_range('1/1/2012', freq='S', periods=1000),
-                      columns=['A', 'B', 'C'])
-    r = df.resample('3T')
+    df = pd.DataFrame(
+        np.random.randn(1000, 3),
+        index=pd.date_range("1/1/2012", freq="S", periods=1000),
+        columns=["A", "B", "C"],
+    )
+    r = df.resample("3T")
     r.mean()

 We can select a specific column or columns using standard getitem.

 .. ipython:: python

-    r['A'].mean()
+    r["A"].mean()

-    r[['A', 'B']].mean()
+    r[["A", "B"]].mean()

 You can pass a list or dict of functions to do aggregation with, outputting a ``DataFrame``:

 .. ipython:: python

-    r['A'].agg([np.sum, np.mean, np.std])
+    r["A"].agg([np.sum, np.mean, np.std])

 On a resampled ``DataFrame``, you can pass a list of functions to apply to each
 column, which produces an aggregated result with a hierarchical index:
@@ -1730,21 +1753,20 @@ columns of a ``DataFrame``:

 .. ipython:: python
    :okexcept:

-    r.agg({'A': np.sum,
-           'B': lambda x: np.std(x, ddof=1)})
+    r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})

 The function names can also be strings. In order for a string to be valid it
 must be implemented on the resampled object:

 .. ipython:: python

-    r.agg({'A': 'sum', 'B': 'std'})
+    r.agg({"A": "sum", "B": "std"})

 Furthermore, you can also specify multiple aggregation functions for each column separately.

 .. ipython:: python

-    r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']})
+    r.agg({"A": ["sum", "std"], "B": ["mean", "std"]})

 If a ``DataFrame`` does not have a datetimelike index, but instead you want
 to resample based on a datetimelike column in the frame, it can be passed to the
@@ -1753,14 +1775,15 @@

 .. ipython:: python

-    df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
-                       'a': np.arange(5)},
-                      index=pd.MultiIndex.from_arrays([
-                          [1, 2, 3, 4, 5],
-                          pd.date_range('2015-01-01', freq='W', periods=5)],
-                          names=['v', 'd']))
+    df = pd.DataFrame(
+        {"date": pd.date_range("2015-01-01", freq="W", periods=5), "a": np.arange(5)},
+        index=pd.MultiIndex.from_arrays(
+            [[1, 2, 3, 4, 5], pd.date_range("2015-01-01", freq="W", periods=5)],
+            names=["v", "d"],
+        ),
+    )
     df
-    df.resample('M', on='date').sum()
+    df.resample("M", on="date").sum()

 Similarly, if you instead want to resample by a datetimelike
 level of ``MultiIndex``, its name or location can be passed to the
@@ -1768,7 +1791,7 @@

 .. ipython:: python

-    df.resample('M', level='d').sum()
+    df.resample("M", level="d").sum()

 ..
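ipython:: python

    # An added sketch, not part of the patch: the level can equivalently
    # be referred to by its position
    df.resample("M", level=1).sum()

Referring to the level by position (here ``1``) gives the same result as using its name.

..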
_timeseries.iterating-label:
@@ -1782,14 +1805,18 @@ natural and functions similarly to :py:func:`itertools.groupby`:

     small = pd.Series(
         range(6),
-        index=pd.to_datetime(['2017-01-01T00:00:00',
-                              '2017-01-01T00:30:00',
-                              '2017-01-01T00:31:00',
-                              '2017-01-01T01:00:00',
-                              '2017-01-01T03:00:00',
-                              '2017-01-01T03:05:00'])
+        index=pd.to_datetime(
+            [
+                "2017-01-01T00:00:00",
+                "2017-01-01T00:30:00",
+                "2017-01-01T00:31:00",
+                "2017-01-01T01:00:00",
+                "2017-01-01T03:00:00",
+                "2017-01-01T03:05:00",
+            ]
+        ),
     )
-    resampled = small.resample('H')
+    resampled = small.resample("H")

     for name, group in resampled:
         print("Group: ", name)
@@ -1811,9 +1838,9 @@ For example:

 .. ipython:: python

-    start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
-    middle = '2000-10-02 00:00:00'
-    rng = pd.date_range(start, end, freq='7min')
+    start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00"
+    middle = "2000-10-02 00:00:00"
+    rng = pd.date_range(start, end, freq="7min")
     ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
     ts

 Here we can see that, when using ``origin`` with its default value (``'start_day

 .. ipython:: python

-    ts.resample('17min', origin='start_day').sum()
-    ts[middle:end].resample('17min', origin='start_day').sum()
+    ts.resample("17min", origin="start_day").sum()
+    ts[middle:end].resample("17min", origin="start_day").sum()

 Here we can see that, when setting ``origin`` to ``'epoch'``, the result after
 ``'2000-10-02 00:00:00'`` is identical regardless of the start of the time series:

 .. ipython:: python

-    ts.resample('17min', origin='epoch').sum()
-    ts[middle:end].resample('17min', origin='epoch').sum()
+    ts.resample("17min", origin="epoch").sum()
+    ts[middle:end].resample("17min", origin="epoch").sum()

 If needed you can use a custom timestamp for ``origin``:

 .. ipython:: python

-    ts.resample('17min', origin='2001-01-01').sum()
-    ts[middle:end].resample('17min', origin=pd.Timestamp('2001-01-01')).sum()
+    ts.resample("17min", origin="2001-01-01").sum()
+    ts[middle:end].resample("17min", origin=pd.Timestamp("2001-01-01")).sum()

 If needed you can just adjust the bins with an ``offset`` Timedelta that would be added to the default ``origin``.
 Those two examples are equivalent for this time series:

 .. ipython:: python

-    ts.resample('17min', origin='start').sum()
-    ts.resample('17min', offset='23h30min').sum()
+    ts.resample("17min", origin="start").sum()
+    ts.resample("17min", offset="23h30min").sum()

 Note the use of ``'start'`` for ``origin`` on the last example. In that case, ``origin`` will be set to the first value of the time series.

@@ -1869,37 +1896,37 @@ Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3

 .. ipython:: python

-    pd.Period('2012', freq='A-DEC')
+    pd.Period("2012", freq="A-DEC")

-    pd.Period('2012-1-1', freq='D')
+    pd.Period("2012-1-1", freq="D")

-    pd.Period('2012-1-1 19:00', freq='H')
+    pd.Period("2012-1-1 19:00", freq="H")

-    pd.Period('2012-1-1 19:00', freq='5H')
+    pd.Period("2012-1-1 19:00", freq="5H")

 Adding and subtracting integers from periods shifts the period by its own
 frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` (span).

 ..
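ipython:: python

    # An added sketch, not part of the patch: adding 1 to a monthly period
    # moves it forward one month
    pd.Period("2012-01", freq="M") + 1

For example:

..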
ipython:: python - p = pd.Period('2012', freq='A-DEC') + p = pd.Period("2012", freq="A-DEC") p + 1 p - 3 - p = pd.Period('2012-01', freq='2M') + p = pd.Period("2012-01", freq="2M") p + 2 p - 1 @okexcept - p == pd.Period('2012-01', freq='3M') + p == pd.Period("2012-01", freq="3M") If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised. .. ipython:: python - p = pd.Period('2014-07-01 09:00', freq='H') + p = pd.Period("2014-07-01 09:00", freq="H") p + pd.offsets.Hour(2) p + datetime.timedelta(minutes=120) - p + np.timedelta64(7200, 's') + p + np.timedelta64(7200, "s") .. code-block:: ipython @@ -1912,7 +1939,7 @@ If ``Period`` has other frequencies, only the same ``offsets`` can be added. Oth .. ipython:: python - p = pd.Period('2014-07', freq='M') + p = pd.Period("2014-07", freq="M") p + pd.offsets.MonthEnd(3) .. code-block:: ipython @@ -1927,7 +1954,7 @@ return the number of frequency units between them: .. ipython:: python - pd.Period('2012', freq='A-DEC') - pd.Period('2002', freq='A-DEC') + pd.Period("2012", freq="A-DEC") - pd.Period("2002", freq="A-DEC") PeriodIndex and period_range ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1936,21 +1963,21 @@ which can be constructed using the ``period_range`` convenience function: .. ipython:: python - prng = pd.period_range('1/1/2011', '1/1/2012', freq='M') + prng = pd.period_range("1/1/2011", "1/1/2012", freq="M") prng The ``PeriodIndex`` constructor can also be used directly: .. ipython:: python - pd.PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M') + pd.PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") Passing multiplied frequency outputs a sequence of ``Period`` which has multiplied span. .. ipython:: python - pd.period_range(start='2014-01', freq='3M', periods=4) + pd.period_range(start="2014-01", freq="3M", periods=4) If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the @@ -1958,8 +1985,9 @@ endpoints for a ``PeriodIndex`` with frequency matching that of the .. ipython:: python - pd.period_range(start=pd.Period('2017Q1', freq='Q'), - end=pd.Period('2017Q2', freq='Q'), freq='M') + pd.period_range( + start=pd.Period("2017Q1", freq="Q"), end=pd.Period("2017Q2", freq="Q"), freq="M" + ) Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas objects: @@ -1973,11 +2001,11 @@ objects: .. ipython:: python - idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H') + idx = pd.period_range("2014-07-01 09:00", periods=5, freq="H") idx idx + pd.offsets.Hour(2) - idx = pd.period_range('2014-07', periods=5, freq='M') + idx = pd.period_range("2014-07", periods=5, freq="M") idx idx + pd.offsets.MonthEnd(3) @@ -1996,7 +2024,7 @@ The ``period`` dtype holds the ``freq`` attribute and is represented with .. ipython:: python - pi = pd.period_range('2016-01-01', periods=3, freq='M') + pi = pd.period_range("2016-01-01", periods=3, freq="M") pi pi.dtype @@ -2007,15 +2035,15 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th .. 
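ipython:: python

    # An added sketch, not part of the patch: a Series built from periods
    # carries the same period dtype
    pd.Series(pd.period_range("2016-01", periods=3, freq="M")).dtype

``astype`` conversions work in both directions:

..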
ipython:: python # change monthly freq to daily freq - pi.astype('period[D]') + pi.astype("period[D]") # convert to DatetimeIndex - pi.astype('datetime64[ns]') + pi.astype("datetime64[ns]") # convert to PeriodIndex - dti = pd.date_range('2011-01-01', freq='M', periods=3) + dti = pd.date_range("2011-01-01", freq="M", periods=3) dti - dti.astype('period[M]') + dti.astype("period[M]") PeriodIndex partial string indexing @@ -2029,32 +2057,32 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI .. ipython:: python - ps['2011-01'] + ps["2011-01"] - ps[datetime.datetime(2011, 12, 25):] + ps[datetime.datetime(2011, 12, 25) :] - ps['10/31/2011':'12/31/2011'] + ps["10/31/2011":"12/31/2011"] Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data. .. ipython:: python :okwarning: - ps['2011'] + ps["2011"] - dfp = pd.DataFrame(np.random.randn(600, 1), - columns=['A'], - index=pd.period_range('2013-01-01 9:00', - periods=600, - freq='T')) + dfp = pd.DataFrame( + np.random.randn(600, 1), + columns=["A"], + index=pd.period_range("2013-01-01 9:00", periods=600, freq="T"), + ) dfp - dfp['2013-01-01 10H'] + dfp["2013-01-01 10H"] As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59. .. ipython:: python - dfp['2013-01-01 10H':'2013-01-01 11H'] + dfp["2013-01-01 10H":"2013-01-01 11H"] Frequency conversion and resampling with PeriodIndex @@ -2064,7 +2092,7 @@ method. Let's start with the fiscal year 2011, ending in December: .. ipython:: python - p = pd.Period('2011', freq='A-DEC') + p = pd.Period("2011", freq="A-DEC") p We can convert it to a monthly frequency. Using the ``how`` parameter, we can @@ -2072,16 +2100,16 @@ specify whether to return the starting or ending month: .. ipython:: python - p.asfreq('M', how='start') + p.asfreq("M", how="start") - p.asfreq('M', how='end') + p.asfreq("M", how="end") The shorthands 's' and 'e' are provided for convenience: .. ipython:: python - p.asfreq('M', 's') - p.asfreq('M', 'e') + p.asfreq("M", "s") + p.asfreq("M", "e") Converting to a "super-period" (e.g., annual frequency is a super-period of quarterly frequency) automatically returns the super-period that includes the @@ -2089,9 +2117,9 @@ input period: .. ipython:: python - p = pd.Period('2011-12', freq='M') + p = pd.Period("2011-12", freq="M") - p.asfreq('A-NOV') + p.asfreq("A-NOV") Note that since we converted to an annual frequency that ends the year in November, the monthly period of December 2011 is actually in the 2012 A-NOV @@ -2110,21 +2138,21 @@ frequencies ``Q-JAN`` through ``Q-DEC``. .. ipython:: python - p = pd.Period('2012Q1', freq='Q-DEC') + p = pd.Period("2012Q1", freq="Q-DEC") - p.asfreq('D', 's') + p.asfreq("D", "s") - p.asfreq('D', 'e') + p.asfreq("D", "e") ``Q-MAR`` defines fiscal year end in March: .. ipython:: python - p = pd.Period('2011Q4', freq='Q-MAR') + p = pd.Period("2011Q4", freq="Q-MAR") - p.asfreq('D', 's') + p.asfreq("D", "s") - p.asfreq('D', 'e') + p.asfreq("D", "e") .. _timeseries.interchange: @@ -2136,7 +2164,7 @@ and vice-versa using ``to_timestamp``: .. ipython:: python - rng = pd.date_range('1/1/2012', periods=5, freq='M') + rng = pd.date_range("1/1/2012", periods=5, freq="M") ts = pd.Series(np.random.randn(len(rng)), index=rng) @@ -2153,7 +2181,7 @@ end of the period: .. 
ipython:: python - ps.to_timestamp('D', how='s') + ps.to_timestamp("D", how="s") Converting between period and timestamp enables some convenient arithmetic functions to be used. In the following example, we convert a quarterly @@ -2162,11 +2190,11 @@ the quarter end: .. ipython:: python - prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV') + prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV") ts = pd.Series(np.random.randn(len(prng)), prng) - ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 + ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9 ts.head() @@ -2180,7 +2208,7 @@ then you can use a ``PeriodIndex`` and/or ``Series`` of ``Periods`` to do comput .. ipython:: python - span = pd.period_range('1215-01-01', '1381-01-01', freq='D') + span = pd.period_range("1215-01-01", "1381-01-01", freq="D") span To convert from an ``int64`` based YYYYMMDD representation. @@ -2190,9 +2218,10 @@ To convert from an ``int64`` based YYYYMMDD representation. s = pd.Series([20121231, 20141130, 99991231]) s + def conv(x): - return pd.Period(year=x // 10000, month=x // 100 % 100, - day=x % 100, freq='D') + return pd.Period(year=x // 10000, month=x // 100 % 100, day=x % 100, freq="D") + s.apply(conv) s.apply(conv)[2] @@ -2221,7 +2250,7 @@ By default, pandas objects are time zone unaware: .. ipython:: python - rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D') + rng = pd.date_range("3/6/2012 00:00", periods=15, freq="D") rng.tz is None To localize these dates to a time zone (assign a particular time zone to a naive date), @@ -2241,18 +2270,16 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string import dateutil # pytz - rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D', - tz='Europe/London') + rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz="Europe/London") rng_pytz.tz # dateutil - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D') - rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London') + rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D") + rng_dateutil = rng_dateutil.tz_localize("dateutil/Europe/London") rng_dateutil.tz # dateutil - utc special case - rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', - tz=dateutil.tz.tzutc()) + rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=dateutil.tz.tzutc()) rng_utc.tz .. versionadded:: 0.25.0 @@ -2260,8 +2287,7 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string .. ipython:: python # datetime.timezone - rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', - tz=datetime.timezone.utc) + rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=datetime.timezone.utc) rng_utc.tz Note that the ``UTC`` time zone is a special case in ``dateutil`` and should be constructed explicitly @@ -2273,15 +2299,14 @@ zones objects explicitly first. 
import pytz # pytz - tz_pytz = pytz.timezone('Europe/London') - rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + tz_pytz = pytz.timezone("Europe/London") + rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D") rng_pytz = rng_pytz.tz_localize(tz_pytz) rng_pytz.tz == tz_pytz # dateutil - tz_dateutil = dateutil.tz.gettz('Europe/London') - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D', - tz=tz_dateutil) + tz_dateutil = dateutil.tz.gettz("Europe/London") + rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=tz_dateutil) rng_dateutil.tz == tz_dateutil To convert a time zone aware pandas object from one time zone to another, @@ -2289,7 +2314,7 @@ you can use the ``tz_convert`` method. .. ipython:: python - rng_pytz.tz_convert('US/Eastern') + rng_pytz.tz_convert("US/Eastern") .. note:: @@ -2301,9 +2326,9 @@ you can use the ``tz_convert`` method. .. ipython:: python - dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific') + dti = pd.date_range("2019-01-01", periods=3, freq="D", tz="US/Pacific") dti.tz - ts = pd.Timestamp('2019-01-01', tz='US/Pacific') + ts = pd.Timestamp("2019-01-01", tz="US/Pacific") ts.tz .. warning:: @@ -2344,11 +2369,11 @@ you can use the ``tz_convert`` method. .. ipython:: python - d_2037 = '2037-03-31T010101' - d_2038 = '2038-03-31T010101' - DST = 'Europe/London' - assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz='GMT') - assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz='GMT') + d_2037 = "2037-03-31T010101" + d_2038 = "2038-03-31T010101" + DST = "Europe/London" + assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz="GMT") + assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz="GMT") Under the hood, all timestamps are stored in UTC. Values from a time zone aware :class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.) @@ -2357,8 +2382,8 @@ still considered to be equal even if they are in different time zones: .. ipython:: python - rng_eastern = rng_utc.tz_convert('US/Eastern') - rng_berlin = rng_utc.tz_convert('Europe/Berlin') + rng_eastern = rng_utc.tz_convert("US/Eastern") + rng_berlin = rng_utc.tz_convert("Europe/Berlin") rng_eastern[2] rng_berlin[2] @@ -2369,9 +2394,9 @@ Operations between :class:`Series` in different time zones will yield UTC .. ipython:: python - ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC')) - eastern = ts_utc.tz_convert('US/Eastern') - berlin = ts_utc.tz_convert('Europe/Berlin') + ts_utc = pd.Series(range(3), pd.date_range("20130101", periods=3, tz="UTC")) + eastern = ts_utc.tz_convert("US/Eastern") + berlin = ts_utc.tz_convert("Europe/Berlin") result = eastern + berlin result result.index @@ -2382,14 +2407,13 @@ To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None) .. ipython:: python - didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=3, tz='US/Eastern') + didx = pd.date_range(start="2014-08-01 09:00", freq="H", periods=3, tz="US/Eastern") didx didx.tz_localize(None) didx.tz_convert(None) # tz_convert(None) is identical to tz_convert('UTC').tz_localize(None) - didx.tz_convert('UTC').tz_localize(None) + didx.tz_convert("UTC").tz_localize(None) .. _timeseries.fold: @@ -2415,10 +2439,12 @@ control over how they are handled. .. 
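ipython:: python

    # An added sketch, not part of the patch: fold can be read back from
    # the Timestamp; it is 0 for unambiguous wall times
    pd.Timestamp("2019-10-27 03:30", tz="dateutil/Europe/London").fold

For the ambiguous 01:30 wall time, ``fold`` selects the instant explicitly:

..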
ipython:: python - pd.Timestamp(datetime.datetime(2019, 10, 27, 1, 30, 0, 0), - tz='dateutil/Europe/London', fold=0) - pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30, - tz='dateutil/Europe/London', fold=1) + pd.Timestamp( + datetime.datetime(2019, 10, 27, 1, 30, 0, 0), tz="dateutil/Europe/London", fold=0 + ) + pd.Timestamp( + year=2019, month=10, day=27, hour=1, minute=30, tz="dateutil/Europe/London", fold=1 + ) .. _timeseries.timezone_ambiguous: @@ -2436,8 +2462,9 @@ twice within one day ("clocks fall back"). The following options are available: .. ipython:: python - rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', - '11/06/2011 01:00', '11/06/2011 02:00']) + rng_hourly = pd.DatetimeIndex( + ["11/06/2011 00:00", "11/06/2011 01:00", "11/06/2011 01:00", "11/06/2011 02:00"] + ) This will fail as there are ambiguous times (``'11/06/2011 01:00'``) @@ -2450,9 +2477,9 @@ Handle these ambiguous times by specifying the following. .. ipython:: python - rng_hourly.tz_localize('US/Eastern', ambiguous='infer') - rng_hourly.tz_localize('US/Eastern', ambiguous='NaT') - rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False]) + rng_hourly.tz_localize("US/Eastern", ambiguous="infer") + rng_hourly.tz_localize("US/Eastern", ambiguous="NaT") + rng_hourly.tz_localize("US/Eastern", ambiguous=[True, True, False, False]) .. _timeseries.timezone_nonexistent: @@ -2471,7 +2498,7 @@ can be controlled by the ``nonexistent`` argument. The following options are ava .. ipython:: python - dti = pd.date_range(start='2015-03-29 02:30:00', periods=3, freq='H') + dti = pd.date_range(start="2015-03-29 02:30:00", periods=3, freq="H") # 2:30 is a nonexistent time Localization of nonexistent times will raise an error by default. @@ -2486,10 +2513,10 @@ Transform nonexistent times to ``NaT`` or shift the times. .. ipython:: python dti - dti.tz_localize('Europe/Warsaw', nonexistent='shift_forward') - dti.tz_localize('Europe/Warsaw', nonexistent='shift_backward') - dti.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta(1, unit='H')) - dti.tz_localize('Europe/Warsaw', nonexistent='NaT') + dti.tz_localize("Europe/Warsaw", nonexistent="shift_forward") + dti.tz_localize("Europe/Warsaw", nonexistent="shift_backward") + dti.tz_localize("Europe/Warsaw", nonexistent=pd.Timedelta(1, unit="H")) + dti.tz_localize("Europe/Warsaw", nonexistent="NaT") .. _timeseries.timezone_series: @@ -2502,7 +2529,7 @@ represented with a dtype of ``datetime64[ns]``. .. ipython:: python - s_naive = pd.Series(pd.date_range('20130101', periods=3)) + s_naive = pd.Series(pd.date_range("20130101", periods=3)) s_naive A :class:`Series` with a time zone **aware** values is @@ -2510,7 +2537,7 @@ represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone .. ipython:: python - s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern')) + s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern")) s_aware Both of these :class:`Series` time zone information @@ -2520,7 +2547,7 @@ For example, to localize and convert a naive stamp to time zone aware. .. ipython:: python - s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') + s_naive.dt.tz_localize("UTC").dt.tz_convert("US/Eastern") Time zone information can also be manipulated using the ``astype`` method. This method can localize and convert time zone naive timestamps or @@ -2529,13 +2556,13 @@ convert time zone aware timestamps. .. 
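ipython:: python

    # An added sketch, not part of the patch: the .dt accessor converts an
    # aware Series directly
    s_aware.dt.tz_convert("Europe/Berlin")

The ``astype`` equivalents are:

..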
ipython:: python # localize and convert a naive time zone - s_naive.astype('datetime64[ns, US/Eastern]') + s_naive.astype("datetime64[ns, US/Eastern]") # make an aware tz naive - s_aware.astype('datetime64[ns]') + s_aware.astype("datetime64[ns]") # convert to a new time zone - s_aware.astype('datetime64[ns, CET]') + s_aware.astype("datetime64[ns, CET]") .. note:: @@ -2561,4 +2588,4 @@ convert time zone aware timestamps. .. ipython:: python - s_aware.to_numpy(dtype='datetime64[ns]') + s_aware.to_numpy(dtype="datetime64[ns]") From ee448646f34e80b50ef168c73b3387cac771a88c Mon Sep 17 00:00:00 2001 From: John Karasinski Date: Fri, 2 Oct 2020 17:35:07 -0700 Subject: [PATCH 2/3] fix E203 errors --- doc/source/user_guide/timeseries.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst index 4d0ddd2185669..11ec90085d9bf 100644 --- a/doc/source/user_guide/timeseries.rst +++ b/doc/source/user_guide/timeseries.rst @@ -567,7 +567,7 @@ Dates and strings that parse to timestamps can be passed as indexing parameters: ts["1/31/2011"] - ts[datetime.datetime(2011, 12, 25) :] + ts[datetime.datetime(2011, 12, 25):] ts["10/31/2011":"12/31/2011"] @@ -741,14 +741,14 @@ These ``Timestamp`` and ``datetime`` objects have exact ``hours, minutes,`` and .. ipython:: python - dft[datetime.datetime(2013, 1, 1) : datetime.datetime(2013, 2, 28)] + dft[datetime.datetime(2013, 1, 1): datetime.datetime(2013, 2, 28)] With no defaults. .. ipython:: python dft[ - datetime.datetime(2013, 1, 1, 10, 12, 0) : datetime.datetime(2013, 2, 28, 10, 12, 0) + datetime.datetime(2013, 1, 1, 10, 12, 0): datetime.datetime(2013, 2, 28, 10, 12, 0) ] @@ -2059,7 +2059,7 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI ps["2011-01"] - ps[datetime.datetime(2011, 12, 25) :] + ps[datetime.datetime(2011, 12, 25):] ps["10/31/2011":"12/31/2011"] From 12f9fa2a2f036a4ae213e3668daa751e0730c1ab Mon Sep 17 00:00:00 2001 From: John Karasinski Date: Fri, 2 Oct 2020 23:17:53 -0700 Subject: [PATCH 3/3] add black-style line wrapping --- doc/source/user_guide/io.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index 11e8df55fcad5..184894bbafe28 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -3310,10 +3310,10 @@ applications (CTRL-V on many operating systems). Here we illustrate writing a .. code-block:: python - >>> df = pd.DataFrame({"A": [1, 2, 3], - ... "B": [4, 5, 6], - ... "C": ["p", "q", "r"]}, - ... index=["x", "y", "z"]) + >>> df = pd.DataFrame( + ... {"A": [1, 2, 3], "B": [4, 5, 6], "C": ["p", "q", "r"]}, index=["x", "y", "z"] + ... ) + >>> df A B C x 1 4 p