CLN: fix all flake8 warnings in pandas/io #12096

Closed
wants to merge 2 commits into from
2 changes: 2 additions & 0 deletions pandas/io/api.py
@@ -2,6 +2,8 @@
Data IO api
"""

# flake8: noqa

from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboard import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
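For context on the change above: a module-level `# flake8: noqa` comment turns off all flake8 checks for that file. It is the usual escape hatch for pure re-export modules such as `pandas/io/api.py`, where every import would otherwise be reported as unused (F401). A minimal sketch of the pattern, using an illustrative module with stdlib imports rather than pandas code:

```python
# io_api.py -- a tiny re-export module in the style of pandas/io/api.py
"""Public IO entry points, collected in one place."""

# Every import below exists only to be re-exported; without the file-level
# directive, flake8 would flag each one as F401 ("imported but unused").
# flake8: noqa

from csv import reader, writer
from json import load, dump
```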
2 changes: 1 addition & 1 deletion pandas/io/clipboard.py
@@ -42,7 +42,7 @@ def read_clipboard(**kwargs): # pragma: no cover
# 1 3 4

counts = set([x.lstrip().count('\t') for x in lines])
if len(lines)>1 and len(counts) == 1 and counts.pop() != 0:
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
kwargs['sep'] = '\t'

if kwargs.get('sep') is None and kwargs.get('delim_whitespace') is None:
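The hunk above touches `read_clipboard`'s separator heuristic: the clipboard text is treated as tab-separated when every line contains the same, nonzero number of tabs. A standalone sketch of that heuristic, with an illustrative helper name that does not exist in pandas:

```python
def _sniff_tab_sep(lines):
    """Return '\t' if every line has the same nonzero tab count, else None."""
    counts = {line.lstrip().count('\t') for line in lines}
    if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
        return '\t'
    return None

# Two rows of three tab-separated fields are recognised; space-separated rows are not.
print(_sniff_tab_sep(["1\t2\t3", "4\t5\t6"]))  # '\t'
print(_sniff_tab_sep(["1 2 3", "4 5 6"]))      # None
```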
30 changes: 18 additions & 12 deletions pandas/io/common.py
@@ -30,20 +30,19 @@
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
import urllib.parse as compat_parse
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException
from http.client import HTTPException # noqa
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url
from urllib import urlencode, pathname2url # noqa
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError
from httplib import HTTPException
from contextlib import contextmanager, closing
from functools import wraps
from urllib2 import URLError # noqa
from httplib import HTTPException # noqa
from contextlib import contextmanager, closing # noqa
from functools import wraps # noqa

# @wraps(_urlopen)
@contextmanager
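In contrast to the file-wide directive in `pandas/io/api.py`, this hunk silences flake8 line by line: a trailing `# noqa` keeps the Python 2/3 compat imports, which exist only to be re-exported, from being reported as unused, while the rest of the module stays checked. A rough sketch of the same pattern (the module layout is illustrative, not the pandas source):

```python
import sys

PY3 = sys.version_info[0] >= 3

if PY3:
    # Imported only so other modules can do `from this_module import urlencode`;
    # the trailing `# noqa` suppresses flake8's F401 for just these lines.
    from urllib.parse import urlencode  # noqa
    from http.client import HTTPException  # noqa
else:
    from urllib import urlencode  # noqa
    from httplib import HTTPException  # noqa
```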
@@ -66,6 +65,7 @@ class DtypeWarning(Warning):

try:
from boto.s3 import key

class BotoFileLikeReader(key.Key):
"""boto Key modified to be more file-like

@@ -78,10 +78,12 @@ class BotoFileLikeReader(key.Key):
Also adds a `readline` function which will split the returned
values by the `\n` character.
"""

def __init__(self, *args, **kwargs):
encoding = kwargs.pop("encoding", None) # Python 2 compat
super(BotoFileLikeReader, self).__init__(*args, **kwargs)
self.finished_read = False # Add a flag to mark the end of the read.
# Add a flag to mark the end of the read.
self.finished_read = False
self.buffer = ""
self.lines = []
if encoding is None and compat.PY3:
@@ -121,7 +123,8 @@ def readline(self):
raise StopIteration

if self.encoding:
self.buffer = "{}{}".format(self.buffer, self.read(8192).decode(self.encoding))
self.buffer = "{}{}".format(
self.buffer, self.read(8192).decode(self.encoding))
else:
self.buffer = "{}{}".format(self.buffer, self.read(8192))

@@ -211,13 +214,15 @@ def _expand_user(filepath_or_buffer):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer


def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")


def _stringify_path(filepath_or_buffer):
"""Return the argument coerced to a string if it was a pathlib.Path
or a py.path.local
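The blank lines added in this hunk address flake8's E302 check, which expects two blank lines between top-level definitions. A tiny before/after illustration with throwaway function names:

```python
# Before: only one blank line between top-level functions -> E302
# def _expand(value):
#     return value
# def _validate(value):
#     return value


def _expand(value):
    return value


def _validate(value):  # two blank lines above each top-level def satisfies E302
    return value
```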
@@ -263,8 +268,9 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
else:
compression = None
# cat on the compression to the tuple returned by the function
to_return = list(maybe_read_encoded_stream(req, encoding, compression)) + \
[compression]
to_return = (list(maybe_read_encoded_stream(req, encoding,
compression)) +
[compression])
return tuple(to_return)

if _is_s3_url(filepath_or_buffer):
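The rewrite of `to_return` above trades a backslash continuation for an implicitly continued, parenthesized expression, the style PEP 8 (and flake8's continuation checks) prefers for long lines. A small sketch with placeholder names rather than pandas code:

```python
def combine(read_parts, compression):
    # Discouraged: an explicit backslash continuation
    #   result = list(read_parts) + \
    #       [compression]
    # Preferred: break the expression inside parentheses instead
    result = (list(read_parts) +
              [compression])
    return tuple(result)


print(combine(("stream", "utf-8"), "gzip"))  # ('stream', 'utf-8', 'gzip')
```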
@@ -467,4 +473,4 @@ def _check_as_is(x):
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
self.queue.truncate(0)
2 changes: 2 additions & 0 deletions pandas/io/data.py
@@ -3,6 +3,8 @@


"""
# flake8: noqa

import warnings
import tempfile
import datetime as dt