Diffstat (limited to 'newspipe/lib/utils.py')
-rw-r--r--  newspipe/lib/utils.py  53
1 file changed, 32 insertions(+), 21 deletions(-)
diff --git a/newspipe/lib/utils.py b/newspipe/lib/utils.py
index d206b769..f7244e17 100644
--- a/newspipe/lib/utils.py
+++ b/newspipe/lib/utils.py
@@ -11,18 +11,20 @@ import conf
 logger = logging.getLogger(__name__)
 
 
-def default_handler(obj, role='admin'):
+def default_handler(obj, role="admin"):
     """JSON handler for default query formatting"""
-    if hasattr(obj, 'isoformat'):
+    if hasattr(obj, "isoformat"):
         return obj.isoformat()
-    if hasattr(obj, 'dump'):
+    if hasattr(obj, "dump"):
         return obj.dump(role=role)
     if isinstance(obj, (set, frozenset, types.GeneratorType)):
         return list(obj)
     if isinstance(obj, BaseException):
         return str(obj)
-    raise TypeError("Object of type %s with value of %r "
-                    "is not JSON serializable" % (type(obj), obj))
+    raise TypeError(
+        "Object of type %s with value of %r "
+        "is not JSON serializable" % (type(obj), obj)
+    )
 
 
 def try_keys(dico, *keys):
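
A handler like this is normally passed to json.dumps via its default=
parameter, which is consulted only for objects the encoder cannot
serialize natively. A minimal sketch (the payload below is hypothetical):

    import datetime
    import json

    from newspipe.lib.utils import default_handler

    payload = {"ids": {1, 2, 3}, "fetched_at": datetime.datetime.now()}
    # The set is emitted as a list, the datetime as an ISO-8601 string.
    print(json.dumps(payload, default=default_handler))
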
@@ -37,9 +39,12 @@ def rebuild_url(url, base_split):
     if split.scheme and split.netloc:
         return url  # url is fine
     new_split = urllib.parse.SplitResult(
-            scheme=split.scheme or base_split.scheme,
-            netloc=split.netloc or base_split.netloc,
-            path=split.path, query='', fragment='')
+        scheme=split.scheme or base_split.scheme,
+        netloc=split.netloc or base_split.netloc,
+        path=split.path,
+        query="",
+        fragment="",
+    )
     return urllib.parse.urlunsplit(new_split)
 
 
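rebuild_url completes a relative or scheme-less link from the page's own
split URL; a hypothetical illustration (example.org is a stand-in):

    import urllib.parse

    from newspipe.lib.utils import rebuild_url

    base_split = urllib.parse.urlsplit("https://example.org/blog/post")
    # The missing scheme and host come from base_split; query and
    # fragment are deliberately dropped.
    print(rebuild_url("/favicon.ico", base_split))
    # -> https://example.org/favicon.ico
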
@@ -52,19 +57,22 @@ def try_get_icon_url(url, *splits):
         # if html in content-type, we assume it's a fancy 404 page
         try:
             response = jarr_get(rb_url)
-            content_type = response.headers.get('content-type', '')
+            content_type = response.headers.get("content-type", "")
         except Exception:
             pass
         else:
-            if response is not None and response.ok \
-                    and 'html' not in content_type and response.content:
+            if (
+                response is not None
+                and response.ok
+                and "html" not in content_type
+                and response.content
+            ):
                 return response.url
     return None
 
 
 def to_hash(text):
-    return md5(text.encode('utf8') if hasattr(text, 'encode') else text)\
-        .hexdigest()
+    return md5(text.encode("utf8") if hasattr(text, "encode") else text).hexdigest()
 
 
 def clear_string(data):
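
to_hash accepts either str or bytes, since only objects with an encode
attribute are encoded first; a quick sanity check under that reading:

    from newspipe.lib.utils import to_hash

    # Both spellings hash the same UTF-8 bytes to the same MD5 hex digest.
    assert to_hash("feed") == to_hash(b"feed")
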
@@ -72,18 +80,21 @@ def clear_string(data):
     Clear a string by removing HTML tags, HTML special caracters
     and consecutive white spaces (more that one).
     """
-    p = re.compile('<[^>]+>')  # HTML tags
-    q = re.compile('\s')  # consecutive white spaces
-    return p.sub('', q.sub(' ', data))
+    p = re.compile("<[^>]+>")  # HTML tags
+    q = re.compile("\s")  # consecutive white spaces
+    return p.sub("", q.sub(" ", data))
 
 
-def redirect_url(default='home'):
-    return request.args.get('next') or request.referrer or url_for(default)
+def redirect_url(default="home"):
+    return request.args.get("next") or request.referrer or url_for(default)
 
 
 async def jarr_get(url, **kwargs):
-    request_kwargs = {'verify': False, 'allow_redirects': True,
-                      'timeout': conf.CRAWLER_TIMEOUT,
-                      'headers': {'User-Agent': conf.CRAWLER_USER_AGENT}}
+    request_kwargs = {
+        "verify": False,
+        "allow_redirects": True,
+        "timeout": conf.CRAWLER_TIMEOUT,
+        "headers": {"User-Agent": conf.CRAWLER_USER_AGENT},
+    }
     request_kwargs.update(kwargs)
     return requests.get(url, **request_kwargs)
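
jarr_get is declared async but wraps the synchronous requests.get, so
callers still have to await it to get the Response back; a minimal driver
sketch, with a made-up feed URL:

    import asyncio

    from newspipe.lib.utils import jarr_get

    async def main():
        # Keyword arguments override the crawler defaults via
        # request_kwargs.update(kwargs), e.g. the timeout here.
        response = await jarr_get("https://example.org/feed", timeout=5)
        print(response.status_code, response.headers.get("content-type"))

    asyncio.run(main())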