author     François Schmidts <francois.schmidts@gmail.com>  2015-12-12 21:14:28 +0100
committer  François Schmidts <francois.schmidts@gmail.com>  2015-12-17 09:42:56 +0100
commit     b35e9773198ef2d8b37c4ca223f08147db47de0b (patch)
tree       ba4b1b171b3c1ab9414a96ad264c47b0f9d1246b /src/web/lib/utils.py
parent     Updated link to Heroku deploy button on the About page. (diff)
download   newspipe-b35e9773198ef2d8b37c4ca223f08147db47de0b.tar.gz
           newspipe-b35e9773198ef2d8b37c4ca223f08147db47de0b.tar.bz2
           newspipe-b35e9773198ef2d8b37c4ca223f08147db47de0b.zip
moving the root of source code from / to /src/
Diffstat (limited to 'src/web/lib/utils.py')
-rw-r--r--  src/web/lib/utils.py  57
1 file changed, 57 insertions(+), 0 deletions(-)
diff --git a/src/web/lib/utils.py b/src/web/lib/utils.py
new file mode 100644
index 00000000..aa552a12
--- /dev/null
+++ b/src/web/lib/utils.py
@@ -0,0 +1,57 @@
+import types
+import urllib.parse
+import logging
+import requests
+from hashlib import md5
+
+logger = logging.getLogger(__name__)
+
+
+def default_handler(obj):
+ """JSON handler for default query formatting"""
+ if hasattr(obj, 'isoformat'):
+ return obj.isoformat()
+ if hasattr(obj, 'dump'):
+ return obj.dump()
+ if isinstance(obj, (set, frozenset, types.GeneratorType)):
+ return list(obj)
+ if isinstance(obj, BaseException):
+ return str(obj)
+ raise TypeError("Object of type %s with value of %r "
+ "is not JSON serializable" % (type(obj), obj))
+
+
+def try_keys(dico, *keys):
+ for key in keys:
+ if key in dico:
+ return dico[key]
+ return
+
+
+def rebuild_url(url, base_split):
+ split = urllib.parse.urlsplit(url)
+ if split.scheme and split.netloc:
+ return url # url is fine
+ new_split = urllib.parse.SplitResult(
+ scheme=split.scheme or base_split.scheme,
+ netloc=split.netloc or base_split.netloc,
+ path=split.path, query='', fragment='')
+ return urllib.parse.urlunsplit(new_split)
+
+
+def try_get_icon_url(url, *splits):
+ for split in splits:
+ if split is None:
+ continue
+ rb_url = rebuild_url(url, split)
+ response = requests.get(rb_url, verify=False, timeout=10)
+ # if html in content-type, we assume it's a fancy 404 page
+ content_type = response.headers.get('content-type', '')
+ if response.ok and 'html' not in content_type and response.content:
+ return response.url
+ return None
+
+
+def to_hash(text):
+ return md5(text.encode('utf8') if hasattr(text, 'encode') else text)\
+ .hexdigest()
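
For context, a minimal usage sketch of the helpers added by this commit. It assumes the module is importable as web.lib.utils (with src/ on the Python path); the feed dictionary and the example.com URLs below are made-up for illustration and are not part of the commit.

import json
from datetime import datetime
from urllib.parse import urlsplit

from web.lib.utils import (default_handler, rebuild_url, to_hash,
                           try_get_icon_url, try_keys)

# default_handler lets json.dumps serialize dates, sets and generators.
payload = json.dumps({'fetched': datetime.now(), 'tags': {'rss', 'atom'}},
                     default=default_handler)

# try_keys returns the value of the first key present in the dict.
feed = {'icon_url': '/favicon.ico', 'title': 'Example feed'}  # hypothetical
icon_path = try_keys(feed, 'icon_url', 'icon')

# rebuild_url completes a relative URL with the scheme and netloc of a base;
# try_get_icon_url probes each candidate base over HTTP and returns the first
# URL that answers with non-HTML content.
base = urlsplit('https://example.com/rss.xml')
icon = try_get_icon_url(icon_path, base)

# to_hash gives a stable md5 fingerprint of an article link.
print(to_hash('https://example.com/article-1'))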