path: root/pyaggr3g470r/lib/utils.py
import types
import urllib.parse
import base64
import logging
import requests
from hashlib import md5

logger = logging.getLogger(__name__)


def default_handler(obj):
    """JSON handler for default query formatting"""
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    if hasattr(obj, 'dump'):
        return obj.dump()
    if isinstance(obj, (set, frozenset, types.GeneratorType)):
        return list(obj)
    if isinstance(obj, BaseException):
        return str(obj)
    raise TypeError("Object of type %s with value of %r "
                    "is not JSON serializable" % (type(obj), obj))

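# A minimal usage sketch (not part of the original module): default_handler
# is meant to be passed as the `default` fallback of json.dumps, so that
# dates, sets and exceptions survive serialization.
#
#     import json
#     from datetime import datetime
#     json.dumps({'date': datetime(2015, 1, 1), 'tags': {'news'}},
#                default=default_handler)
#     # -> '{"date": "2015-01-01T00:00:00", "tags": ["news"]}'
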

def try_keys(dico, *keys):
    """Return the value for the first of keys present in dico, else None."""
    for key in keys:
        if key in dico:
            return dico[key]
    return None

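# Usage sketch (the keys here are hypothetical, not from the original
# source): probe a parsed feed entry for whichever of several key
# spellings is present.
#
#     entry = {'summary': 'short text'}
#     try_keys(entry, 'content', 'summary')  # -> 'short text'
#     try_keys(entry, 'icon')                # -> None
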

def rebuild_url(url, base_split):
    """Complete a relative or scheme-less URL with parts from base_split.

    base_split is a urllib.parse.SplitResult; query and fragment are
    deliberately dropped from the rebuilt URL.
    """
    split = urllib.parse.urlsplit(url)
    if split.scheme and split.netloc:
        return url  # url is already absolute
    new_split = urllib.parse.SplitResult(
            scheme=split.scheme or base_split.scheme,
            netloc=split.netloc or base_split.netloc,
            path=split.path, query='', fragment='')
    return urllib.parse.urlunsplit(new_split)

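# Usage sketch (example URLs are made up): relative or scheme-less links
# are completed from the base site's SplitResult, absolute ones pass
# through untouched.
#
#     base = urllib.parse.urlsplit('http://example.com/atom.xml')
#     rebuild_url('/favicon.ico', base)
#     # -> 'http://example.com/favicon.ico'
#     rebuild_url('//cdn.example.com/icon.png', base)
#     # -> 'http://cdn.example.com/icon.png'
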

def try_get_b64icon(url, *splits):
    """Fetch an icon, rebuilding url against each candidate split in turn.

    Returns the content type and the base64-encoded body separated by a
    newline, or None if no candidate yields a usable icon.
    """
    for split in splits:
        if split is None:
            continue
        rb_url = rebuild_url(url, split)
        try:
            # verify=False: favicons are often served with broken TLS certs
            response = requests.get(rb_url, verify=False, timeout=10)
        except requests.RequestException:
            continue  # unreachable host or timeout, try the next candidate
        # if html is in the content type, we assume it's a fancy 404 page
        content_type = response.headers.get('content-type', '')
        if response.ok and 'html' not in content_type and response.content:
            return content_type + (
                    '\n%s' % base64.b64encode(response.content).decode('utf8'))
    return None

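# Usage sketch (URLs are illustrative): probe a favicon against the feed's
# split URL; None entries among the splits are simply skipped. The caller
# gets back the content type and the base64 payload in one string.
#
#     feed_split = urllib.parse.urlsplit('http://example.com/atom.xml')
#     icon = try_get_b64icon('/favicon.ico', feed_split, None)
#     if icon:
#         content_type, _, b64_data = icon.partition('\n')
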

def to_hash(text):
    """Return the hex md5 digest of text."""
    return md5(text.encode('utf8')).hexdigest()
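
# Usage sketch: to_hash gives a deterministic 32-character hex digest,
# handy as a compact key for a URL or title.
#
#     to_hash('http://example.com/article')  # -> 32-char hex string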