aboutsummaryrefslogtreecommitdiff
path: root/pyaggr3g470r/lib/feed_utils.py
blob: a7149d7953edb6c2095a291055c92256a363a08b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import logging
import urllib
import urllib.parse

import feedparser
import requests
from bs4 import BeautifulSoup, SoupStrainer

from pyaggr3g470r.lib.utils import try_keys, try_splits, rebuild_url

logger = logging.getLogger(__name__)


def construct_feed_from(url=None, fp_parsed=None, feed=None, query_site=True):
    """Build a feed dict (link, site_link, title, description, icon).

    :param url: address of the feed; fetched when *fp_parsed* is not given.
    :param fp_parsed: an existing feedparser result to reuse instead of
        fetching *url*.
    :param feed: an existing feed dict to update (a new one is created
        otherwise).
    :param query_site: when True and keys are still missing after parsing,
        fetch the site's HTML <head> to fill in title, icon and link.
    :return: the (possibly updated) feed dict.
    """
    if url is None and fp_parsed is not None:
        url = fp_parsed.get('url')
    if url is not None and fp_parsed is None:
        try:
            # NOTE(review): no timeout and verify=False are kept as-is;
            # a timeout would avoid hanging on unresponsive hosts.
            response = requests.get(url, verify=False)
            fp_parsed = feedparser.parse(response.content,
                                         request_headers=response.headers)
        except Exception:
            logger.exception('failed to retrieve that url')
            # Minimal stand-in so the code below takes the "bozo" branch.
            fp_parsed = {'bozo': True}
    assert url is not None and fp_parsed is not None
    feed = feed or {}
    feed_split = urllib.parse.urlsplit(url)
    # Bug fix: site_split was previously unbound when no site_link could be
    # determined, raising NameError on the icon-resolution paths below.
    site_split = None
    if not fp_parsed['bozo']:
        feed['link'] = url
        feed['site_link'] = try_keys(fp_parsed['feed'], 'href', 'link')
        feed['title'] = fp_parsed['feed'].get('title')
        feed['description'] = try_keys(fp_parsed['feed'], 'subtitle', 'title')
        feed['icon'] = try_keys(fp_parsed['feed'], 'icon')
    else:
        # Parsing failed: assume the given URL is the site itself.
        feed['site_link'] = url

    if feed.get('site_link'):
        feed['site_link'] = rebuild_url(feed['site_link'], feed_split)
        site_split = urllib.parse.urlsplit(feed['site_link'])

    if feed.get('icon'):
        # Resolve a possibly-relative icon URL against site then feed URL.
        feed['icon'] = try_splits(feed['icon'], site_split, feed_split)
        if feed['icon'] is None:
            del feed['icon']

    # Stop here when we cannot (no site_link), must not (query_site=False),
    # or need not (everything already filled in) query the site page.
    if not feed.get('site_link') or not query_site \
            or all(bool(feed.get(key)) for key in ('link', 'title', 'icon')):
        return feed

    # Fetch only the <head> of the site page to fill in missing metadata.
    response = requests.get(feed['site_link'], verify=False)
    bs_parsed = BeautifulSoup(response.content, 'html.parser',
                              parse_only=SoupStrainer('head'))

    if not feed.get('title'):
        try:
            feed['title'] = bs_parsed.find_all('title')[0].text
        except Exception:
            pass  # best effort: leave the title unset

    def check_keys(**kwargs):
        """Return a predicate matching tags whose attribute *key* contains
        every value in *vals* (e.g. rel=['icon', 'shortcut'])."""
        def wrapper(elem):
            for key, vals in kwargs.items():
                if not elem.has_attr(key):
                    return False
                if not all(val in elem.attrs[key] for val in vals):
                    return False
            return True
        return wrapper

    if not feed.get('icon'):
        icons = bs_parsed.find_all(check_keys(rel=['icon', 'shortcut']))
        if not icons:
            # Fall back to any rel="icon" without the "shortcut" token.
            icons = bs_parsed.find_all(check_keys(rel=['icon']))
        for icon in icons:
            feed['icon'] = try_splits(icon.attrs['href'],
                                      site_split, feed_split)
            if feed['icon'] is not None:
                break

        if feed.get('icon') is None:
            # Last resort: the conventional favicon location.
            feed['icon'] = try_splits('/favicon.ico', site_split, feed_split)
        if 'icon' in feed and feed['icon'] is None:
            del feed['icon']

    if not feed.get('link'):
        alternate = bs_parsed.find_all(check_keys(rel=['alternate'],
                type=['application/rss+xml']))
        if alternate:
            feed['link'] = alternate[0].attrs['href']
    return feed
bgstack15