| author | Cédric Bonhomme <cedric@cedricbonhomme.org> | 2016-11-08 14:39:47 +0100 |
|---|---|---|
| committer | Cédric Bonhomme <cedric@cedricbonhomme.org> | 2016-11-08 14:39:47 +0100 |
| commit | 2d72f44a90a76fe7450e59fdfdf4d42f44b9cd96 (patch) | |
| tree | 39895c10f68cf0b13d957073268769d04aa924a0 /src/web/lib | |
| parent | Closes section HTML tag. (diff) | |
| download | newspipe-2d72f44a90a76fe7450e59fdfdf4d42f44b9cd96.tar.gz, newspipe-2d72f44a90a76fe7450e59fdfdf4d42f44b9cd96.tar.bz2, newspipe-2d72f44a90a76fe7450e59fdfdf4d42f44b9cd96.zip | |
Various improvements to the crawler (better use of coroutines, test whether an article should be updated). Tags are now retrieved for the k-means clustering (previously achieved with the content of articles).
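The clustering code itself is not part of this diff; the snippet below is only a hypothetical sketch of how the tag sets collected by the new `construct_article()` could feed k-means instead of full article content. It assumes scikit-learn is available; the `articles` data and `n_clusters` value are illustrative only.

```python
# Hypothetical sketch (not from this commit): cluster articles by their
# tag sets rather than by their full content, assuming scikit-learn.
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

# Each article carries the 'tags' set built by construct_article() below.
articles = [
    {'title': 'Async IO in Python', 'tags': {'python', 'asyncio'}},
    {'title': 'Requests tips', 'tags': {'python', 'http'}},
    {'title': 'CSS grid layout', 'tags': {'css', 'web design'}},
]

# Join each tag set into one pseudo-document so TF-IDF can vectorize it.
corpus = [' '.join(article['tags']) for article in articles]
vectors = TfidfVectorizer().fit_transform(corpus)

labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(vectors)
print(labels)  # e.g. [0 0 1]: the two Python articles land in the same cluster
```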
Diffstat (limited to 'src/web/lib')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/web/lib/article_utils.py | 228 |
| -rw-r--r-- | src/web/lib/feed_utils.py | 5 |
| -rwxr-xr-x | src/web/lib/misc_utils.py | 2 |
| -rw-r--r-- | src/web/lib/utils.py | 24 |
4 files changed, 192 insertions, 67 deletions
diff --git a/src/web/lib/article_utils.py b/src/web/lib/article_utils.py
index f79c1b30..81a0b145 100644
--- a/src/web/lib/article_utils.py
+++ b/src/web/lib/article_utils.py
@@ -1,74 +1,60 @@
+import html
 import logging
-import dateutil.parser
+import re
 from datetime import datetime, timezone
+from enum import Enum
+from urllib.parse import SplitResult, urlsplit, urlunsplit
+
+import dateutil.parser
+from bs4 import BeautifulSoup, SoupStrainer
+from requests.exceptions import MissingSchema
 
 import conf
-from web.lib.utils import to_hash
+from web.lib.utils import jarr_get
 
 logger = logging.getLogger(__name__)
 
+PROCESSED_DATE_KEYS = {'published', 'created', 'updated'}
 
-def extract_id(entry, keys=[('link', 'link'), ('published', 'date'),
-                            ('updated', 'date')], force_id=False):
-    """For a given entry will return a dict that allows to identify it. The
-    dict will be constructed on the uid of the entry. if that identifier is
-    absent, the dict will be constructed upon the values of "keys".
-    """
-    entry_id = entry.get('entry_id') or entry.get('id')
-    if entry_id:
-        return {'entry_id': entry_id}
-    if not entry_id and force_id:
-        return to_hash("".join(entry[entry_key] for _, entry_key in keys
-                               if entry_key in entry).encode('utf8'))
-    else:
-        ids = {}
-        for entry_key, pyagg_key in keys:
-            if entry_key in entry and pyagg_key not in ids:
-                ids[pyagg_key] = entry[entry_key]
-                if 'date' in pyagg_key:
-                    try:
-                        ids[pyagg_key] = dateutil.parser.parse(ids[pyagg_key])\
-                                                        .isoformat()
-                    except ValueError as e:
-                        logger.exception("extract_id: " + str(e))
-                        ids[pyagg_key] = datetime.now().isoformat()
-        return ids
-
-
-def construct_article(entry, feed):
-    if hasattr(feed, 'dump'):  # this way can be a sqlalchemy obj or a dict
-        feed = feed.dump()
-    "Safe method to transorm a feedparser entry into an article"
-    now = datetime.now()
-    date = None
-    for date_key in ('published', 'created', 'date'):
-        if entry.get(date_key):
-            try:
-                date = dateutil.parser.parse(entry[date_key])\
-                        .astimezone(timezone.utc)
-            except Exception as e:
-                logger.exception(str(e))
-            else:
-                break
+def extract_id(entry):
+    """ extract a value from an entry that will identify it among the other of
+    that feed"""
+    return entry.get('entry_id') or entry.get('id') or entry['link']
 
-    updated_date = None
-    try:
-        updated_date = dateutil.parser.parse(entry['updated'])
-    except Exception:
-        pass
-    content = get_article_content(entry)
-    article_link = entry.get('link')
-    return {'feed_id': feed['id'],
-            'user_id': feed['user_id'],
-            'entry_id': extract_id(entry).get('entry_id', None),
-            'link': entry.get('link', feed['site_link']),
-            'title': entry.get('title', 'No title'),
-            'readed': False, 'like': False,
-            'content': content,
-            'retrieved_date': now,
-            'date': date or now,
-            'updated_date': updated_date or date or now}
 
+async def construct_article(entry, feed, fields=None, fetch=True):
+    "Safe method to transorm a feedparser entry into an article"
+    now = datetime.utcnow()
+    article = {}
+    def push_in_article(key, value):
+        if not fields or key in fields:
+            article[key] = value
+    push_in_article('feed_id', feed.id)
+    push_in_article('user_id', feed.user_id)
+    push_in_article('entry_id', extract_id(entry))
+    push_in_article('retrieved_date', now)
+    if not fields or 'date' in fields:
+        for date_key in PROCESSED_DATE_KEYS:
+            if entry.get(date_key):
+                try:
+                    article['date'] = dateutil.parser.parse(entry[date_key])\
+                            .astimezone(timezone.utc)
+                except Exception as e:
+                    logger.exception(e)
+                else:
+                    break
+    push_in_article('content', get_article_content(entry))
+    if fields is None or {'link', 'title'}.intersection(fields):
+        link, title = await get_article_details(entry, fetch)
+        push_in_article('link', link)
+        push_in_article('title', title)
+        if 'content' in article:
+            #push_in_article('content', clean_urls(article['content'], link))
+            push_in_article('content', article['content'])
+    push_in_article('tags', {tag.get('term').strip()
+                             for tag in entry.get('tags', [])})
+    return article
+
 
 def get_article_content(entry):
     content = ''
@@ -77,3 +63,123 @@ def get_article_content(entry):
     elif entry.get('summary'):
         content = entry['summary']
     return content
+
+
+async def get_article_details(entry, fetch=True):
+    article_link = entry.get('link')
+    article_title = html.unescape(entry.get('title', ''))
+    if fetch and conf.CRAWLER_RESOLV and article_link or not article_title:
+        try:
+            # resolves URL behind proxies (like feedproxy.google.com)
+            response = await jarr_get(article_link, timeout=5)
+        except MissingSchema:
+            split, failed = urlsplit(article_link), False
+            for scheme in 'https', 'http':
+                new_link = urlunsplit(SplitResult(scheme, *split[1:]))
+                try:
+                    response = await jarr_get(new_link, timeout=5)
+                except Exception as error:
+                    failed = True
+                    continue
+                failed = False
+                article_link = new_link
+                break
+            if failed:
+                return article_link, article_title or 'No title'
+        except Exception as error:
+            logger.info("Unable to get the real URL of %s. Won't fix "
+                        "link or title. Error: %s", article_link, error)
+            return article_link, article_title or 'No title'
+        article_link = response.url
+        if not article_title:
+            bs_parsed = BeautifulSoup(response.content, 'html.parser',
+                                      parse_only=SoupStrainer('head'))
+            try:
+                article_title = bs_parsed.find_all('title')[0].text
+            except IndexError:  # no title
+                pass
+    return article_link, article_title or 'No title'
+
+
+class FiltersAction(Enum):
+    READ = 'mark as read'
+    LIKED = 'mark as favorite'
+    SKIP = 'skipped'
+
+
+class FiltersType(Enum):
+    REGEX = 'regex'
+    MATCH = 'simple match'
+    EXACT_MATCH = 'exact match'
+    TAG_MATCH = 'tag match'
+    TAG_CONTAINS = 'tag contains'
+
+
+class FiltersTrigger(Enum):
+    MATCH = 'match'
+    NO_MATCH = 'no match'
+
+
+def process_filters(filters, article, only_actions=None):
+    skipped, read, liked = False, None, False
+    filters = filters or []
+    if only_actions is None:
+        only_actions = set(FiltersAction)
+    for filter_ in filters:
+        match = False
+        try:
+            pattern = filter_.get('pattern', '')
+            filter_type = FiltersType(filter_.get('type'))
+            filter_action = FiltersAction(filter_.get('action'))
+            filter_trigger = FiltersTrigger(filter_.get('action on'))
+            if filter_type is not FiltersType.REGEX:
+                pattern = pattern.lower()
+        except ValueError:
+            continue
+        if filter_action not in only_actions:
+            logger.debug('ignoring filter %r' % filter_)
+            continue
+        if filter_action in {FiltersType.REGEX, FiltersType.MATCH,
+                FiltersType.EXACT_MATCH} and 'title' not in article:
+            continue
+        if filter_action in {FiltersType.TAG_MATCH, FiltersType.TAG_CONTAINS} \
+                and 'tags' not in article:
+            continue
+        title = article.get('title', '').lower()
+        tags = [tag.lower() for tag in article.get('tags', [])]
+        if filter_type is FiltersType.REGEX:
+            match = re.match(pattern, title)
+        elif filter_type is FiltersType.MATCH:
+            match = pattern in title
+        elif filter_type is FiltersType.EXACT_MATCH:
+            match = pattern == title
+        elif filter_type is FiltersType.TAG_MATCH:
+            match = pattern in tags
+        elif filter_type is FiltersType.TAG_CONTAINS:
+            match = any(pattern in tag for tag in tags)
+        take_action = match and filter_trigger is FiltersTrigger.MATCH \
+                or not match and filter_trigger is FiltersTrigger.NO_MATCH
+
+        if not take_action:
+            continue
+
+        if filter_action is FiltersAction.READ:
+            read = True
+        elif filter_action is FiltersAction.LIKED:
+            liked = True
+        elif filter_action is FiltersAction.SKIP:
+            skipped = True
+
+    if skipped or read or liked:
+        logger.info("%r applied on %r", filter_action.value,
+                    article.get('link') or article.get('title'))
+    return skipped, read, liked
+
+
+def get_skip_and_ids(entry, feed):
+    entry_ids = construct_article(entry, feed,
+            {'entry_id', 'feed_id', 'user_id'}, fetch=False)
+    skipped, _, _ = process_filters(feed.filters,
+            construct_article(entry, feed, {'title', 'tags'}, fetch=False),
+            {FiltersAction.SKIP})
+    return skipped, entry_ids
diff --git a/src/web/lib/feed_utils.py b/src/web/lib/feed_utils.py
index 9925613f..94ae6e53 100644
--- a/src/web/lib/feed_utils.py
+++ b/src/web/lib/feed_utils.py
@@ -3,7 +3,7 @@ import urllib
 import logging
 import requests
 import feedparser
-from conf import USER_AGENT
+from conf import CRAWLER_USER_AGENT
 from bs4 import BeautifulSoup, SoupStrainer
 
 from web.lib.utils import try_keys, try_get_icon_url, rebuild_url
@@ -32,7 +32,8 @@ def escape_keys(*keys):
 
 @escape_keys('title', 'description')
 def construct_feed_from(url=None, fp_parsed=None, feed=None, query_site=True):
-    requests_kwargs = {'headers': {'User-Agent': USER_AGENT}, 'verify': False}
+    requests_kwargs = {'headers': {'User-Agent': CRAWLER_USER_AGENT},
+                       'verify': False}
     if url is None and fp_parsed is not None:
         url = fp_parsed.get('url')
     if url is not None and fp_parsed is None:
diff --git a/src/web/lib/misc_utils.py b/src/web/lib/misc_utils.py
index 22d52b70..1359c798 100755
--- a/src/web/lib/misc_utils.py
+++ b/src/web/lib/misc_utils.py
@@ -109,6 +109,8 @@ def fetch(id, feed_id=None):
     """
     cmd = [sys.executable, conf.BASE_DIR + '/manager.py', 'fetch_asyncio',
            '--user_id='+str(id)]
+    if feed_id:
+        cmd.append('--feed_id='+str(feed_id))
     return subprocess.Popen(cmd, stdout=subprocess.PIPE)
 
 def history(user_id, year=None, month=None):
diff --git a/src/web/lib/utils.py b/src/web/lib/utils.py
index f2bed3ff..d206b769 100644
--- a/src/web/lib/utils.py
+++ b/src/web/lib/utils.py
@@ -6,6 +6,8 @@ import requests
 from hashlib import md5
 from flask import request, url_for
 
+import conf
+
 logger = logging.getLogger(__name__)
 
 
@@ -46,11 +48,17 @@ def try_get_icon_url(url, *splits):
         if split is None:
             continue
         rb_url = rebuild_url(url, split)
-        response = requests.get(rb_url, verify=False, timeout=10)
+        response = None
         # if html in content-type, we assume it's a fancy 404 page
-        content_type = response.headers.get('content-type', '')
-        if response.ok and 'html' not in content_type and response.content:
-            return response.url
+        try:
+            response = jarr_get(rb_url)
+            content_type = response.headers.get('content-type', '')
+        except Exception:
+            pass
+        else:
+            if response is not None and response.ok \
+                    and 'html' not in content_type and response.content:
+                return response.url
     return None
 
 
@@ -71,3 +79,11 @@ def clear_string(data):
 
 def redirect_url(default='home'):
     return request.args.get('next') or request.referrer or url_for(default)
+
+
+async def jarr_get(url, **kwargs):
+    request_kwargs = {'verify': False, 'allow_redirects': True,
+                      'timeout': conf.CRAWLER_TIMEOUT,
+                      'headers': {'User-Agent': conf.CRAWLER_USER_AGENT}}
+    request_kwargs.update(kwargs)
+    return requests.get(url, **request_kwargs)
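A hypothetical usage sketch of the `process_filters()` function introduced in `src/web/lib/article_utils.py` above. The filter keys and values (`'type'`, `'action'`, `'action on'`) come from the enums in the diff; the import path assumes `src/` is on `sys.path` as elsewhere in the project, and the sample filter and article are made up for illustration.

```python
# Sketch only: exercise the new process_filters() with one "mark as read"
# filter. The filter dict keys and values mirror FiltersType, FiltersAction
# and FiltersTrigger from the diff above.
from web.lib.article_utils import process_filters

filters = [{'pattern': 'python', 'type': 'simple match',
            'action': 'mark as read', 'action on': 'match'}]
article = {'title': 'Python 3.6 released', 'tags': {'python', 'release'}}

skipped, read, liked = process_filters(filters, article)
print(skipped, read, liked)  # expected: False True False
```

The crawler's own helper, `get_skip_and_ids()`, drives the same function with `only_actions={FiltersAction.SKIP}` so that skipped entries can be dropped before any network fetch happens.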