aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCédric Bonhomme <cedric@cedricbonhomme.org>2016-04-10 13:57:50 +0200
committerCédric Bonhomme <cedric@cedricbonhomme.org>2016-04-10 13:57:50 +0200
commit57b08c20597f24b46679e2d209b9e13886daa3e3 (patch)
tree091aba4476b2db7257e49e792269d1a15cb0e760
parentMoved notifications module. (diff)
downloadnewspipe-57b08c20597f24b46679e2d209b9e13886daa3e3.tar.gz
newspipe-57b08c20597f24b46679e2d209b9e13886daa3e3.tar.bz2
newspipe-57b08c20597f24b46679e2d209b9e13886daa3e3.zip
Start to clean generic utils functions.
-rw-r--r--src/web/forms.py8
-rwxr-xr-xsrc/web/lib/misc_utils.py281
-rw-r--r--src/web/views/feed.py10
-rw-r--r--src/web/views/home.py4
-rw-r--r--src/web/views/user.py12
5 files changed, 298 insertions, 17 deletions
diff --git a/src/web/forms.py b/src/web/forms.py
index fbfff85f..d1999b8b 100644
--- a/src/web/forms.py
+++ b/src/web/forms.py
@@ -35,7 +35,7 @@ from wtforms import TextField, TextAreaField, PasswordField, BooleanField, \
SubmitField, IntegerField, SelectField, validators, HiddenField
from flask.ext.wtf.html5 import EmailField
-from web import utils
+from web.lib import misc_utils
from web.controllers import UserController
from web.models import User
@@ -76,12 +76,12 @@ class RedirectForm(Form):
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
if not self.next.data:
- self.next.data = utils.get_redirect_target() or ''
+ self.next.data = misc_utils.get_redirect_target() or ''
def redirect(self, endpoint='home', **values):
- if utils.is_safe_url(self.next.data):
+ if misc_utils.is_safe_url(self.next.data):
return redirect(self.next.data)
- target = utils.get_redirect_target()
+ target = misc_utils.get_redirect_target()
return redirect(target or url_for(endpoint, **values))
diff --git a/src/web/lib/misc_utils.py b/src/web/lib/misc_utils.py
new file mode 100755
index 00000000..92bdde8a
--- /dev/null
+++ b/src/web/lib/misc_utils.py
@@ -0,0 +1,281 @@
+#! /usr/bin/env python
+#-*- coding: utf-8 -*-
+
+# JARR - A Web based news aggregator.
+# Copyright (C) 2010-2016 Cédric Bonhomme - https://www.cedricbonhomme.org
+#
+# For more information : https://github.com/JARR-aggregator/JARR
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+__author__ = "Cedric Bonhomme"
+__version__ = "$Revision: 1.8 $"
+__date__ = "$Date: 2010/12/07 $"
+__revision__ = "$Date: 2016/04/10 $"
+__copyright__ = "Copyright (c) Cedric Bonhomme"
+__license__ = "AGPLv3"
+
+#
+# This file provides functions used for:
+# - import from a JSON file;
+# - generation of tags cloud;
+# - HTML processing.
+#
+
+import re
+import sys
+import glob
+import opml
+import json
+import logging
+import datetime
+import operator
+import urllib
+import subprocess
+import sqlalchemy
+try:
+ from urlparse import urlparse, parse_qs, urlunparse
+except:
+ from urllib.parse import urlparse, parse_qs, urlunparse, urljoin
+from bs4 import BeautifulSoup
+from collections import Counter
+from contextlib import contextmanager
+from flask import request
+
+import conf
+from bootstrap import db
+from web import controllers
+from web.models import User, Feed, Article
+from web.lib.utils import clear_string
+
+logger = logging.getLogger(__name__)
+
+ALLOWED_EXTENSIONS = set(['xml', 'opml', 'json'])
+
def is_safe_url(target):
    """
    Ensure that a redirect target will lead to the same server.

    The target is resolved against the current request's host URL and
    accepted only when it keeps an http/https scheme and the same
    network location — this guards against open-redirect attacks.
    """
    base = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    if candidate.scheme not in ('http', 'https'):
        return False
    return base.netloc == candidate.netloc
+
def get_redirect_target():
    """
    Look at various hints to find the redirect target.

    Checks the ``next`` query parameter first, then the HTTP referrer,
    and returns the first candidate that is safe to redirect to
    (or None when no safe candidate exists).
    """
    candidates = (request.args.get('next'), request.referrer)
    for candidate in candidates:
        if candidate and is_safe_url(candidate):
            return candidate
+
def allowed_file(filename, extensions=None):
    """
    Check if the uploaded file name has an allowed extension.

    The comparison is case-insensitive, so ``feeds.OPML`` is accepted
    just like ``feeds.opml`` (the previous check rejected upper-case
    extensions).

    :param filename: name of the uploaded file.
    :param extensions: set of allowed lower-case extensions; defaults
        to the module-level ALLOWED_EXTENSIONS.
    :return: True when the name has an allowed extension.
    """
    if extensions is None:
        extensions = ALLOWED_EXTENSIONS
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in extensions
+
@contextmanager
def opened_w_error(filename, mode="r"):
    """
    Context manager yielding a ``(file, error)`` pair.

    On success yields ``(file_object, None)`` and closes the file on
    exit; when opening fails yields ``(None, IOError)`` so the caller
    can decide how to handle the failure without a try/except.
    """
    try:
        handle = open(filename, mode)
    except IOError as err:
        yield None, err
        return
    try:
        yield handle, None
    finally:
        handle.close()
+
def fetch(id, feed_id=None):
    """
    Fetch the feeds in a new process.

    Launches the "asyncio" crawler through manager.py, for the given
    user id and (optionally) a single feed id.  Returns the Popen
    handle; the caller may read its stdout pipe.
    """
    manager_script = conf.BASE_DIR + '/manager.py'
    command = [sys.executable, manager_script, 'fetch_asyncio',
               str(id), str(feed_id)]
    return subprocess.Popen(command, stdout=subprocess.PIPE)
+
def history(user_id, year=None, month=None):
    """
    Sort articles by year and month.

    Returns a ``(Counter, query)`` pair: when *year* is None the
    Counter maps each year to its article count, otherwise it maps
    each month of that year; the query yields the matching articles.
    """
    counter = Counter()
    articles = controllers.ArticleController(user_id).read()
    if year is not None:
        articles = articles.filter(
                sqlalchemy.extract('year', Article.date) == year)
    if month is not None:
        articles = articles.filter(
                sqlalchemy.extract('month', Article.date) == month)
    for article in articles.all():
        key = article.date.month if year is not None else article.date.year
        counter[key] += 1
    return counter, articles
+
def import_opml(email, opml_content):
    """
    Import new feeds from an OPML file.

    Looks up the user by *email*, parses *opml_content* with the
    ``opml`` library and creates a Feed row for every leaf outline
    that the user is not already subscribed to.

    :return: the number of feeds created.
    :raises: re-raises any parsing error from ``opml.from_string``.
    """
    user = User.query.filter(User.email == email).first()
    try:
        subscriptions = opml.from_string(opml_content)
    except:
        logger.exception("Parsing OPML file failed:")
        raise

    def read(subsubscription, nb=0):
        """
        Parse recursively through the categories and sub-categories.
        """
        for subscription in subsubscription:
            if len(subscription) != 0:
                # Non-empty outline: it is a category, recurse into it.
                nb = read(subscription, nb)
            else:
                # NOTE(review): the opml library appears to raise when an
                # outline attribute is missing, hence the broad try/except
                # around each attribute access — confirm against the lib.
                try:
                    title = subscription.text
                except:
                    title = ""
                try:
                    description = subscription.description
                except:
                    description = ""
                try:
                    link = subscription.xmlUrl
                except:
                    # An outline without a feed URL is useless: skip it.
                    continue
                # Skip feeds the user already subscribes to (match on link).
                if None != Feed.query.filter(Feed.user_id == user.id, Feed.link == link).first():
                    continue
                try:
                    site_link = subscription.htmlUrl
                except:
                    site_link = ""
                new_feed = Feed(title=title, description=description,
                                link=link, site_link=site_link,
                                enabled=True)
                user.feeds.append(new_feed)
                nb += 1
        return nb
    nb = read(subscriptions)
    # Single commit after the whole tree has been walked.
    db.session.commit()
    return nb
+
def import_json(email, json_content):
    """
    Import an account from a JSON file.

    Recreates first the feeds and then the articles of the user
    identified by *email* from a JSON export.  Feeds and articles
    already present (matched on their link) are left untouched.

    :return: a ``(nb_feeds, nb_articles)`` tuple with the number of
        feeds and articles created.
    """
    user = User.query.filter(User.email == email).first()
    json_account = json.loads(json_content)
    nb_feeds, nb_articles = 0, 0
    # Create feeds:
    for feed in json_account["result"]:
        # Skip feeds the user already has (matched on the feed link).
        if None != Feed.query.filter(Feed.user_id == user.id,
                                     Feed.link == feed["link"]).first():
            continue
        new_feed = Feed(title=feed["title"],
                        description="",
                        link=feed["link"],
                        site_link=feed["site_link"],
                        # Timestamps are serialized as epoch seconds.
                        created_date=datetime.datetime.
                            fromtimestamp(int(feed["created_date"])),
                        enabled=feed["enabled"])
        user.feeds.append(new_feed)
        nb_feeds += 1
    # Commit feeds first so the second pass can look them up by link.
    db.session.commit()
    # Create articles:
    for feed in json_account["result"]:
        user_feed = Feed.query.filter(Feed.user_id == user.id,
                                      Feed.link == feed["link"]).first()
        if None != user_feed:
            for article in feed["articles"]:
                # Only add articles not already present for this feed.
                if None == Article.query.filter(Article.user_id == user.id,
                                   Article.feed_id == user_feed.id,
                                   Article.link == article["link"]).first():
                    new_article = Article(link=article["link"],
                                          title=article["title"],
                                          content=article["content"],
                                          readed=article["readed"],
                                          like=article["like"],
                                          retrieved_date=datetime.datetime.
                                              fromtimestamp(int(article["retrieved_date"])),
                                          date=datetime.datetime.
                                              fromtimestamp(int(article["date"])),
                                          user_id=user.id,
                                          feed_id=user_feed.id)
                    user_feed.articles.append(new_article)
                    nb_articles += 1
    db.session.commit()
    return nb_feeds, nb_articles
+
def clean_url(url):
    """
    Remove utm_* tracking parameters from *url*.

    The query string is re-encoded without any parameter whose name
    starts with ``utm_``; the path is re-quoted, and a trailing ``=``
    (left over from a blank-valued last parameter) is stripped.
    """
    parts = urlparse(url)
    query = parse_qs(parts.query, keep_blank_values=True)
    kept = {key: values for key, values in query.items()
            if not key.startswith('utm_')}
    cleaned = urlunparse([
        parts.scheme,
        parts.netloc,
        urllib.parse.quote(urllib.parse.unquote(parts.path)),
        parts.params,
        urllib.parse.urlencode(kept, doseq=True),
        parts.fragment,
    ])
    return cleaned.rstrip('=')
+
def load_stop_words():
    """
    Load the stop words and return them in a list.

    Every ``*.txt`` file under ./JARR/var/stop_words is expected to
    contain words separated by semicolons.  A file that cannot be read
    is skipped with a warning; previously a single read error reset the
    accumulator and silently discarded every word collected so far.
    """
    stop_words = []
    for words_path in glob.glob('./JARR/var/stop_words/*.txt'):
        with opened_w_error(words_path, "r") as (words_file, err):
            if err:
                # Skip this list but keep what was already collected.
                logger.warning("Could not read stop words file %r: %s",
                               words_path, err)
                continue
            stop_words += words_file.read().split(";")
    return stop_words
+
def top_words(articles, n=10, size=5):
    """
    Return the n most frequent words in a list.

    Words shorter than *size* characters and words found in the stop
    word lists are ignored; matching is case-insensitive and counts
    are taken over the cleaned content of every article.
    """
    stop_words = load_stop_words()
    pattern = re.compile(r'\b\w{%s,}\b' % size, re.I)
    counter = Counter()
    for article in articles:
        for token in pattern.findall(clear_string(article.content)):
            token = token.lower()
            if token not in stop_words:
                counter[token] += 1
    return counter.most_common(n)
+
def tag_cloud(tags):
    """
    Generate an HTML tag cloud.

    *tags* is a list of ``(word, count)`` pairs; it is sorted in place
    alphabetically, and each word is rendered as a link whose font
    size (1..7) is proportional to its count relative to the maximum.
    """
    if not tags:
        # The original expression never evaluates max() on an empty
        # list, so an empty input yields an empty string.
        return ''
    tags.sort(key=operator.itemgetter(0))
    biggest = max([tag[1] for tag in tags])
    lines = []
    for word, count in tags:
        font_size = min(1 + count * 7 / biggest, 7)
        lines.append('<font size=%d><a href="/search?query=%s" title="Count: %s">%s</a></font>'
                     % (font_size, word, format(count, ',d'), word))
    return '\n'.join(lines)
+
if __name__ == "__main__":
    # Ad-hoc manual test of the OPML import.
    # NOTE(review): import_opml expects the OPML *content* string, but a
    # file path is passed here — confirm this still works as intended.
    import_opml("root@jarr.localhost", "./var/feeds_test.opml")
    #import_opml("root@jarr.localhost", "./var/JARR.opml")
diff --git a/src/web/views/feed.py b/src/web/views/feed.py
index b6e0974a..b49495f3 100644
--- a/src/web/views/feed.py
+++ b/src/web/views/feed.py
@@ -10,7 +10,7 @@ from flask.ext.babel import gettext
from flask.ext.login import login_required, current_user
import conf
-from web import utils
+from web.lib import misc_utils, utils
from web.lib.view_utils import etag_match
from web.lib.feed_utils import construct_feed_from
from web.forms import AddFeedForm
@@ -47,8 +47,8 @@ def feed(feed_id=None):
articles = ArticleController(current_user.id) \
.read(feed_id=feed_id) \
.order_by(desc("date")).all()
- top_words = utils.top_words(articles, n=50, size=int(word_size))
- tag_cloud = utils.tag_cloud(top_words)
+ top_words = misc_utils.top_words(articles, n=50, size=int(word_size))
+ tag_cloud = misc_utils.tag_cloud(top_words)
today = datetime.now()
try:
@@ -126,7 +126,7 @@ def bookmarklet():
feed = feed_contr.create(**feed)
flash(gettext('Feed was successfully created.'), 'success')
if feed.enabled and conf.CRAWLING_METHOD == "classic":
- utils.fetch(current_user.id, feed.id)
+ misc_utils.fetch(current_user.id, feed.id)
flash(gettext("Downloading articles for the new feed..."), 'info')
return redirect(url_for('feed.form', feed_id=feed.id))
@@ -215,7 +215,7 @@ def process_form(feed_id=None):
feed_title=new_feed.title), 'success')
if conf.CRAWLING_METHOD == "classic":
- utils.fetch(current_user.id, new_feed.id)
+ misc_utils.fetch(current_user.id, new_feed.id)
flash(gettext("Downloading articles for the new feed..."), 'info')
return redirect(url_for('feed.form', feed_id=new_feed.id))
diff --git a/src/web/views/home.py b/src/web/views/home.py
index 12a06024..5a8312b4 100644
--- a/src/web/views/home.py
+++ b/src/web/views/home.py
@@ -8,7 +8,7 @@ from flask.ext.babel import gettext
import conf
from web.lib.utils import redirect_url
-from web import utils
+from web.lib import misc_utils
from web.lib.view_utils import etag_match
from web.models import Article
from web.views.common import jsonify
@@ -148,7 +148,7 @@ def fetch(feed_id=None):
"""
if conf.CRAWLING_METHOD == "classic" \
and (not conf.ON_HEROKU or current_user.is_admin):
- utils.fetch(current_user.id, feed_id)
+ misc_utils.fetch(current_user.id, feed_id)
flash(gettext("Downloading articles..."), "info")
else:
flash(gettext("The manual retrieving of news is only available " +
diff --git a/src/web/views/user.py b/src/web/views/user.py
index 8568439f..3928f5dc 100644
--- a/src/web/views/user.py
+++ b/src/web/views/user.py
@@ -7,7 +7,7 @@ from flask.ext.login import login_required, current_user
import conf
from notifications import notifications
-from web import utils
+from web.lib import misc_utils
from web.lib.user_utils import confirm_token
from web.controllers import (UserController, FeedController, ArticleController,
CategoryController)
@@ -28,13 +28,13 @@ def management():
if None != request.files.get('opmlfile', None):
# Import an OPML file
data = request.files.get('opmlfile', None)
- if not utils.allowed_file(data.filename):
+ if not misc_utils.allowed_file(data.filename):
flash(gettext('File not allowed.'), 'danger')
else:
try:
- nb = utils.import_opml(current_user.email, data.read())
+ nb = misc_utils.import_opml(current_user.email, data.read())
if conf.CRAWLING_METHOD == "classic":
- utils.fetch(current_user.email, None)
+ misc_utils.fetch(current_user.email, None)
flash(str(nb) + ' ' + gettext('feeds imported.'),
"success")
flash(gettext("Downloading articles..."), 'info')
@@ -44,11 +44,11 @@ def management():
elif None != request.files.get('jsonfile', None):
# Import an account
data = request.files.get('jsonfile', None)
- if not utils.allowed_file(data.filename):
+ if not misc_utils.allowed_file(data.filename):
flash(gettext('File not allowed.'), 'danger')
else:
try:
- nb = utils.import_json(current_user.email, data.read())
+ nb = misc_utils.import_json(current_user.email, data.read())
flash(gettext('Account imported.'), "success")
except:
flash(gettext("Impossible to import the account."),
bgstack15