#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Newspipe - A Web based news aggregator.
# Copyright (C) 2010-2016 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information : https://github.com/newspipe/newspipe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 1.10 $"
__date__ = "$Date: 2010/12/07 $"
__revision__ = "$Date: 2016/11/22 $"
__copyright__ = "Copyright (c) Cedric Bonhomme"
__license__ = "AGPLv3"
import re
import os
import sys
import glob
import json
import logging
import operator
import urllib
import subprocess
import sqlalchemy
try:
    from urlparse import urlparse, parse_qs, urlunparse, urljoin  # Python 2
except ImportError:
    from urllib.parse import urlparse, parse_qs, urlunparse, urljoin
from collections import Counter
from contextlib import contextmanager
from flask import request
import conf
from web.controllers import ArticleController
from web.models import Article  # assumed home of the Article model used below
from lib.utils import clear_string
logger = logging.getLogger(__name__)
ALLOWED_EXTENSIONS = {'xml', 'opml', 'json'}
def is_safe_url(target):
"""
Ensures that a redirect target will lead to the same server.
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
ref_url.netloc == test_url.netloc
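# Illustrative sketch, assuming request.host_url is 'http://localhost:5000/':
#   is_safe_url('/user/management')         -> True  (same netloc)
#   is_safe_url('http://attacker.example/') -> False (different netloc)
#   is_safe_url('javascript:alert(1)')      -> False (scheme not http/https)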
def get_redirect_target():
"""
Looks at various hints to find the redirect target.
"""
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return target
def allowed_file(filename):
    """
    Check if the extension of the uploaded file is allowed.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
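# e.g. allowed_file('feeds.opml') -> True, allowed_file('feeds.csv') -> False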
@contextmanager
def opened_w_error(filename, mode="r"):
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
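# Usage sketch: the context manager yields a (file, error) pair instead of
# raising, so callers can handle a missing file inline (path is hypothetical):
#   with opened_w_error('/tmp/example.txt') as (f, err):
#       if err:
#           logger.warning('could not open file: %s', err)
#       else:
#           data = f.read()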
def fetch(id, feed_id=None):
    """
    Fetch the feeds in a new process.
    The default crawler ("asyncio") is launched with the manager.
    """
    cmd = [sys.executable, os.path.join(conf.BASE_DIR, 'manager.py'),
           'fetch_asyncio', '--user_id=' + str(id)]
    if feed_id:
        cmd.append('--feed_id=' + str(feed_id))
    return subprocess.Popen(cmd, stdout=subprocess.PIPE)
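# The crawl runs asynchronously: the caller gets the Popen handle back
# immediately, so e.g. fetch(user.id) from a view does not block the response.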
def history(user_id, year=None, month=None):
    """
    Count the articles of a user per year, or per month if a year is given.
    """
    articles_counter = Counter()
    articles = ArticleController(user_id).read()
    if year is not None:
        articles = articles.filter(
                sqlalchemy.extract('year', Article.date) == year)
    if month is not None:
        articles = articles.filter(
                sqlalchemy.extract('month', Article.date) == month)
    for article in articles.all():
        if year is not None:
            articles_counter[article.date.month] += 1
        else:
            articles_counter[article.date.year] += 1
    return articles_counter, articles
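# Usage sketch (counts depend on the user's articles):
#   history(user_id)             -> (Counter keyed by year, unfiltered query)
#   history(user_id, year=2016)  -> (Counter keyed by month of 2016, query
#                                    filtered to that year)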
def clean_url(url):
    """
    Remove the utm_* tracking parameters from the query string of a URL.
    """
parsed_url = urlparse(url)
qd = parse_qs(parsed_url.query, keep_blank_values=True)
filtered = dict((k, v) for k, v in qd.items()
if not k.startswith('utm_'))
return urlunparse([
parsed_url.scheme,
parsed_url.netloc,
urllib.parse.quote(urllib.parse.unquote(parsed_url.path)),
parsed_url.params,
urllib.parse.urlencode(filtered, doseq=True),
parsed_url.fragment
]).rstrip('=')
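# Example (hypothetical URL):
#   clean_url('https://example.org/post?utm_source=feed&utm_medium=rss&id=42')
#   -> 'https://example.org/post?id=42'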
def load_stop_words():
    """
    Load the stop words and return them in a list.
    """
    stop_words_lists = glob.glob(os.path.join(conf.BASE_DIR,
                                              'web/var/stop_words/*.txt'))
    stop_words = []
    for stop_words_list in stop_words_lists:
        with opened_w_error(stop_words_list, "r") as (stop_words_file, err):
            if err:
                # Skip unreadable files instead of discarding the words
                # collected so far.
                continue
            stop_words += stop_words_file.read().split(";")
    return stop_words
def top_words(articles, n=10, size=5):
    """
    Return the n most frequent words of at least `size` letters
    found in the content of the given articles.
    """
    stop_words = load_stop_words()
    words = Counter()
    wordre = re.compile(r'\b\w{%d,}\b' % size, re.I)
    for article in articles:
        for word in wordre.findall(clear_string(article.content)):
            word = word.lower()
            if word not in stop_words:
                words[word] += 1
    return words.most_common(n)
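# Example sketch, assuming neither word appears in the stop-word lists:
# if the articles mention "python" three times and "aggregator" twice,
# top_words(articles, n=2, size=5) -> [('python', 3), ('aggregator', 2)]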
def tag_cloud(tags):
    """
    Generate a tag cloud from a list of (word, count) tuples.
    """
    tags.sort(key=operator.itemgetter(0))
    max_count = max(count for _, count in tags)
    return '\n'.join('<font size=%d>%s</font>' %
                     (min(1 + count * 7 // max_count, 7), word)
                     for (word, count) in tags)
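# Example: tag_cloud([('python', 10), ('news', 2)]) renders, in
# alphabetical order:
#   <font size=2>news</font>   (1 + 2 * 7 // 10 = 2)
#   <font size=7>python</font> (1 + 10 * 7 // 10 = 8, capped at 7)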