#!/usr/bin/env python3
# Generate advert from web feeds
# Copyright (C) 2022  Nguyễn Gia Phong
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

__version__ = '0.0.1'

from argparse import ArgumentParser, FileType
from asyncio import gather, open_connection, run
from collections import namedtuple
from datetime import datetime
from email.utils import parsedate_to_datetime
from http.client import HTTPResponse
from io import BytesIO
from operator import attrgetter
from pathlib import Path
from re import compile as regex
from sys import stdin, stdout
from textwrap import shorten
from urllib.error import HTTPError
from urllib.parse import urljoin, urlsplit
from warnings import warn
from xml.etree.ElementTree import fromstring as parse_xml

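# Minimal HTTP/1.0 GET request; with HTTP/1.0 the server closes the
# connection once the response is sent, so reading to EOF is sufficient.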
REQUEST = 'GET {} HTTP/1.0\r\nHost: {}\r\n\r\n'
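# Non-greedy match for HTML tags, used to strip markup from summaries.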
HTML_TAG = regex('<.+?>')

Advert = namedtuple('Advert', ('source_title', 'source_link',
                               'title', 'link', 'time', 'summary'))


def read_urls(path):
    """Read newline-separated URLs from given file path."""
    return Path(path).read_text().splitlines()


class BytesSocket:
    """Duck socket for HTTPResponse."""
    def __init__(self, response):
        self.bytes = response

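    # http.client.HTTPResponse only needs a socket-like object whose
    # makefile('rb') returns a binary stream, so wrapping the raw bytes
    # lets the stdlib parser handle a response fetched via asyncio.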
    def makefile(self, mode, *args, **kwargs):
        """Return a bytes stream."""
        assert mode == 'rb'
        return BytesIO(self.bytes)


def parse_rss_item(xml):
    """Parse given RSS item."""
    time = datetime.fromtimestamp(0)  # epoch default: undated items sort last
    description = ''
    for child in xml:
        if child.tag == 'title':
            title = child.text
        elif child.tag == 'link':
            link = child.text
        elif child.tag == 'pubDate':
            time = parsedate_to_datetime(child.text)
        elif child.tag == 'description':
            description = child.text
        elif child.tag.endswith('}encoded') and not description:
            # a namespaced <content:encoded> parses as '{...}encoded'
            description = child.text
    if not description:
        description = xml.text
    return title, link, time, description
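
# For illustration, an item such as
#   <item><title>Hello</title><link>https://example.com/hello</link>
#         <pubDate>Sat, 01 Jan 2022 00:00:00 +0000</pubDate>
#         <description>Hi!</description></item>
# parses to ('Hello', 'https://example.com/hello',
#            datetime(2022, 1, 1, tzinfo=timezone.utc), 'Hi!').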


def parse_rss(xml, title):
    """Parse given RSS feed."""
    items = []
    for child in xml:
        if child.tag == 'title':
            title = child.text
        elif child.tag == 'link':
            link = child.text
        elif child.tag == 'item':
            items.append(parse_rss_item(child))
    return title, link, items


def parse_atom_entry(xml):
    """Parse given Atom entry."""
    time = datetime.fromtimestamp(0)  # epoch default: undated entries sort last
    summary = ''
    for child in xml:
        if child.tag.endswith('Atom}title'):
            title = child.text
        elif child.tag.endswith('Atom}link'):
            rel = child.attrib.get('rel')
            if rel == 'alternate' or not rel:
                link = child.attrib['href']
        elif child.tag.endswith('Atom}published'):
            # fromisoformat rejects a literal 'Z' suffix before Python 3.11
            iso = child.text.replace('Z', '+00:00')
            time = datetime.fromisoformat(iso)
        elif child.tag.endswith('Atom}summary'):
            summary = child.text
        elif child.tag.endswith('Atom}content') and not summary:
            summary = child.text
    return title, link, time, summary


def parse_atom(xml, title):
    """Parse given Atom feed."""
    entries = []
    for child in xml:
        if child.tag.endswith('Atom}title'):
            title = child.text
        elif child.tag.endswith('Atom}link'):
            rel = child.attrib.get('rel')
            if rel == 'alternate' or not rel:
                link = child.attrib['href']
        elif child.tag.endswith('Atom}entry'):
            entries.append(parse_atom_entry(child))
    return title, link, entries


async def fetch(raw_url):
    """Fetch web feed from given URL and return it parsed."""
    url = urlsplit(raw_url)
    if url.scheme == 'https':
        reader, writer = await open_connection(url.hostname, 443, ssl=True)
    elif url.scheme == 'http':
        reader, writer = await open_connection(url.hostname, 80)
    else:
        raise ValueError(f'unsupported URL scheme: {url.scheme}')
    writer.write(REQUEST.format(url.path or '/', url.hostname).encode())
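    # Read to EOF: the server closes an HTTP/1.0 connection when done.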
    response = HTTPResponse(BytesSocket(await reader.read()))
    writer.close()

    response.begin()
    with response:
        if response.status >= 400:
            raise HTTPError(raw_url, response.status,
                            f'{response.reason}: {raw_url}',
                            response.getheaders(), response)
        if response.status >= 300:
            location = urljoin(raw_url, response.getheader('Location'))
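            # Surface the redirect with an ad-hoc Warning subclass,
            # then follow the new location recursively.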
            warn(f'{raw_url} -> {location}',
                 type('RedirectWarning', (Warning,), {}))
            return await fetch(location)
        if response.status >= 200:
            xml = parse_xml(response.read())
            if xml.tag == 'rss':
                assert xml[0].tag == 'channel'
                src_title, src_link, items = parse_rss(xml[0], url.hostname)
            elif xml.tag.endswith('Atom}feed'):
                src_title, src_link, items = parse_atom(xml, url.hostname)
            else:
                raise ValueError(f'unsupported feed format at {raw_url}')
            return (Advert(src_title, urljoin(raw_url, src_link),
                           title, urljoin(raw_url, link),
                           time.astimezone(None), summary)
                    for title, link, time, summary in items)
        raise HTTPError(raw_url, response.status,
                        f'{response.reason}: {raw_url}',
                        response.getheaders(), response)


async def fetch_all(urls):
    """Fetch all given URLs asynchronously and return them parsed."""
    tasks = gather(*map(fetch, urls))
    try:
        return await tasks
    except BaseException:
        # Structured concurrency: cancel the remaining fetches so none
        # outlives this call, then propagate the error.
        tasks.cancel()
        raise


def select(n, ads):
    """Return n most recent ads from given iterable."""
    return sorted(ads, key=attrgetter('time'), reverse=True)[:n]


def truncate(ad, summary_length):
    """Return ad with truncated summary, whose HTML tags are stripped."""
    return ad._replace(summary=shorten(HTML_TAG.sub('', ad.summary),
                                       summary_length, placeholder='…'))
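
# For illustration, a summary of '<p>Hello, <b>world</b>!</p>' is stripped
# to 'Hello, world!', then shortened to at most summary_length characters,
# with '…' marking any elision.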


if __name__ == '__main__':
    parser = ArgumentParser(description='generate advert from web feeds')
    parser.add_argument('-v', '--version', action='version',
                        version=f'fead {__version__}')
    parser.add_argument('-F', '--feeds', metavar='PATH',
                        type=read_urls, default=[],
                        help='file containing newline-separated web feed URLs')
    parser.add_argument('-f', '--feed', metavar='URL',
                        action='append', dest='feeds',
                        help='additional web feed URL (may be repeated)')
    parser.add_argument('-n', '--count', metavar='N', type=int, default=3,
                        help='maximum number of ads in total (default: 3)')
    parser.add_argument('-p', '--per-feed', metavar='N', type=int, default=1,
                        help='maximum number of ads per feed (default: 1)')
    parser.add_argument('-l', '--length', metavar='N',
                        dest='len', type=int, default=256,
                        help='maximum summary length (default: 256)')
    parser.add_argument('-t', '--template', metavar='PATH',
                        type=FileType('r'), default=stdin,
                        help='template file (default: stdin)')
    parser.add_argument('-o', '--output', metavar='PATH',
                        type=FileType('w'), default=stdout,
                        help='output file (default: stdout)')
    args = parser.parse_args()

    template = args.template.read()
    args.template.close()
    for ad in select(args.count, (ad for feed in run(fetch_all(args.feeds))
                                  for ad in select(args.per_feed, feed))):
        args.output.write(template.format(**truncate(ad, args.len)._asdict()))
    args.output.close()
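
# Example invocation (file names are illustrative):
#   python3 fead.py -F feeds.txt -n 5 -l 200 -t template.html -o ads.html
# where the template may reference any Advert field by name, for instance
#   <li><a href="{link}">{title}</a> ({time:%Y-%m-%d}): {summary}</li>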