author     pg <phuong@example.com>  2025-06-22 10:18:42 +0900
committer  pg <phuong@example.com>  2025-06-22 11:06:40 +0900
commit     026a991233d30579d62c5b4deac36a5d05324e2e (patch)
tree       6f4a3bd96ea27562f4d0c7c75598891dd72f61da
parent     b102f8e660c40f074419a03c7b0615b5cbead0bb (diff)
download   scadere-026a991233d30579d62c5b4deac36a5d05324e2e.tar.gz

Make feeds viewable in browsers (tag: 0.2.0)
-rw-r--r--  src/scadere/__init__.py      13
-rw-r--r--  src/scadere/atom2xhtml.xslt  49
-rw-r--r--  src/scadere/listen.py        40
-rw-r--r--  tst/test_listen.py           19
4 files changed, 104 insertions, 17 deletions
diff --git a/src/scadere/__init__.py b/src/scadere/__init__.py
index 823e1a9..157166e 100644
--- a/src/scadere/__init__.py
+++ b/src/scadere/__init__.py
@@ -4,11 +4,12 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
from argparse import HelpFormatter, ONE_OR_MORE
+from functools import cache
+from importlib.resources import files
__all__ = ['__version__', 'GNUHelpFormatter', 'NetLoc',
- 'format_epilog', 'format_version']
-__version__ = '0.1.3'
-
+ 'atom2xhtml', 'format_epilog', 'format_version']
+__version__ = '0.2.0'
EXAMPLE_PREFIX = ' ' * 2
# help2man's implementation detail
@@ -80,6 +81,12 @@ class NetLoc:
return hostname, int(port) # ValueError to be handled by argparse
+@cache
+def atom2xhtml():
+ """Load stylesheet from package resources exactly once."""
+ return files(__name__).joinpath('atom2xhtml.xslt').read_bytes()
+
+
def format_epilog(examples):
"""Format example commands and their description ."""
lines = ['Examples:']
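
The new atom2xhtml() helper pairs importlib.resources.files() with functools.cache, so the packaged stylesheet is read from disk at most once per process and the same bytes object is reused for every response. A minimal standalone sketch of that pattern, assuming only the package layout introduced by this commit (atom2xhtml.xslt shipped next to __init__.py); load_stylesheet is a stand-in name, not part of the commit:

    from functools import cache
    from importlib.resources import files

    @cache
    def load_stylesheet(package='scadere', resource='atom2xhtml.xslt'):
        """Read the packaged XSLT once; later calls hit the cache."""
        return files(package).joinpath(resource).read_bytes()

    # Identical arguments yield the identical bytes object, so the HTTP
    # handler never touches the filesystem after the first request.
    assert load_stylesheet() is load_stylesheet()
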
diff --git a/src/scadere/atom2xhtml.xslt b/src/scadere/atom2xhtml.xslt
new file mode 100644
index 0000000..5bcc864
--- /dev/null
+++ b/src/scadere/atom2xhtml.xslt
@@ -0,0 +1,49 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+ -- Atom-to-XHTML transformation
+ --
+ -- SPDX-FileCopyrightText: 2025 Nguyễn Gia Phong
+ -- SPDX-License-Identifier: AGPL-3.0-or-later
+ -->
+<xsl:stylesheet version='1.0'
+ xmlns:atom='http://www.w3.org/2005/Atom'
+ xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>
+ <xsl:output method='html' version='1.0' encoding='UTF-8' indent='yes'/>
+
+ <xsl:template match="atom:generator">
+ <p>
+ <xsl:text>Generated by </xsl:text>
+ <a href='{@uri}'>
+ <xsl:value-of select='.'/>
+ </a>
+ <xsl:text> </xsl:text>
+ <xsl:value-of select='@version'/>
+ </p>
+ </xsl:template>
+
+ <xsl:template match="/atom:feed">
+ <xsl:variable name='home' select='link'/>
+ <html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta charset='utf-8'/>
+ <meta name='color-scheme' content='dark light'/>
+ <meta name='viewport' content='width=device-width, initial-scale=1'/>
+ <link rel='icon' href='data:,'/>
+ <title><xsl:value-of select='atom:title'/></title>
+ </head>
+ <body>
+ <h1><xsl:value-of select='atom:title'/></h1>
+ <ul>
+ <xsl:for-each select='atom:entry'>
+ <li>
+ <a href='{atom:link[@type="application/xhtml+xml"]/@href}'>
+ <xsl:value-of select='atom:title'/>
+ </a>
+ </li>
+ </xsl:for-each>
+ </ul>
+ <xsl:apply-templates select='atom:generator'/>
+ </body>
+ </html>
+ </xsl:template>
+</xsl:stylesheet>
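
Browsers apply this stylesheet through the xml-stylesheet processing instruction that listen.py (below) now prepends to the feed, but the transformation can also be exercised offline. A sketch using the third-party lxml package, which is not a dependency of this commit; the saved feed filename is made up for illustration:

    from lxml import etree  # third-party; assumed installed for this sketch

    transform = etree.XSLT(etree.parse('src/scadere/atom2xhtml.xslt'))
    feed = etree.parse('expiration.atom')  # hypothetical saved Atom feed
    page = transform(feed)                 # XHTML page listing the feed entries
    print(str(page))

Because the stylesheet declares method='html', the result tree serializes as an HTML document rather than bare XML.
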
diff --git a/src/scadere/listen.py b/src/scadere/listen.py
index 151a108..dcf4754 100644
--- a/src/scadere/listen.py
+++ b/src/scadere/listen.py
@@ -22,7 +22,7 @@ from xml.etree.ElementTree import (Element as xml_element,
indent, tostring as string_from_xml)
from . import (__version__, GNUHelpFormatter, NetLoc,
- format_epilog, format_version)
+ atom2xhtml, format_epilog, format_version)
from .check import base64_from_str
__all__ = ['main']
@@ -145,7 +145,7 @@ def is_subdomain(subject, objects):
for obj_parts in map(split_domain, objects))
-def feed(mtime, base_url, name, certificates, domains):
+def feed(base_url, name, mtime, certificates, domains):
"""Construct an Atom feed based on the given information."""
return ('feed', {'xmlns': 'http://www.w3.org/2005/Atom'},
('id', base_url),
@@ -202,17 +202,24 @@ def xml(tree, parent=None):
@lru_cache
-def unparsed_feed(*args):
+def atom2xhtml_url(base_url):
+ """Return the URL to the immutable stylesheet."""
+ return urljoin(base_url, f'{urlsplit(base_url).path}{__version__}.xslt')
+
+
+@lru_cache
+def unparsed_feed(base_url, *args):
"""Cache Atom feed."""
- return string_from_xml(xml(feed(*args)), 'unicode',
- xml_declaration=True, default_namespace=None)
+ return (b'<?xml version="1.0" encoding="utf-8"?>\n'
+ b'<?xml-stylesheet type="text/xsl"'
+ + f' href="{atom2xhtml_url(base_url)}"?>\n'.encode()
+ + string_from_xml(xml(feed(base_url, *args)), 'utf-8'))
@lru_cache
def unparsed_page(*args):
"""Cache XHTML page."""
- return string_from_xml(xml(page(*args)), 'unicode',
- xml_declaration=True, default_namespace=None)
+ return string_from_xml(xml(page(*args)), 'utf-8', xml_declaration=True)
@lru_cache
@@ -221,10 +228,10 @@ def set_http_time_locale():
setlocale(LC_TIME, 'C')
-def write_xml(writer, http_version, application, func, mtime, *args):
+def write_xml(writer, http_version, application, func, *args, mtime=None):
"""Write given document as XML."""
try:
- content = func(mtime, *args).encode()
+ content = func(*args)
except Exception: # pragma: no cover
describe_status(writer, HTTPStatus.INTERNAL_SERVER_ERROR, http_version)
raise
@@ -232,9 +239,13 @@ def write_xml(writer, http_version, application, func, mtime, *args):
write_status(writer, http_version, HTTPStatus.OK)
write_content_type(writer, f'application/{application}+xml')
writer.write(f'Content-Length: {len(content)}\r\n'.encode())
- set_http_time_locale()
- http_time = mtime.strftime('%a, %d %b %Y %H:%M:%S GMT')
- writer.write(f'Last-Modified: {http_time}\r\n\r\n'.encode())
+ if mtime is None:
+ writer.write(b'Cache-Control: public,'
+                     b' max-age=31536000, immutable\r\n\r\n')
+ else:
+ set_http_time_locale()
+ http_time = mtime.strftime('%a, %d %b %Y %H:%M:%S GMT')
+ writer.write(f'Last-Modified: {http_time}\r\n\r\n'.encode())
writer.write(content)
@@ -278,7 +289,10 @@ async def handle(certs, base_url, reader, writer, title=''):
if url_parts.path == urlsplit(base_url).path: # Atom feed
write_xml(writer, http_version, 'atom', unparsed_feed,
- mtime, base_url, title or certs.name, summaries, domains)
+ base_url, title or certs.name, mtime, summaries, domains,
+ mtime=mtime)
+ elif url_parts.path == urlsplit(atom2xhtml_url(base_url)).path:
+ write_xml(writer, http_version, 'xslt', atom2xhtml)
elif url_parts.path in lookup: # accessible Atom entry's link/ID
write_xml(writer, http_version, 'xhtml', unparsed_page,
*lookup.get(url_parts.path))
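
The stylesheet URL embeds the package version, so every release gets a fresh path and the response can safely carry the long-lived immutable Cache-Control header written above. A worked example of the URL construction, with a made-up base URL:

    from urllib.parse import urljoin, urlsplit

    __version__ = '0.2.0'  # mirrors scadere.__version__ at this commit

    def atom2xhtml_url(base_url):
        """Versioned, and therefore immutable, stylesheet URL (as in listen.py)."""
        return urljoin(base_url, f'{urlsplit(base_url).path}{__version__}.xslt')

    # A new release changes the path, which is what makes
    # 'Cache-Control: public, max-age=31536000, immutable' safe to send.
    assert (atom2xhtml_url('https://certs.example.net/feed/')
            == 'https://certs.example.net/feed/0.2.0.xslt')
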
diff --git a/tst/test_listen.py b/tst/test_listen.py
index 61929c8..d13475d 100644
--- a/tst/test_listen.py
+++ b/tst/test_listen.py
@@ -24,6 +24,7 @@ from hypothesis.strategies import (booleans, composite, data,
from hypothesis.provisional import domains, urls
from pytest import raises
+from scadere import __version__, atom2xhtml
from scadere.check import base64_from_str, printable
from scadere.listen import (handle, is_subdomain, path, parse_summary,
str_from_base64, with_trailing_slash, xml)
@@ -172,6 +173,21 @@ async def fetch_xml(socket, url, content_type):
return XML(content.decode(), xml_parser)
+async def check_atom2xhtml(socket, url):
+ """Check if socket serves atom2xhtml stylesheet at given HTTP URL."""
+ header_parser = BytesHeaderParser()
+ async with connect(socket) as (reader, writer):
+ await write_request(writer, f'GET {url} HTTP/1.1\r\n')
+ status = await reader.readuntil(b'\r\n')
+ assert status == b'HTTP/1.1 200 OK\r\n'
+ headers_bytes = await reader.readuntil(b'\r\n\r\n')
+ headers = header_parser.parsebytes(headers_bytes)
+ assert headers['Content-Type'] == 'application/xslt+xml'
+ content = await reader.read()
+ assert len(content) == int(headers['Content-Length'])
+ assert content == atom2xhtml()
+
+
def equal_xml(a, b):
"""Check if the two XML elements are equal."""
a_copy, b_copy = deepcopy(a), deepcopy(b)
@@ -181,8 +197,9 @@ def equal_xml(a, b):
async def check_feed(socket, base_url):
- """Check the Atom feed at the given path and its entry pages."""
+ """Check the Atom feed, its stylesheet, and entry pages."""
feed = await fetch_xml(socket, base_url, 'application/atom+xml')
+ await check_atom2xhtml(socket, f'{base_url}{__version__}.xslt')
for entry in feed.findall('entry', ATOM_NAMESPACES):
link = entry.find('link', ATOM_NAMESPACES).attrib
assert link['rel'] == 'alternate'