This commit is contained in:
Adrian Vollmer 2022-09-24 19:29:34 +02:00
commit d698771b7a
20 changed files with 1005 additions and 0 deletions

46
.gitignore vendored Normal file
View File

@ -0,0 +1,46 @@
*.py[cod]
# C extensions
*.so
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
.tox
.venv
.pytest_cache/
.mypy_cache/
# Translations
*.mo
# Complexity
output/*.html
output/*/index.html
# pbr
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?

25
LICENSE Normal file
View File

@ -0,0 +1,25 @@
Copyright (c) 2022 by Adrian Vollmer <computerfluesterer@protonmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

19
README.rst Normal file
View File

@ -0,0 +1,19 @@
========================================
sphinxcontrib-sphinx-self-contained-html
========================================
.. image:: https://travis-ci.org/AdrianVollmer/sphinxcontrib-sphinx-self-contained-html.svg?branch=master
:target: https://travis-ci.org/AdrianVollmer/sphinxcontrib-sphinx-self-contained-html
A Sphinx extension that allows generating completely self-contained HTML documents
Overview
--------
Add a longer description here.
Links
-----
- Source: https://github.com/AdrianVollmer/sphinxcontrib-sphinx-self-contained-html
- Bugs: https://github.com/AdrianVollmer/sphinxcontrib-sphinx-self-contained-html/issues

1
requirements.txt Normal file
View File

@ -0,0 +1 @@
pbr

46
setup.cfg Normal file
View File

@ -0,0 +1,46 @@
[metadata]
name = self-contained-html
summary = Convert distributed HTML docs to completely self-contained HTML docs
description_file = README.rst
author = Adrian Vollmer
author_email = computerfluesterer@protonmail.com
home_page = https://github.com/AdrianVollmer/self-contained-html
classifier =
Development Status :: 3 - Alpha
Environment :: Console
Environment :: Web Environment
Framework :: Sphinx :: Extension
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Topic :: Documentation
Topic :: Documentation :: Sphinx
Topic :: Utilities
[options.entry_points]
console_scripts =
bundle-html = src.__main__:main
[files]
packages = src
[mypy]
python_version = 3.8
show_column_numbers = True
show_error_context = True
ignore_missing_imports = True
follow_imports = skip
incremental = True
check_untyped_defs = True
warn_unused_ignores = True
[flake8]
show-source = True
builtins = unicode
max-line-length = 100

9
setup.py Normal file
View File

@ -0,0 +1,9 @@
#!/usr/bin/env python
"""Minimal setup script; packaging metadata lives in setup.cfg (pbr)."""
import setuptools

# pbr reads the actual configuration from setup.cfg
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True,
)

0
src/__init__.py Normal file
View File

10
src/__main__.py Normal file
View File

@ -0,0 +1,10 @@
def main():
    """Entry point of the ``bundle-html`` console script.

    Parses the command line and converts the HTML document at the given
    path into a self-contained one.  Imports happen lazily so that
    ``--version``/``--help`` stay fast.
    """
    from .args import parse_args
    args = parse_args()
    from .embed import embed_assets
    embed_assets(args.path)


if __name__ == "__main__":
    # Bug fix: the guard previously executed `pass`, so running the
    # module directly (python -m src) did nothing.
    main()

193
src/app.py Normal file
View File

@ -0,0 +1,193 @@
"""
Embed HTML assets.
It creates an HTML file that has three script tags:
1. A virtual file tree containing all assets in zipped form
2. The pako JS library to unzip the assets
3. Some bootstrap code that fixes the HTML so it loads all assets from the
virtual file tree instead of the file system
TODO:
* JS dependencies like mathjax don't work
* CSS delay noticeable when using fonts
* CSS files can import other files with `@import`
Author: Adrian Vollmer
"""
import base64
from fnmatch import fnmatch
import json
import logging
import os
from pathlib import Path
import zlib
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def embed_assets(index_file):
    """Create a self-contained copy of ``index_file``.

    Packs every asset below the directory of ``index_file`` into a
    compressed virtual file tree and embeds it, together with the pako
    library and the bootstrap script, into a single HTML document
    written next to the original file.

    :param index_file: path to the root HTML file of the documentation
    :return: path of the generated self-contained HTML file
    """
    base_dir = os.path.dirname(index_file)
    base_name = os.path.basename(index_file)
    new_base_name = 'SELF_CONTAINED_' + base_name
    result_file = os.path.join(base_dir, new_base_name)
    # Exclude the output file itself in case it is left over from a
    # previous run
    file_tree = load_filetree(base_dir, exclude_pattern=new_base_name)
    file_tree = json.dumps(file_tree)
    logger.debug('total asset size: %d' % len(file_tree))
    file_tree = deflate(file_tree)
    logger.debug('total asset size (compressed): %d' % len(file_tree))
    remote_resources = []
    data = {
        'start_page': base_name,
        'file_tree': file_tree,
        'remote_resources': remote_resources,
    }
    # Read the static helper files; use context managers so the handles
    # are closed deterministically (the originals leaked them)
    with open(os.path.join(SCRIPT_PATH, 'init.js'), 'r') as fp:
        init_script = fp.read()
    with open(os.path.join(SCRIPT_PATH, 'init.css'), 'r') as fp:
        init_css = fp.read()
    with open(os.path.join(SCRIPT_PATH, 'pako.min.js'), 'r') as fp:
        pako = fp.read()
    data = json.dumps(data)
    with open(os.path.join(SCRIPT_PATH, 'init.html'), 'r') as fp:
        body = fp.read()
    result = """
<!DOCTYPE html>
<html lang="{lang}">
<head>{head}<style>{style}</style></head>
<body>{body}
<script>window.data = {data}</script>
<script>{pako} //# sourceURL=pako.js</script>
<script>{init_script} //# sourceURL=init.js</script>
</body></html>
""".format(
        lang='en',
        head='',
        style=init_css,
        data=data,
        pako=pako,
        body=body,
        init_script=init_script,
    )
    with open(result_file, 'w') as fp:
        fp.write(result)
    return result_file
def pack_file(filename):
    """Load a single file and convert it to a JSON-serializable string.

    CSS files get their ``url(...)`` references embedded first; known
    binary formats are base64-encoded because JSON cannot carry raw
    bytes.  Anything else is decoded as text, falling back to base64 if
    decoding fails.

    :param filename: path of the file to load
    :return: the file content as a ``str``
    """
    _, ext = os.path.splitext(filename)
    ext = ext.lower()[1:]
    # Close the handle deterministically instead of leaking it
    with open(filename, 'rb') as fp:
        data = fp.read()
    if ext == 'css':
        # assuming all CSS files have names ending in '.css'
        data = embed_css_resources(data, filename)
    elif ext in [
        'png', 'jpg', 'jpeg', 'woff', 'woff2', 'eot', 'ttf', 'gif', 'ico',
    ]:
        # JSON doesn't allow binary data
        data = base64.b64encode(data)
    try:
        data = data.decode()
    except UnicodeError:
        # Not valid text after all; fall back to base64
        data = base64.b64encode(data).decode()
    # Bug fix: was a stray debug print() to stdout; use the module
    # logger like the rest of the file
    logger.debug('loaded file: %s [%d]' % (filename, len(data)))
    return data
def deflate(data):
    """zlib-compress ``data`` and return it base64-encoded as ASCII str."""
    compressed = zlib.compress(data.encode())
    return base64.b64encode(compressed).decode()
def embed_css_resources(css, rel_dir):
    """Replace url(<path>) with url(data:<mime_type>;base64, ...)"""
    # TODO handle @import
    # This uses some heuristics which could technically fail
    import re
    # Quotes are optional. But then URLs can contain escaped characters.
    regex = (
        rb'''(?P<url_statement>url\(['"]?(?P<url>.*?)['"]?\))'''
        rb'''(\s*format\(['"](?P<format>.*?)['"]\))?'''
    )
    replace_rules = {}
    for m in re.finditer(regex, css):
        # Skip URLs that are already embedded data URIs
        if re.match(b'''['"]?data:.*''', m['url']):
            continue
        path = m['url'].decode()
        # Strip GET parameters and anchors from the URL before treating
        # it as a file system path
        if '?' in path:
            path = path.split('?')[0]
        if '#' in path:
            path = path.split('#')[0]
        # NOTE(review): despite its name, rel_dir is the path of the
        # referencing CSS *file*, not a directory (dirname is taken
        # here) — confirm with the caller (pack_file)
        path = os.path.dirname(rel_dir) + '/' + path
        content = open(path, 'rb').read()
        # If it's binary, determine mime type and encode in base64
        if m['format']:
            mime_type = 'font/' + m['format'].decode()
        elif path[-3:].lower() == 'eot':
            mime_type = 'font/eot'
        elif path[-3:].lower() == 'css':
            mime_type = 'text/css'
            # NOTE(review): the recursion passes rel_dir, so url()s
            # inside the nested CSS file are resolved relative to the
            # outer file, not the nested one — verify this is intended
            content = embed_css_resources(content, rel_dir)
        else:
            from io import BytesIO
            from sphinx.util.images import guess_mimetype_for_stream
            mime_type = guess_mimetype_for_stream(BytesIO(content))
            if not mime_type:
                logger.error('Unable to determine mime type: %s' % path)
                mime_type = 'application/octet-stream'
        content = base64.b64encode(content)
        replace_rules[m['url_statement']] = \
            b'url("data:%(mime_type)s;charset=utf-8;base64, %(content)s")' % {
                b'content': content,
                b'mime_type': mime_type.encode(),
            }
    # Apply all replacements at once so earlier substitutions cannot
    # disturb later matches
    for orig, new in replace_rules.items():
        css = css.replace(orig, new)
    return css
def load_filetree(base_dir, exclude_pattern=None):
    """Recursively read all files below ``base_dir`` into a dict.

    Keys are POSIX-style paths rooted at ``base_dir`` (with a leading
    slash), values are the packed file contents.  Files whose name
    matches ``exclude_pattern`` (an fnmatch pattern) are skipped.
    """
    tree = {}
    root = Path(base_dir)
    for entry in root.rglob('*'):
        if exclude_pattern and fnmatch(entry.name, exclude_pattern):
            continue
        if not entry.is_file():
            continue
        key = '/' + entry.relative_to(root).as_posix()
        tree[key] = pack_file(entry.as_posix())
        logger.debug('Packed file %s [%d]' % (key, len(tree[key])))
    return tree

37
src/args.py Normal file
View File

@ -0,0 +1,37 @@
import argparse

try:
    # importlib.metadata is present in Python 3.8 and later
    import importlib.metadata as importlib_metadata
except ImportError:
    # use the shim package importlib-metadata pre-3.8
    import importlib_metadata as importlib_metadata

# Read version and summary from the installed package metadata; fall
# back to placeholders when the package is not installed (e.g. when
# running from a source checkout)
try:
    __version__ = importlib_metadata.version(__package__ or __name__)
    # __summary__ = importlib_metadata.metadata(__package__ or __name__)['summary']
    __summary__ = "Convert distributed HTML docs to completely self-contained HTML docs"
except importlib_metadata.PackageNotFoundError:
    __version__ = "??"
    __summary__ = "??"

# Module-level parser; parse_args() below uses it
parser = argparse.ArgumentParser(
    description=__summary__,
)
parser.add_argument(
    '-v', '--version', action='version',
    version=__version__,
)
parser.add_argument(
    'path',
    help='path to the root HTML file',
)
def parse_args(argv=None):
    """Parse command line arguments.

    :param argv: list of argument strings; defaults to ``sys.argv[1:]``
    :return: the parsed ``argparse.Namespace``
    """
    return parser.parse_args(argv)

232
src/embed.py Normal file
View File

@ -0,0 +1,232 @@
"""
Embed HTML assets.
It creates an HTML file that has three script tags:
1. A virtual file tree containing all assets in zipped form
2. The pako JS library to unzip the assets
3. Some bootstrap code that fixes the HTML so it loads all assets from the
virtual file tree instead of the file system
TODO:
* JS dependencies like mathjax don't work
* CSS delay noticeable when using fonts
* CSS files can import other files with `@import`
Author: Adrian Vollmer
"""
import base64
from fnmatch import fnmatch
import json
import logging
import os
from pathlib import Path
import zlib
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def embed_assets(index_file):
    """Create a self-contained copy of ``index_file``.

    Packs every asset below the directory of ``index_file`` into a
    compressed virtual file tree and embeds it, together with the pako
    library and the bootstrap scripts, into a single HTML document
    written next to the original file.

    :param index_file: path to the root HTML file of the documentation
    :return: path of the generated self-contained HTML file
    """
    # Load the static helper files shipped with this package
    init_files = {}
    for filename in [
        'init.js',
        'inject.js',
        'init.css',
        'init.html',
        'pako.min.js',
    ]:
        path = os.path.join(SCRIPT_PATH, filename)
        # Use a context manager so the handle is closed deterministically
        # (the original leaked it)
        with open(path, 'r') as fp:
            init_files[filename] = fp.read()
    base_dir = os.path.dirname(index_file)
    base_name = os.path.basename(index_file)
    new_base_name = 'SELF_CONTAINED_' + base_name
    result_file = os.path.join(base_dir, new_base_name)
    # Exclude the output file itself in case it survived an earlier run
    file_tree = load_filetree(base_dir, init_files['inject.js'], exclude_pattern=new_base_name)
    file_tree = json.dumps(file_tree)
    logger.debug('total asset size: %d' % len(file_tree))
    file_tree = deflate(file_tree)
    logger.debug('total asset size (compressed): %d' % len(file_tree))
    remote_resources = []
    data = {
        'current_path': base_name,
        'file_tree': file_tree,
        'remote_resources': remote_resources,
    }
    data = json.dumps(data)
    result = """
<!DOCTYPE html>
<html>
<head><style>{style}</style></head>
<body>{body}
<script>window.data = {data}</script>
<script>{pako} //# sourceURL=pako.js</script>
<script>{init_js}</script>
</body></html>
""".format(
        style=init_files['init.css'],
        init_js=init_files['init.js'],
        pako=init_files['pako.min.js'],
        body=init_files['init.html'],
        data=data,
    )
    with open(result_file, 'w') as fp:
        fp.write(result)
    return result_file
def pack_file(filename, js):
    """Load a single file and convert it to a JSON-serializable string.

    CSS files get their ``url(...)`` references embedded, HTML files
    get font preloads inlined and the inject script appended, and known
    binary formats are base64-encoded because JSON cannot carry raw
    bytes.

    :param filename: path of the file to load
    :param js: content of inject.js, embedded into HTML files
    :return: the file content as a ``str``
    """
    _, ext = os.path.splitext(filename)
    ext = ext.lower()[1:]
    # Close the handle deterministically instead of leaking it
    with open(filename, 'rb') as fp:
        data = fp.read()
    if ext == 'css':
        # assuming all CSS files have names ending in '.css'
        data = embed_css_resources(data, filename)
    elif ext in [
        'png', 'jpg', 'jpeg', 'woff', 'woff2', 'eot', 'ttf', 'gif', 'ico',
    ]:
        # JSON doesn't allow binary data
        data = base64.b64encode(data)
    elif ext in ['html', 'htm']:
        data = embed_html_resources(data, os.path.dirname(filename), js).encode()
    if not isinstance(data, str):
        try:
            data = data.decode()
        except UnicodeError:
            # Not valid text after all; fall back to base64
            data = base64.b64encode(data).decode()
    logger.debug('loaded file: %s [%d]' % (filename, len(data)))
    return data
def deflate(data):
    """Return ``data`` zlib-compressed and base64-encoded (ASCII str)."""
    return base64.b64encode(zlib.compress(data.encode())).decode()
def embed_html_resources(html, base_dir, js):
    """Embed fonts in preload links to avoid jumps when loading"""
    # This cannot be done in JavaScript, it would be too late
    # TODO set body.display = None?
    import bs4
    soup = bs4.BeautifulSoup(html, 'lxml')
    # Inline preloaded fonts as data URIs so they are available before
    # the inject script rewrites the document
    for link in soup.select('head > link[rel="preload"][as="font"]'):
        # Get filename relative to resource
        filename = os.path.join(
            base_dir,
            link['href'],  # TODO convert from POSIX
        )
        link['href'] = to_data_uri(filename, mime_type=link['type'])
    # Append the inject script so it runs after the body is parsed
    script = soup.new_tag("script")
    script.string = js
    soup.find('body').append(script)
    return str(soup)
def to_data_uri(filename, mime_type=None):
    """Return the content of ``filename`` as a base64 ``data:`` URI.

    :param filename: path of the file to embed
    :param mime_type: MIME type to declare; defaults to
        ``application/octet-stream``
    :return: the ``data:`` URI as a ``str``
    """
    # Use a context manager so the handle is closed deterministically
    # (the original leaked it)
    with open(filename, 'rb') as fp:
        data = fp.read()
    data = base64.b64encode(data)
    if not mime_type:
        mime_type = 'application/octet-stream'
    return "data:%s;charset=utf-8;base64, %s" % (
        mime_type,
        data.decode(),
    )
def embed_css_resources(css, filename):
    """Replace url(<path>) with url(data:<mime_type>;base64, ...)"""
    # TODO handle @import
    # This uses some heuristics which could technically fail
    import re
    # Quotes are optional. But then URLs can contain escaped characters.
    regex = (
        rb'''(?P<url_statement>url\(['"]?(?P<url>.*?)['"]?\))'''
        rb'''(\s*format\(['"](?P<format>.*?)['"]\))?'''
    )
    replace_rules = {}
    for m in re.finditer(regex, css):
        # Skip URLs that are already embedded data URIs
        if re.match(b'''['"]?data:.*''', m['url']):
            continue
        path = m['url'].decode()
        # Strip GET parameters and anchors before treating the URL as a
        # file system path
        if '?' in path:
            path = path.split('?')[0]
        if '#' in path:
            path = path.split('#')[0]
        # Resolve the URL relative to the referencing CSS file
        path = os.path.dirname(filename) + '/' + path
        content = open(path, 'rb').read()
        # If it's binary, determine mime type and encode in base64
        if m['format']:
            mime_type = 'font/' + m['format'].decode()
        elif path[-3:].lower() == 'eot':
            mime_type = 'font/eot'
        elif path[-3:].lower() == 'css':
            mime_type = 'text/css'
            # NOTE(review): the recursion passes the *outer* filename,
            # so url()s inside the nested CSS file are resolved relative
            # to the outer file, not the nested one — verify intended
            content = embed_css_resources(content, filename)
        else:
            from io import BytesIO
            from sphinx.util.images import guess_mimetype_for_stream
            mime_type = guess_mimetype_for_stream(BytesIO(content))
            if not mime_type:
                logger.error('Unable to determine mime type: %s' % path)
                mime_type = 'application/octet-stream'
        content = base64.b64encode(content)
        replace_rules[m['url_statement']] = \
            b'url("data:%(mime_type)s;charset=utf-8;base64, %(content)s")' % {
                b'content': content,
                b'mime_type': mime_type.encode(),
            }
    # Apply all replacements at once so earlier substitutions cannot
    # disturb later matches
    for orig, new in replace_rules.items():
        css = css.replace(orig, new)
    return css
def load_filetree(base_dir, js, exclude_pattern=None):
    """Recursively read all files below ``base_dir`` into a dict.

    Keys are POSIX-style paths relative to ``base_dir``, values are the
    packed file contents.  Files whose name matches ``exclude_pattern``
    (an fnmatch pattern) are skipped.  ``js`` is the inject script,
    forwarded to pack_file for HTML files.
    """
    tree = {}
    root = Path(base_dir)
    for entry in root.rglob('*'):
        if exclude_pattern and fnmatch(entry.name, exclude_pattern):
            continue
        if not entry.is_file():
            continue
        key = entry.relative_to(root).as_posix()
        tree[key] = pack_file(entry.as_posix(), js)
        logger.debug('Packed file %s [%d]' % (key, len(tree[key])))
    return tree

60
src/init.css Normal file
View File

@ -0,0 +1,60 @@
/* The iframe that hosts the current virtual page: fills the whole
 * viewport, kept behind the loading indicator via z-index. */
#main {
    /* TODO rename main to avoid collisions*/
    position:fixed;
    top:0; left:0; bottom:0; right:0;
    width:100%;
    height:100%;
    border:none;
    margin:0;
    padding:0;
    overflow:hidden;
    z-index:-1;
}
/* Ripple loading indicator, centered horizontally */
.lds-ripple {
    display: block;
    position: relative;
    width: 80px;
    height: 80px;
    margin: auto;
    z-index:1;
}
.lds-ripple div {
    position: absolute;
    border: 4px solid #aaa;
    opacity: 1;
    border-radius: 50%;
    animation: lds-ripple 1s cubic-bezier(0, 0.2, 0.8, 1) infinite;
}
/* Second ripple runs half a period behind the first */
.lds-ripple div:nth-child(2) {
    animation-delay: -0.5s;
}
/* Expanding circle; invisible for the first 5% of the cycle to create
 * a pause between ripples */
@keyframes lds-ripple {
    0% {
        top: 36px;
        left: 36px;
        width: 0;
        height: 0;
        opacity: 0;
    }
    4.9% {
        top: 36px;
        left: 36px;
        width: 0;
        height: 0;
        opacity: 0;
    }
    5% {
        top: 36px;
        left: 36px;
        width: 0;
        height: 0;
        opacity: 1;
    }
    100% {
        top: 0px;
        left: 0px;
        width: 72px;
        height: 72px;
        opacity: 0;
    }
}

1
src/init.html Normal file
View File

@ -0,0 +1 @@
<div class="lds-ripple" id="loading-indicator"><div></div><div></div></div>

88
src/init.js Normal file
View File

@ -0,0 +1,88 @@
// Encode an ArrayBuffer as a base64 string (uses window.btoa, so this
// is browser-only code).
var _ArrayBufferToBase64 = function (array_buffer) {
    var binary = '';
    var bytes = new Uint8Array(array_buffer);
    var len = bytes.byteLength;
    for (var i = 0; i < len; i++) {
        binary += String.fromCharCode(bytes[i])
    }
    return window.btoa(binary);
};
// Decode a base64 string into an ArrayBuffer.
// NOTE(review): the empty-input guard returns a plain array [] rather
// than an ArrayBuffer — callers appear to tolerate this, but confirm.
var _base64ToArrayBuffer = function (base64) {
    if (!base64) { return []}
    var binary_string = window.atob(base64);
    var len = binary_string.length;
    var bytes = new Uint8Array(len);
    for (var i = 0; i < len; i++) {
        bytes[i] = binary_string.charCodeAt(i);
    }
    return bytes.buffer;
};
// Replace the #main iframe (if any) with a fresh, hidden one and return
// it.  A new iframe is needed for each virtual page load.
var createIframe = function() {
    var iframe = document.getElementById('main');
    if (iframe) { iframe.remove() };
    iframe = document.createElement("iframe");
    window.document.body.prepend(iframe);
    iframe.setAttribute('src', '#');
    iframe.setAttribute('name', 'main');
    iframe.setAttribute('id', 'main');
    // Hidden until the page signals 'show_iframe'
    iframe.style.display = 'none';
    return iframe;
}
// Load a page from the virtual file tree into a fresh iframe and make
// it the current page.
var load_virtual_page = (function (path, get_params, anchor) {
    const data = window.data.file_tree[path];
    var iframe = createIframe();
    // Preserve GET parameters in the iframe's src so scripts that read
    // location.search keep working
    if (get_params) { iframe.src = path + '?' + get_params; }
    else { iframe.src = path; }
    iframe.contentDocument.write(data);
    if (anchor) { iframe.contentDocument.location.hash = anchor; }
    window.data.current_path = path;
});
// Bootstrap: decompress the virtual file tree, wire up the
// parent<->iframe messaging, then load the start page.
window.onload = function() {
    // Set up the virtual file tree
    var FT = window.data.file_tree;
    FT = _base64ToArrayBuffer(FT);
    FT = pako.inflate(FT)
    FT = new TextDecoder("utf-8").decode(FT);
    FT = JSON.parse(FT);
    window.data.file_tree = FT;
    // Set up message listener
    window.addEventListener("message", (evnt) => {
        console.log("Received message in parent", evnt);
        if (evnt.data.action == 'set_title') {
            // iframe has finished loading and sent us its title
            // parent sets the title and responds with the data object
            window.document.title = evnt.data.argument;
            var iframe = document.getElementById('main');
            iframe.contentWindow.postMessage({
                action: "set_data",
                argument: window.data,
            }, "*");
        } else if (evnt.data.action == 'virtual_click') {
            // user has clicked on a link in the iframe
            var iframe = document.getElementById('main');
            iframe.remove()
            // show the spinner while the next page is prepared
            var loading = document.getElementById('loading-indicator');
            loading.style.display = '';
            load_virtual_page(
                evnt.data.argument.path,
                evnt.data.argument.get_parameters,
                evnt.data.argument.anchor,
            );
        } else if (evnt.data.action == 'show_iframe') {
            // iframe finished fixing the document
            var iframe = document.getElementById('main');
            iframe.style.display = '';
            var loading = document.getElementById('loading-indicator');
            loading.style.display = 'none';
        }
    }, false);
    // Load first page
    load_virtual_page(window.data.current_path, "", "");
}

185
src/inject.js Normal file
View File

@ -0,0 +1,185 @@
// Replace each stylesheet <link> with an inline <style> whose content
// comes from the virtual file tree.
var embed_css = function(origin) {
    Array.from(document.querySelectorAll("link")).forEach( link => {
        if (link.getAttribute('rel') == 'stylesheet') {
            const style = document.createElement("style");
            var href = link.getAttribute('href');
            let [path, get_parameters, anchor] = split_url(href);
            path = normalize_path(path);
            style.innerText = retrieve_file(path);
            link.replaceWith(style);
        };
    });
};
// Replace each <script src=...> that points into the virtual file tree
// with an inline script holding the file's content.  Scripts must be
// recreated (not just mutated) for the browser to execute them.
var embed_js = function(origin) {
    Array.from(document.querySelectorAll("script")).forEach( oldScript => {
        const newScript = document.createElement("script");
        Array.from(oldScript.attributes).forEach( attr => {
            newScript.setAttribute(attr.name, attr.value);
        });
        try {
            if (newScript.hasAttribute('src') && is_virtual(newScript.getAttribute('src'))) {
                var src = newScript.getAttribute('src');
                let [path, get_parameters, anchor] = split_url(src);
                path = normalize_path(path);
                var src = retrieve_file(path);
                newScript.appendChild(document.createTextNode(src));
                newScript.removeAttribute('src');
                oldScript.parentNode.replaceChild(newScript, oldScript);
            }
        } catch (e) {
            // Make sure all scripts are loaded
            console.error(e);
        }
    });
};
var split_url = function(url) {
    // Split a URL into [path, GET parameters, anchor].
    // Missing components come back as empty strings.
    var hash_parts = url.split('#');
    var anchor = hash_parts[1] || "";
    var query_parts = hash_parts[0].split('?');
    var get_parameters = query_parts[1] || "";
    var path = query_parts[0];
    return [path, get_parameters, anchor];
}
// Click handler for links into the virtual file tree: instead of a real
// navigation, ask the parent window to load the target virtually.
var virtual_click = function(evnt) {
    // Handle GET parameters and anchors
    console.log("Virtual click", evnt);
    var a = evnt.currentTarget;
    let [path, get_parameters, anchor] = split_url(a.getAttribute('href'));
    path = normalize_path(path);
    window.parent.postMessage({
        action: "virtual_click",
        argument: {
            path: path,
            get_parameters: get_parameters,
            anchor: anchor,
        }
    }, '*');
    // Suppress the browser's default navigation
    evnt.preventDefault();
    evnt.stopPropagation();
    return false;
};
// Attach the virtual click handler to every link that points into the
// virtual file tree.
var fix_links = function(origin) {
    Array.from(document.querySelectorAll("a")).forEach( a => {
        if (is_virtual(a.getAttribute('href'))) {
            a.addEventListener('click', virtual_click);
        }
    });
};
// Placeholder for rewriting form actions that point into the virtual
// file tree; the implementation is still disabled (untested).
var fix_forms = function(origin) {
    Array.from(document.querySelectorAll("form")).forEach( form => {
        var href = form.getAttribute('action');
        if (is_virtual(href)) {
            // TODO test this
            // let [path, get_parameters, anchor] = split_url(href);
            // path = normalize_path(path);
            // var new_href = to_blob(retrieve_file(path), 'text/html');
            // if (get_parameters) { new_href += '?' + get_parameters; }
            // if (anchor) { new_href += '?' + anchor; }
            // form.action = new_href;
        }
    });
};
// Replace virtual image sources with data URIs.  SVG files are stored
// as text and must be base64-encoded here; other images are assumed to
// already be base64 (they were encoded when the tree was packed).
var embed_img = function(origin) {
    Array.from(document.querySelectorAll("img")).forEach( img => {
        if (img.hasAttribute('src')) {
            const src = img.getAttribute('src');
            if (is_virtual(src)) {
                var path = normalize_path(src);
                const file = retrieve_file(path);
                // TODO handle mime type
                if (file.startsWith('<svg')) {
                    img.setAttribute('src', "data:image/svg+xml;charset=utf-8;base64, " + btoa(file));
                } else {
                    // NOTE(review): non-SVG images are always labeled
                    // image/png regardless of actual format — browsers
                    // typically sniff the real type, but confirm
                    img.setAttribute('src', "data:image/png;base64, " + file);
                }
            };
        };
    });
};
var is_virtual = function(url) {
    // A URL is "virtual" (served from the in-memory file tree) unless
    // it is empty, a pure anchor, an absolute http(s) URL, or already a
    // data:/blob: URI.
    var candidate = url.toString().toLowerCase();
    if (candidate == "") { return false; }
    if (candidate[0] == "#") { return false; }
    if (candidate.startsWith('https://')) { return false; }
    if (candidate.startsWith('http://')) { return false; }
    if (candidate.startsWith('data:')) { return false; }
    if (candidate.startsWith('blob:')) { return false; }
    return true;
};
// Look up a file's content in the virtual file tree by its normalized
// path.  Returns undefined when the path is unknown.
var retrieve_file = function(path) {
    // console.log("Retrieving file: " + path);
    var file_tree = window.data.file_tree;
    var file = file_tree[path];
    return file;
};
// Resolve a path relative to the current page into an absolute key for
// the virtual file tree, collapsing '.' and '..' components.
var normalize_path = function(path) {
    // make relative paths absolute
    var result = window.data.current_path;
    result = result.split('/');
    // drop the current page's file name, keep its directory
    result.pop();
    result = result.concat(path.split('/'));
    // resolve relative directories
    var array = [];
    Array.from(result).forEach( component => {
        if (component == '..') {
            if (array) {
                array.pop();
            }
        } else if (component == '.') {
            // '.' refers to the current directory; nothing to do
        } else {
            // skip empty components (leading/duplicate slashes)
            if (component) { array.push(component); }
        }
    });
    result = array.join('/');
    // console.log(`Normalized path: ${path} -> ${result} (@${window.data.current_path})`);
    return result;
};
// Set up message listener
window.addEventListener("message", (evnt) => {
    console.log("Received message in iframe", evnt);
    if (evnt.data.action == 'set_data') {
        window.data = evnt.data.argument;
        console.log("Received data from parent", window.data);
        // dynamically fix elements on this page
        try {
            embed_css();
            fix_links();
            fix_forms();
            embed_img();
            embed_js();
        } finally {
            // Always tell the parent to reveal the iframe, even if one
            // of the fix-up steps threw
            window.parent.postMessage({
                action: "show_iframe",
                argument: "",
            }, '*');
        }
    }
}, false);
// Set parent window title
// (this also signals the parent that the page finished loading; the
// parent replies with a 'set_data' message handled above)
window.parent.postMessage({
    action: "set_title",
    argument: window.document.querySelector('head>title').innerText
}, '*');

2
src/pako.min.js vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,8 @@
def setup(app):
app.add_config_value('html_embed_assets', False, 'html')
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}

2
test-requirements.txt Normal file
View File

@ -0,0 +1,2 @@
pytest
sphinx

9
tests/conftest.py Normal file
View File

@ -0,0 +1,9 @@
"""
pytest config for sphinxcontrib/sphinx-self-contained-html/tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by Adrian Vollmer <computerfluesterer@protonmail.com>
:license: BSD, see LICENSE for details.
"""
pytest_plugins = 'sphinx.testing.fixtures'

32
tox.ini Normal file
View File

@ -0,0 +1,32 @@
[tox]
minversion = 2.0
envlist = py{36,37,38,39},style
[testenv]
deps = -r{toxinidir}/test-requirements.txt
commands=
pytest
[testenv:mypy]
description =
Run type checks.
deps =
mypy
commands=
    mypy src
[testenv:style]
description =
Run style checks.
deps =
flake8
isort
yapf
commands =
    isort -rc -c -df src tests
    yapf -rd src tests
    flake8 src tests setup.py
[travis]
python =
3.6: py36, mypy, style