set -e
-curdir=`pwd`
+srcdir=`pwd`/processor
mkdir -p "$1"
cd "$1"
-for file in "$curdir"/*.do "$curdir"/helpers.sh "$curdir"/intermediate.tmpl; do
+for file in "$srcdir"/*; do
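+# set +e below tolerates ln failing, e.g. when a link already exists.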
set +e
ln -s "$file"
set -e
+++ /dev/null
-#!/bin/sh
-
-# Remove target files for which no sources files can be found.
-for file in *.intermediate; do
- if test -f "$file" &&
- ! test -f "${file%.intermediate}.md" &&
- ! test -f "${file%.intermediate}.rst"; then
- rm "$file"
- fi
-done
-for file in *.uuid; do
- if test -f "$file" &&
- ! test -f "${file%.uuid}.md" &&
- ! test -f "${file%.uuid}.rst"; then
- rm "$file"
- fi
-done
-for file in *.html; do
- if test -f "$file" &&
- ! test "$file" = "index.html" &&
- ! test -f "${file%.html}.intermediate"; then
- rm "$file"
- fi
-done
-
-# Determine target files from the sources files present, declare dependencies
-# of the all.do script on them / build them if necessary.
-for file in *.rst *.md; do
- if test -f "$file"; then
- redo-ifchange "${file%.*}.intermediate"
- fi
-done
-for file in *.intermediate; do
- if test -f "$file"; then
- redo-ifchange "${file%.*}.html"
- fi
-done
-
-# Regenerate feed and index pages. Always.
-redo "feed.xml"
-redo "index.html"
+++ /dev/null
-#!/bin/sh
-
-if [ ! -f "$1" ]; then
- printf "Joe Sixpack"
-fi
+++ /dev/null
-#!/bin/sh
-
-# Pull in global dependencies.
-. ./helpers.sh
-intermediate_file="${1%.html}.intermediate"
-redo-ifchange title
-redo-ifchange "$intermediate_file"
-
-# Build entry data.
-blog_title=`read_and_escape_file title | head -1`
-title_html=`cat "$intermediate_file" | head -1`
-title_plaintext=`echo "$title_html" | html2text`
-title_plaintext_escaped=`escape_html "$title_plaintext"`
-body=`cat "$intermediate_file" | sed 1d`
-
-# Write first part of entry head.
-cat << EOF
-<!DOCTYPE html>
-<html>
-<head>
-EOF
-
-# Write remaining entry head and body.
-printf "<title>%s – %s</title>\n</head>\n<body>\n" "$blog_title" "$title_plaintext_escaped"
-#printf "<title>%s – %s</title>\n</head>\n<body>\n" "$blog_title" "$entry_title"
-printf "<h1>%s</h1>\n" "$title_html"
-printf "<section>\n%s\n</section>\n</body>\n</html>" "$body"
+++ /dev/null
-#!/bin/sh
-
-template=intermediate.tmpl
-uuidfile="${1%.intermediate}.uuid"
-redo-ifchange "$uuidfile"
-redo-ifchange "$template"
-mdfile="${1%.intermediate}.md"
-rstfile="${1%.intermediate}.rst"
-if [ -f "$rstfile" ]; then
- redo-ifchange "$rstfile"
- pandoc -f rst --template="$template" --mathml -t html5 "$rstfile" > "$3"
-elif [ -f "$mdfile" ]; then
- redo-ifchange "$mdfile"
- pandoc -f markdown --template="$template" --mathml -t html5 "$mdfile" > "$3"
-fi
+++ /dev/null
-#!/bin/sh
-
-if [ ! -f "$1" ]; then
- uuidgen > "$1"
-fi
+++ /dev/null
-#!/bin/sh
-
-# Pull in global dependencies.
-. ./helpers.sh
-redo-ifchange url
-redo-ifchange author
-redo-ifchange uuid
-redo-ifchange title
-
-# Build some variables. XML-escape even file contents that should not contain
-# dangerous characters, just to avoid any XML trouble.
-base_url=`cat url | head -1`
-url_protocol=`echo $base_url | cut -d ':' -f 1`
-url_basepath=`echo $base_url | cut -d '/' -f 3-`
-url_basepath_escaped=`escape_url "$url_basepath"`
-basepath="$url_protocol""://""$url_basepath_escaped"
-title=`read_and_escape_file title | head -1`
-author=`read_and_escape_file author | head -1`
-uuid=`read_and_escape_file uuid | head -1`
-
-# Write majority of feed head.
-cat << EOF
-<?xml version="1.0" encoding="utf-8"?>
-<feed xmlns="http://www.w3.org/2005/Atom">
-EOF
-printf "<link href=\"%s\" />\n" "$basepath"
-printf "<link href=\"%sfeed.xml\" rel=\"self\" />\n" "$basepath"
-printf "<title type=\"html\">%s</title>\n" "$title"
-printf "<author><name>%s</name></author>\n" "$author"
-printf "<id>urn:uuid:%s</id>\n" "$uuid"
-
-# Iterate through most recent entries (go by lastmod date of source files) to
-# build feed head "updated" element, and individual entries.
-first_run=0
-files=`ls -1t *.rst *.md | head -10 | tr '\n' $'\0'`
-oldIFS="$IFS"
-IFS=$'\0'
-for file in $files; do
- lastmod=`stat -c%y "$file"`
- lastmod_rfc3339=`date -u "+%Y-%m-%dT%TZ" -d "$lastmod"`
- if [ "$first_run" -lt "1" ]; then
- IFS="$oldIFS"
- printf "<updated>%s</updated>\n\n" "$lastmod_rfc3339"
- first_run=1
- fi
-
- # Build some variables and dependencies.
- intermediate_file="${file%.*}.intermediate"
- htmlfile=`escape_url "${file%.*}.html"`
- redo-ifchange "$intermediate_file"
- redo-ifchange "$uuidfile"
- title=`read_and_escape_file "$intermediate_file" | head -1`
- uuidfile="${file%.*}.uuid"
- uuid=`read_and_escape_file "$uuidfile" | head -1`
- body=`read_and_escape_file "$intermediate_file" | sed 1d`
- published=`stat -c%y "$uuidfile"`
- published_rfc3339=`date -u "+%Y-%m-%dT%TZ" -d "$published"`
-
- # Write entry.
- printf "<entry>\n"
- printf "<title type=\"html\">%s</title>\n" "$title"
- printf "<id>urn:uuid:%s</id>\n" "$uuid"
- printf "<updated>%s</updated>\n" "$lastmod_rfc3339"
- printf "<published>%s</published>\n" "$published_rfc3339"
- printf "<link href=\"%s%s\" />\n" "$basepath" "$htmlfile"
- printf "<content type=\"html\">\n%s\n</content>\n" "$body"
- printf "</entry>\n\n"
-done
-
-printf "</feed>"
+++ /dev/null
-#!/bin/sh
-
-escape_html()
-{
-out=`python3 -c 'import sys, html; print(html.escape(sys.argv[1]))' "$1"`
-printf "%s" "$out"
-}
-
-read_and_escape_file()
-{
-in=`cat "$1"`
-escape_html "$in"
-}
-
-escape_url()
-{
-out=`python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.argv[1]))' "$1"`
-printf "%s" "$out"
-}
+++ /dev/null
-#!/bin/sh
-
-# Pull in global dependencies.
-. ./helpers.sh
-redo-ifchange title
-
-# Write index head.
-cat << EOF
-<!DOCTYPE html>
-<html>
-<head>
-EOF
-blog_title=`read_and_escape_file title | head -1`
-printf "<title>%s</title>\n</head>\n<body>\n" "$blog_title"
-printf "<h1>%s</h1>\n<ul>\n" "$blog_title"
-
-# Iterate through entries sorted by lastmod of their source files, write entry
-# list.
-first_run=0
-files=`ls -1t *.rst *.md | tr '\n' $'\0'`
-oldIFS="$IFS"
-IFS=$'\0'
-for file in $files; do
- if [ "$first_run" -lt "1" ]; then
- IFS="$oldIFS"
- first_run=1
- fi
- intermediate_file="${file%.*}.intermediate"
- html_file="${file%.*}.html"
- redo-ifchange "$intermediate_file"
- redo-ifchange "$html_file"
- title_html=`cat "$intermediate_file" | head -1`
- html_file_escaped=`escape_url "$html_file"`
- printf "<li><a href=\"%s\" />%s</a></li>\n" "$html_file_escaped" "$title_html"
-done
-
-printf "</ul>\n</body>\n</html>"
+++ /dev/null
-$title$
-$body$
--- /dev/null
+#!/bin/sh
+
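+# Top-level build: prune stale targets, then (re)build everything that is out of date.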
+# Remove target files for which no source files can be found.
+for file in *.intermediate; do
+ if test -f "$file" &&
+ ! test -f "${file%.intermediate}.md" &&
+ ! test -f "${file%.intermediate}.rst"; then
+ rm "$file"
+ fi
+done
+for file in *.uuid; do
+ if test -f "$file" &&
+ ! test -f "${file%.uuid}.md" &&
+ ! test -f "${file%.uuid}.rst"; then
+ rm "$file"
+ fi
+done
+for file in *.html; do
+ if test -f "$file" &&
+ ! test "$file" = "index.html" &&
+ ! test -f "${file%.html}.intermediate"; then
+ rm "$file"
+ fi
+done
+
+# Determine target files from the source files present, declare the all.do
+# script's dependencies on them, and build them if necessary.
+for file in *.rst *.md; do
+ if test -f "$file"; then
+ redo-ifchange "${file%.*}.intermediate"
+ fi
+done
+for file in *.intermediate; do
+ if test -f "$file"; then
+ redo-ifchange "${file%.*}.html"
+ fi
+done
+
+# Regenerate feed and index pages. Always.
+redo "feed.xml"
+redo "index.html"
--- /dev/null
+#!/bin/sh
+
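+# Emit a placeholder author name unless the file has been created by hand.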
+if [ ! -f "$1" ]; then
+ printf "Joe Sixpack"
+fi
--- /dev/null
+#!/bin/sh
+
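+# Build a standalone HTML page for one entry from its .intermediate file; output goes to stdout.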
+# Pull in global dependencies.
+. ./helpers.sh
+intermediate_file="${1%.html}.intermediate"
+redo-ifchange title
+redo-ifchange "$intermediate_file"
+
+# Build entry data.
+blog_title=`read_and_escape_file title | head -1`
+title_html=`cat "$intermediate_file" | head -1`
+title_plaintext=`echo "$title_html" | html2text`
+title_plaintext_escaped=`escape_html "$title_plaintext"`
+body=`cat "$intermediate_file" | sed 1d`
+
+# Write first part of entry head.
+cat << EOF
+<!DOCTYPE html>
+<html>
+<head>
+EOF
+
+# Write remaining entry head and body.
+printf "<title>%s – %s</title>\n</head>\n<body>\n" "$blog_title" "$title_plaintext_escaped"
+printf "<h1>%s</h1>\n" "$title_html"
+printf "<section>\n%s\n</section>\n</body>\n</html>" "$body"
--- /dev/null
+#!/bin/sh
+
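+# Render an entry's .rst or .md source through pandoc into the .intermediate
+# HTML fragment ($3 is redo's temporary output file).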
+template=intermediate.tmpl
+uuidfile="${1%.intermediate}.uuid"
+redo-ifchange "$uuidfile"
+redo-ifchange "$template"
+mdfile="${1%.intermediate}.md"
+rstfile="${1%.intermediate}.rst"
+if [ -f "$rstfile" ]; then
+ redo-ifchange "$rstfile"
+ pandoc -f rst --template="$template" --mathml -t html5 "$rstfile" > "$3"
+elif [ -f "$mdfile" ]; then
+ redo-ifchange "$mdfile"
+ pandoc -f markdown --template="$template" --mathml -t html5 "$mdfile" > "$3"
+fi
--- /dev/null
+#!/bin/sh
+
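+# Generate a UUID for the entry the first time it is built; the feed script
+# later uses this file's mtime as the publication date.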
+if [ ! -f "$1" ]; then
+ uuidgen > "$1"
+fi
--- /dev/null
+#!/bin/sh
+
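+# Assemble the Atom feed from the ten most recently modified entries.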
+# Pull in global dependencies.
+. ./helpers.sh
+redo-ifchange url
+redo-ifchange author
+redo-ifchange uuid
+redo-ifchange title
+
+# Build some variables. XML-escape even file contents that should not contain
+# dangerous characters, just to avoid any XML trouble.
+base_url=`cat url | head -1`
+url_protocol=`echo $base_url | cut -d ':' -f 1`
+url_basepath=`echo $base_url | cut -d '/' -f 3-`
+url_basepath_escaped=`escape_url "$url_basepath"`
+basepath="$url_protocol""://""$url_basepath_escaped"
+title=`read_and_escape_file title | head -1`
+author=`read_and_escape_file author | head -1`
+uuid=`read_and_escape_file uuid | head -1`
+
+# Write majority of feed head.
+cat << EOF
+<?xml version="1.0" encoding="utf-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+EOF
+printf "<link href=\"%s\" />\n" "$basepath"
+printf "<link href=\"%sfeed.xml\" rel=\"self\" />\n" "$basepath"
+printf "<title type=\"html\">%s</title>\n" "$title"
+printf "<author><name>%s</name></author>\n" "$author"
+printf "<id>urn:uuid:%s</id>\n" "$uuid"
+
+# Iterate over the most recent entries (ordered by last-modification date of
+# their source files) to build the feed's "updated" element and the individual
+# entries.
+first_run=0
+# Split the file list on newlines only, so names with spaces survive.
+files=`ls -1t *.rst *.md | head -10`
+oldIFS="$IFS"
+IFS='
+'
+for file in $files; do
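+ # Note: stat -c and date -d are GNU extensions; this assumes GNU coreutils.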
+ lastmod=`stat -c%y "$file"`
+ lastmod_rfc3339=`date -u "+%Y-%m-%dT%TZ" -d "$lastmod"`
+ if [ "$first_run" -lt "1" ]; then
+ IFS="$oldIFS"
+ printf "<updated>%s</updated>\n\n" "$lastmod_rfc3339"
+ first_run=1
+ fi
+
+ # Build some variables and dependencies.
+ intermediate_file="${file%.*}.intermediate"
+ uuidfile="${file%.*}.uuid"
+ htmlfile=`escape_url "${file%.*}.html"`
+ redo-ifchange "$intermediate_file"
+ redo-ifchange "$uuidfile"
+ title=`read_and_escape_file "$intermediate_file" | head -1`
+ uuid=`read_and_escape_file "$uuidfile" | head -1`
+ body=`read_and_escape_file "$intermediate_file" | sed 1d`
+ published=`stat -c%y "$uuidfile"`
+ published_rfc3339=`date -u "+%Y-%m-%dT%TZ" -d "$published"`
+
+ # Write entry.
+ printf "<entry>\n"
+ printf "<title type=\"html\">%s</title>\n" "$title"
+ printf "<id>urn:uuid:%s</id>\n" "$uuid"
+ printf "<updated>%s</updated>\n" "$lastmod_rfc3339"
+ printf "<published>%s</published>\n" "$published_rfc3339"
+ printf "<link href=\"%s%s\" />\n" "$basepath" "$htmlfile"
+ printf "<content type=\"html\">\n%s\n</content>\n" "$body"
+ printf "</entry>\n\n"
+done
+
+printf "</feed>"
--- /dev/null
+#!/bin/sh
+
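+# Escaping helpers shared by the .do scripts; they shell out to python3 for
+# HTML and URL escaping.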
+escape_html()
+{
+out=`python3 -c 'import sys, html; print(html.escape(sys.argv[1]))' "$1"`
+printf "%s" "$out"
+}
+
+read_and_escape_file()
+{
+in=`cat "$1"`
+escape_html "$in"
+}
+
+escape_url()
+{
+out=`python3 -c 'import sys, urllib.parse; print(urllib.parse.quote(sys.argv[1]))' "$1"`
+printf "%s" "$out"
+}
--- /dev/null
+#!/bin/sh
+
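+# Build the index page: a list of all entries, newest first, linking to each entry's HTML page.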
+# Pull in global dependencies.
+. ./helpers.sh
+redo-ifchange title
+
+# Write index head.
+cat << EOF
+<!DOCTYPE html>
+<html>
+<head>
+EOF
+blog_title=`read_and_escape_file title | head -1`
+printf "<title>%s</title>\n</head>\n<body>\n" "$blog_title"
+printf "<h1>%s</h1>\n<ul>\n" "$blog_title"
+
+# Iterate over the entries sorted by last-modification date of their source
+# files and write the entry list.
+first_run=0
+# Split the file list on newlines only, so names with spaces survive.
+files=`ls -1t *.rst *.md`
+oldIFS="$IFS"
+IFS='
+'
+for file in $files; do
+ if [ "$first_run" -lt "1" ]; then
+ IFS="$oldIFS"
+ first_run=1
+ fi
+ intermediate_file="${file%.*}.intermediate"
+ html_file="${file%.*}.html"
+ redo-ifchange "$intermediate_file"
+ redo-ifchange "$html_file"
+ title_html=`cat "$intermediate_file" | head -1`
+ html_file_escaped=`escape_url "$html_file"`
+ printf "<li><a href=\"%s\">%s</a></li>\n" "$html_file_escaped" "$title_html"
+done
+
+printf "</ul>\n</body>\n</html>"
--- /dev/null
+$title$
+$body$
--- /dev/null
+#!/bin/sh
+
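+# Emit a placeholder blog title unless the file has been created by hand.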
+if [ ! -f "$1" ]; then
+ printf "Yet another blog"
+fi
--- /dev/null
+#!/bin/sh
+
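+# Emit a placeholder base URL for feed links unless the file has been created by hand.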
+if [ ! -f "$1" ]; then
+ printf "http://example.org/"
+fi
--- /dev/null
+#!/bin/sh
+
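+# Generate the feed-wide UUID (the Atom feed's <id>) unless the file has been created by hand.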
+if [ ! -f "$1" ]; then
+ uuidgen
+fi
+++ /dev/null
-#!/bin/sh
-
-if [ ! -f "$1" ]; then
- printf "Yet another blog"
-fi
+++ /dev/null
-#!/bin/sh
-
-if [ ! -f "$1" ]; then
- printf "http://example.org/"
-fi
+++ /dev/null
-#!/bin/sh
-
-if [ ! -f "$1" ]; then
- uuidgen
-fi