t* fefes blog on gopher
URI git clone git://git.codevoid.de/fefe-gopher
DIR Log
DIR Files
DIR Refs
DIR LICENSE
---
DIR commit 1f3177c6fbe58e3f4ef62b99a7f7828d91129538
URI Author: Stefan Hagen <github@textmail.me>
Date: Sun, 13 May 2018 09:42:28 +0200
Initial commit
Diffstat:
A LICENSE | 15 +++++++++++++++
A fefe.dcgi | 111 ++++++++++++++++++++++++++++++
2 files changed, 126 insertions(+), 0 deletions(-)
---
DIR diff --git a/LICENSE b/LICENSE
t@@ -0,0 +1,15 @@
+ Version 2, December 2004
+
+ Copyright (C) 2018 Stefan Hagen <sh at vimcode dot de>
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
+
+
DIR diff --git a/fefe.dcgi b/fefe.dcgi
t@@ -0,0 +1,111 @@
+#!/usr/local/bin/perl
+
+# LICENSE: WTFPL
+# THE CODE IS PROVIDED "AS IS". DO WITH IT WHAT YOU WANT
+# Copyright 2018 Stefan Hagen <sh at vimcode dot de>
+
+use strict;
+use warnings;
+
+use Encode;
+use XML::LibXML qw( );
+use LWP::UserAgent;
+use HTML::LinkExtractor;
+use HTML::Restrict;
+use Text::Wrap;
+$Text::Wrap::columns=80;
+
+print '
+  ___       __               ___  _
+ | __|___  / _| ___  ___    | _ )| | ___  __ _
+ | _|/ -_)|  _|/ -_)(_-<    | _ \| |/ _ \/ _` |
+ |_| \___||_|  \___|/__/    |___/|_|\___/\__, |
+ On Gopher (unofficial)                  |___/
+-------------------------------------------------------------------------------
+[h| Visit Fefes Blog on the Internet|URL:https://blog.fefe.de|codevoid.de|70]
+-------------------------------------------------------------------------------
+
+
+';
+
+# Fefes Blog Config
+my $protocol = "https";
+my $server = "blog.fefe.de";
+my $uri = "/rss.xml?html";
+
+# fetch data
+my $REST = { HOST => "$server",
+             URL  => "$protocol://$server$uri" };
+$REST->{UA} = LWP::UserAgent->new(keep_alive => 0, timeout => 5);
+$REST->{UA}->agent("codevoid-fefe-gopherproxy/0.1");
+$REST->{resource} = $REST->{URL};
+$REST->{request} = HTTP::Request->new( GET => $REST->{resource} );
+$REST->{response} = $REST->{UA}->request( $REST->{request} );
+
+# parse data
+my $parser = XML::LibXML->new();
+my $document = $parser->parse_string($REST->{response}->content);
+my $root = $document->documentElement();
+
+# loop through items
+foreach my $channel ($root->findnodes('channel')) {
+    foreach my $item ($channel->findnodes('item')) {
+
+        # Encode to proper utf8
+        my $description = encode("UTF-8", $item->findvalue('description'));
+
+        # Search for links
+        my $LX = HTML::LinkExtractor->new();
+        $LX->strip(1);
+        $LX->parse(\$description);
+
+        # Replace some HTML elements
+        my $HR = HTML::Restrict->new();
+        $description =~ s/<p>/\n\n/g;
+        $description =~ s/<blockquote>/\n\n /g;
+        $description =~ s/<\/blockquote>/\n\n/g;
+
+        # Strip remaining html
+        my $description_clean = $HR->process($description);
+
+        # htmldecode (quick fix - could be done properly)
+        $description_clean =~ s/&amp;/&/gi;
+
+        # Loop over links, match their text, add [counter] and generate output.
+        my $c = 0;
+        my $links = "";
+        foreach my $link ($LX->links) {
+            foreach my $linkitem (@$link) {
+                $c++;
+                $description_clean =~ s/(\Q$linkitem->{_TEXT}\E)/$1\[$c]/gi;
+
+                # shorten links
+                my $short = $linkitem->{href};
+                if(length($short) > 70) { $short = substr($short,0,70)." ..."; }
+
+                # add link to output scalar
+                $links .= sprintf("[h|[%i]: %s|URL:%s|codevoid.de|70]\n", $c, $short, $linkitem->{href});
+            }
+        }
+
+        # Wrap to 80 character width
+        $description_clean = wrap("","",$description_clean)."\n";
+
+        # fix geomyidae ^t design
+        $description_clean =~ s/^t/&&/;
+
+        # nobody needs more than one blank line.
+        $description_clean =~ s/\n\n(\n)*/\n\n/g;
+
+        # print!
+        print $description_clean, "\nLinks:\n", $links, "\n";
+        print "*******************************************************************************\n\n";
+    }
+}
+
+# TODO:
+# * consolidate the regexes and run them in only two passes (before parsing => strip / replace html,
+#   after parsing => format output)
+# * properly htmldecode the output (a possible approach is sketched below)
+# * maybe ask fefe to include the dates in the feed.
+# * add a source link somewhere
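
Not part of the commit above, just a sketch of how the "properly htmldecode"
TODO could be addressed: HTML::Entities (from the HTML-Parser distribution,
which the modules used here most likely already pull in as a dependency)
decodes every entity instead of only &amp;.

    use HTML::Entities qw(decode_entities);

    # decode all HTML entities after the tags have been stripped,
    # replacing the quick &amp; substitution above
    my $description_clean = decode_entities($HR->process($description));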