home · contact · privacy
Strip URLs from markov texts.
[plomlombot-irc.git] / plomlombot.py
index f34d97a821378c5e092cf064a78a2137d82350c7..c8c11f45ebb03a3e0a73a9339ba82013813278b7 100755 (executable)
@@ -6,10 +6,14 @@ import datetime
 import select
 import time
 import re
-import urllib.request
-import http.client
-import html
-import html.parser
+import requests
+import bs4
+import random
+import hashlib
+import os
+import plomsearch
+
+URLREGEX = r"(https?://[^\s>]+)"
 
 # Defaults, may be overwritten by command line arguments.
 SERVER = "irc.freenode.net"
@@ -19,22 +23,6 @@ USERNAME = "plomlombot"
 NICKNAME = USERNAME
 
 
-class HTMLParser(html.parser.HTMLParser):
-    def __init__(self, html, tag):
-        super().__init__()
-        self._tag = ""
-        self.data = ""
-        self.feed(html)
-    def handle_starttag(self, tag, attrs):
-        if self.data == "":
-            self._tag = tag
-    def handle_endtag(self, tag):
-        self._tag = ""
-    def handle_data(self, data):
-        if self._tag != "":
-            self.data = data
-
-
 class ExceptionForRestart(Exception):
     pass
 
@@ -107,80 +95,265 @@ class IO:
         return line
 
 
-def init_session(server, port, timeout, nickname, username, channel):
-    print("CONNECTING TO " + server)
-    io = IO(server, port, timeout)
-    io.send_line("NICK " + nickname)
-    io.send_line("USER " + username + " 0 * : ")
-    io.send_line("JOIN " + channel)
-    return io
-
-
-def lineparser_loop(io, nickname):
-
-    def act_on_privmsg(tokens):
-
-        def url_check(msg):
-
-            def notice(msg):
-                io.send_line("NOTICE " + target + " :" + msg)
-
-            matches = re.findall("(https?://[^\s>]+)", msg)
-            for i in range(len(matches)):
-                url = matches[i]
-                request = urllib.request.Request(url, headers={
-                    "User-Agent": "plomlombot"
-                })
-                try:
-                    webpage = urllib.request.urlopen(request, timeout=15)
-                except (urllib.error.HTTPError, urllib.error.URLError,
-                        UnicodeError, http.client.BadStatusLine) as error:
-                    notice("TROUBLE FOLLOWING URL: " + str(error))
-                    continue
-                charset = webpage.info().get_content_charset()
-                if not charset:
-                    notice("TROUBLE READING PAGE TITLE: no charset in header")
-                    continue
-                content_type = webpage.info().get_content_type()
-                if content_type not in ('text/html', 'text/xml',
-                                        'application/xhtml+xml'):
-                    notice("TROUBLE READING PAGE TITLE: bad content type "
-                           + content_type)
-                    continue
-                content = webpage.read().decode(charset)
-                title = HTMLParser(content, "title").data
-                title = html.unescape(title)
-                notice("PAGE TITLE FOR URL: " + title)
-
-        sender = ""
-        for rune in tokens[0]:
-            if rune == "!":
-                break
-            if rune != ":":
-                sender += rune
-        receiver = ""
-        for rune in tokens[2]:
-            if rune == "!":
+def handle_command(command, argument, notice, target, session):
+    hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+    quotesfile_name = "quotes_" + hash_string
+
+    def addquote():
+        if not os.access(quotesfile_name, os.F_OK):
+            quotesfile = open(quotesfile_name, "w")
+            quotesfile.write("QUOTES FOR " + target + ":\n")
+            quotesfile.close()
+        quotesfile = open(quotesfile_name, "a")
+        quotesfile.write(argument + "\n")
+        quotesfile.close()
+        quotesfile = open(quotesfile_name, "r")
+        lines = quotesfile.readlines()
+        quotesfile.close()
+        notice("ADDED QUOTE #" + str(len(lines) - 1))
+
+    def quote():
+
+        def help():
+            notice("SYNTAX: !quote [int] OR !quote search QUERY")
+            notice("QUERY may be a boolean grouping of quoted or unquoted " +
+                   "search terms, examples:")
+            notice("!quote search foo")
+            notice("!quote search foo AND (bar OR NOT baz)")
+            notice("!quote search \"foo\\\"bar\" AND ('NOT\"' AND \"'foo'\"" +
+                   " OR 'bar\\'baz')")
+
+        if "" == argument:
+            tokens = []
+        else:
+            tokens = argument.split(" ")
+        if (len(tokens) > 1 and tokens[0] != "search") or \
+            (len(tokens) == 1 and
+                (tokens[0] == "search" or not tokens[0].isdigit())):
+            help()
+            return
+        if not os.access(quotesfile_name, os.F_OK):
+            notice("NO QUOTES AVAILABLE")
+            return
+        quotesfile = open(quotesfile_name, "r")
+        lines = quotesfile.readlines()
+        quotesfile.close()
+        lines = lines[1:]
+        if len(tokens) == 1:
+            i = int(tokens[0])
+            if i == 0 or i > len(lines):
+                notice("THERE'S NO QUOTE OF THAT INDEX")
+                return
+            i = i - 1
+        elif len(tokens) > 1:
+            query = str.join(" ", tokens[1:])
+            try:
+                results = plomsearch.search(query, lines)
+            except plomsearch.LogicParserError as err:
+                notice("FAILED QUERY PARSING: " + str(err))
+                return
+            if len(results) == 0:
+                notice("NO QUOTES MATCHING QUERY")
+            else:
+                for result in results:
+                    notice("QUOTE #" + str(result[0] + 1) + " : " + result[1])
+            return
+        else:
+            i = random.randrange(len(lines))
+        notice("QUOTE #" + str(i + 1) + ": " + lines[i])
+
+    def markov():
+        from random import shuffle
+        select_length = 2
+        selections = []
+
+        def markov(snippet):
+            usable_selections = []
+            for i in range(select_length, 0, -1):
+                for selection in selections:
+                    add = True
+                    for j in range(i):
+                        if snippet[j] != selection[j]:
+                            add = False
+                            break
+                    if add:
+                        usable_selections += [selection]
+                if [] != usable_selections:
+                    break
+            if [] == usable_selections:
+                usable_selections = selections
+            shuffle(usable_selections)
+            return usable_selections[0][select_length]
+
+        def purge_undesired(tokens):
+            tokens = [token for token in tokens
+                      if None == re.match("^" + URLREGEX, token)]
+            for name in session.uses_in_chan:
+                while True:
+                    try:
+                        del(tokens[tokens.index(name)])
+                    except ValueError:
+                        break
+            return tokens
+
+        hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+        markovfeed_name = "markovfeed_" + hash_string
+        if not os.access(markovfeed_name, os.F_OK):
+            notice("NOT ENOUGH TEXT TO MARKOV.")
+            return
+        file = open(markovfeed_name, "r")
+        lines = file.readlines()
+        file.close()
+        tokens = []
+        for line in lines:
+            line = line.replace("\n", "")
+            tokens += line.split()
+        tokens = purge_undesired(tokens)
+        if len(tokens) <= select_length:
+            notice("NOT ENOUGH TEXT TO MARKOV.")
+            return
+        for i in range(len(tokens) - select_length):
+            token_list = []
+            for j in range(select_length + 1):
+                token_list += [tokens[i + j]]
+            selections += [token_list]
+        snippet = []
+        for i in range(select_length):
+            snippet += [""]
+        msg = ""
+        while 1:
+            new_end = markov(snippet)
+            if len(msg) + len(new_end) > 200:
                 break
-            if rune != ":":
-                receiver += rune
-        target = sender
-        if receiver != nickname:
-            target = receiver
-        msg = str.join(" ", tokens[3:])[1:]
-        url_check(msg)
-
-    while True:
-        line = io.recv_line()
-        if not line:
-            continue
-        tokens = line.split(" ")
-        if len(tokens) > 1:
-            if tokens[1] == "PRIVMSG":
-                act_on_privmsg(tokens)
-            if tokens[0] == "PING":
-                io.send_line("PONG " + tokens[1])
+            msg += new_end + " "
+            for i in range(select_length - 1):
+                snippet[i] = snippet[i + 1]
+            snippet[select_length - 1] = new_end
+        notice(msg.lower() + "malkovich.")
+
+    if "addquote" == command:
+        addquote()
+    elif "quote" == command:
+        quote()
+    elif "markov" == command:
+        markov()
+
+
+def handle_url(url, notice, show_url=False):
 
+    def mobile_twitter_hack(url):
+        re1 = r'https?://(mobile\.twitter\.com/)[^/]+(/status/)'
+        re2 = r'https?://mobile\.twitter\.com/([^/]+)/status/([^\?/]+)'
+        m = re.search(re1, url)
+        if m and m.group(1) == 'mobile.twitter.com/' \
+                and m.group(2) == '/status/':
+            m = re.search(re2, url)
+            url = 'https://twitter.com/' + m.group(1) + '/status/' + m.group(2)
+            handle_url(url, notice, True)
+            return True
+
+    try:
+        r = requests.get(url, timeout=15)
+    except (requests.exceptions.TooManyRedirects,
+            requests.exceptions.ConnectionError,
+            requests.exceptions.InvalidURL,
+            requests.exceptions.InvalidSchema) as error:
+        notice("TROUBLE FOLLOWING URL: " + str(error))
+        return
+    if mobile_twitter_hack(url):
+        return
+    title = bs4.BeautifulSoup(r.text, "html.parser").title
+    if title:
+        prefix = "PAGE TITLE: "
+        if show_url:
+            prefix = "PAGE TITLE FOR <" + url + ">: "
+        notice(prefix + title.get_text().strip())
+    else:
+        notice("PAGE HAS NO TITLE TAG")
+
+
+class Session:
+
+    def __init__(self, io, username, nickname, channel):
+        self.io = io
+        self.nickname = nickname
+        self.channel = channel
+        self.uses_in_chan = []
+        self.io.send_line("NICK " + self.nickname)
+        self.io.send_line("USER " + username + " 0 * : ")
+        self.io.send_line("JOIN " + self.channel)
+
+    def loop(self):
+
+        def handle_privmsg(tokens):
+
+            def handle_input(msg, target):
+
+                def notice(msg):
+                    self.io.send_line("NOTICE " + target + " :" + msg)
+
+                matches = re.findall(URLREGEX, msg)
+                for i in range(len(matches)):
+                    handle_url(matches[i], notice)
+                if "!" == msg[0]:
+                    tokens = msg[1:].split()
+                    argument = str.join(" ", tokens[1:])
+                    handle_command(tokens[0], argument, notice, target, self)
+                    return
+                hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+                markovfeed_name = "markovfeed_" + hash_string
+                file = open(markovfeed_name, "a")
+                file.write(msg + "\n")
+                file.close()
+
+            sender = ""
+            for rune in tokens[0]:
+                if rune == "!":
+                    break
+                if rune != ":":
+                    sender += rune
+            receiver = ""
+            for rune in tokens[2]:
+                if rune == "!":
+                    break
+                if rune != ":":
+                    receiver += rune
+            target = sender
+            if receiver != self.nickname:
+                target = receiver
+            msg = str.join(" ", tokens[3:])[1:]
+            handle_input(msg, target)
+
+        def name_from_join_or_part(tokens):
+            token = tokens[0][1:]
+            index_cut = token.find("@")
+            index_ex = token.find("!")
+            if index_ex > 0 and index_ex < index_cut:
+                index_cut = index_ex
+            return token[:index_cut]
+
+        while True:
+            line = self.io.recv_line()
+            if not line:
+                continue
+            tokens = line.split(" ")
+            if len(tokens) > 1:
+                if tokens[0] == "PING":
+                    self.io.send_line("PONG " + tokens[1])
+                elif tokens[1] == "PRIVMSG":
+                    handle_privmsg(tokens)
+                elif tokens[1] == "353":
+                    names = tokens[5:]
+                    names[0] = names[0][1:]
+                    self.uses_in_chan += names
+                elif tokens[1] == "JOIN":
+                    name = name_from_join_or_part(tokens)
+                    if name != self.nickname:
+                        self.uses_in_chan += [name]
+                elif tokens[1] == "PART":
+                    name = name_from_join_or_part(tokens)
+                    del(self.uses_in_chan[self.uses_in_chan.index(name)])
 
 def parse_command_line_arguments():
     parser = argparse.ArgumentParser()
@@ -205,12 +378,13 @@ def parse_command_line_arguments():
     opts, unknown = parser.parse_known_args()
     return opts
 
+
 opts = parse_command_line_arguments()
 while True:
     try:
-        io = init_session(opts.server, opts.port, opts.timeout, opts.nickname,
-                          opts.username, opts.CHANNEL)
-        lineparser_loop(io, opts.nickname)
+        io = IO(opts.server, opts.port, opts.timeout)
+        session = Session(io, opts.username, opts.nickname, opts.CHANNEL)
+        session.loop()
     except ExceptionForRestart:
         io.socket.close()
         continue