import select
import time
import re
-import urllib.request
-import http.client
-import html
+import requests
+import bs4
+import random
+import hashlib
+import os
+import plomsearch
# Defaults, may be overwritten by command line arguments.
SERVER = "irc.freenode.net"
TIMEOUT = 240
USERNAME = "plomlombot"
NICKNAME = USERNAME
+TWTFILE = ""
class ExceptionForRestart(Exception):
self._pingtest(send_ping)
return None
self.last_pong = time.time()
- received_runes = self.socket.recv(1024).decode("UTF-8")
+ received_bytes = self.socket.recv(1024)
+ try:
+ received_runes = received_bytes.decode("UTF-8")
+ except UnicodeDecodeError:
+ received_runes = received_bytes.decode("latin1")
if len(received_runes) == 0:
print("SOCKET CONNECTION BROKEN")
raise ExceptionForRestart
return line
-def init_session(server, port, timeout, nickname, username, channel):
- print("CONNECTING TO " + server)
- io = IO(server, port, timeout)
- io.send_line("NICK " + nickname)
- io.send_line("USER " + username + " 0 * : ")
- io.send_line("JOIN " + channel)
- return io
-
-
-def lineparser_loop(io, nickname):
-
- def act_on_privmsg(tokens):
-
- def url_check(msg):
-
- def notice(msg):
- io.send_line("NOTICE " + target + " :" + msg)
-
- matches = re.findall("(https?://[^\s>]+)", msg)
- for i in range(len(matches)):
- url = matches[i]
- request = urllib.request.Request(url, headers={
- "User-Agent": "plomlombot"
- })
- try:
- webpage = urllib.request.urlopen(request, timeout=15)
- except (urllib.error.HTTPError, urllib.error.URLError,
- UnicodeError, http.client.BadStatusLine) as error:
- notice("TROUBLE FOLLOWING URL: " + str(error))
- continue
- charset = webpage.info().get_content_charset()
- if not charset:
- charset = "utf-8"
- content_type = webpage.info().get_content_type()
- if content_type not in ('text/html', 'text/xml',
- 'application/xhtml+xml'):
- notice("TROUBLE READING PAGE TITLE: bad content type "
- + content_type)
- continue
- content = webpage.read().decode(charset)
- title = str(content).split('<title>')[1].split('</title>')[0]
- title = html.unescape(title)
- notice("PAGE TITLE FOR URL: " + title)
-
- sender = ""
- for rune in tokens[0]:
- if rune == "!":
+def handle_command(command, argument, notice, target, session):
+    # Dispatch a "!command" received on IRC. `command` is the bare command
+    # word, `argument` the rest of the message, `notice` a callable sending a
+    # NOTICE to `target` (channel or sender nick), `session` the active
+    # Session (provides twtfile path and the channel user list).
+    hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+    # Per-target quotes file; target is hashed so it is filename-safe.
+    quotesfile_name = "quotes_" + hash_string
+
+    def addquote():
+        # Append `argument` as a new quote; create the file with a header
+        # line on first use.
+        if not os.access(quotesfile_name, os.F_OK):
+            quotesfile = open(quotesfile_name, "w")
+            quotesfile.write("QUOTES FOR " + target + ":\n")
+            quotesfile.close()
+        quotesfile = open(quotesfile_name, "a")
+        quotesfile.write(argument + "\n")
+        quotesfile.close()
+        quotesfile = open(quotesfile_name, "r")
+        lines = quotesfile.readlines()
+        quotesfile.close()
+        # First line is the header, so the quote count is len(lines) - 1.
+        notice("ADDED QUOTE #" + str(len(lines) - 1))
+
+    def quote():
+        # !quote          -> random quote
+        # !quote N        -> quote number N (1-based)
+        # !quote search Q -> all quotes matching boolean query Q
+
+        def help():
+            notice("SYNTAX: !quote [int] OR !quote search QUERY")
+            notice("QUERY may be a boolean grouping of quoted or unquoted " +
+                   "search terms, examples:")
+            notice("!quote search foo")
+            notice("!quote search foo AND (bar OR NOT baz)")
+            notice("!quote search \"foo\\\"bar\" AND ('NOT\"' AND \"'foo'\"" +
+                   " OR 'bar\\'baz')")
+
+        if "" == argument:
+            tokens = []
+        else:
+            tokens = argument.split(" ")
+        # Reject anything that is neither a single integer nor a
+        # "search ..." form.
+        if (len(tokens) > 1 and tokens[0] != "search") or \
+            (len(tokens) == 1 and
+                (tokens[0] == "search" or not tokens[0].isdigit())):
+            help()
+            return
+        if not os.access(quotesfile_name, os.F_OK):
+            notice("NO QUOTES AVAILABLE")
+            return
+        quotesfile = open(quotesfile_name, "r")
+        lines = quotesfile.readlines()
+        quotesfile.close()
+        # Drop the header line; quotes are 1-indexed from the user's view.
+        lines = lines[1:]
+        if len(tokens) == 1:
+            i = int(tokens[0])
+            # isdigit() above guarantees i >= 0 here.
+            if i == 0 or i > len(lines):
+                notice("THERE'S NO QUOTE OF THAT INDEX")
+                return
+            i = i - 1
+        elif len(tokens) > 1:
+            query = str.join(" ", tokens[1:])
+            try:
+                results = plomsearch.search(query, lines)
+            except plomsearch.LogicParserError as err:
+                notice("FAILED QUERY PARSING: " + str(err))
+                return
+            if len(results) == 0:
+                notice("NO QUOTES MATCHING QUERY")
+            else:
+                for result in results:
+                    notice("QUOTE #" + str(result[0] + 1) + " : " + result[1])
+            return
+        else:
+            i = random.randrange(len(lines))
+        notice("QUOTE #" + str(i + 1) + ": " + lines[i])
+
+    def markov():
+        # Generate a pseudo-sentence from this channel's markovfeed file
+        # using an order-2 (select_length) chain, and NOTICE it.
+        from random import choice
+        select_length = 2
+        selections = []
+
+        def markov(snippet):
+            # Pick the next token: prefer selections whose prefix matches
+            # the longest possible tail of `snippet`; fall back to any
+            # selection when nothing matches.
+            usable_selections = []
+            for i in range(select_length, 0, -1):
+                for selection in selections:
+                    add = True
+                    for j in range(i):
+                        j += 1
+                        if snippet[-j] != selection[-(j+1)]:
+                            add = False
+                            break
+                    if add:
+                        usable_selections += [selection]
+                if [] != usable_selections:
+                    break
+            if [] == usable_selections:
+                usable_selections = selections
+            selection = choice(usable_selections)
+            return selection[select_length]
+
+        hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+        markovfeed_name = "markovfeed_" + hash_string
+        if not os.access(markovfeed_name, os.F_OK):
+            notice("NOT ENOUGH TEXT TO MARKOV.")
+            return
+        file = open(markovfeed_name, "r")
+        lines = file.readlines()
+        file.close()
+        tokens = []
+        for line in lines:
+            line = line.replace("\n", "").lower()
+            tokens += line.split()
+        if len(tokens) <= select_length:
+            notice("NOT ENOUGH TEXT TO MARKOV.")
+            return
+        # Replace URL tokens by a placeholder so the chain does not learn
+        # them literally; real URLs are substituted back in at the end.
+        urls = []
+        url_escape = "\nURL"
+        url_starts = ["http://", "https://", "<http://", "<https://"]
+        for i in range(len(tokens)):
+            for url_start in url_starts:
+                if tokens[i][:len(url_start)] == url_start:
+                    length = len(tokens[i])
+                    if url_start[0] == "<":
+                        try:
+                            length = tokens[i].index(">") + 1
+                        except ValueError:
+                            pass
+                    urls += [tokens[i][:length]]
+                    tokens[i] = url_escape + tokens[i][length:]
+                    break
+        # Build all (select_length + 1)-grams.
+        for i in range(len(tokens) - select_length):
+            token_list = []
+            for j in range(select_length + 1):
+                token_list += [tokens[i + j]]
+            selections += [token_list]
+        snippet = []
+        for i in range(select_length):
+            snippet += [""]
+        msg = ""
+        while 1:
+            new_end = markov(snippet)
+            # Avoid highlighting channel members: mask leading nicks.
+            for name in session.users_in_chan:
+                if new_end[:len(name)] == name.lower():
+                    new_end = "malkovich" + new_end[len(name):]
+                    break
+            if len(msg) + len(new_end) > 200:
                break
-        if rune != ":":
-            sender += rune
-        receiver = ""
-        for rune in tokens[2]:
-            if rune == "!":
+            msg += new_end + " "
+            for i in range(select_length - 1):
+                snippet[i] = snippet[i + 1]
+            snippet[select_length - 1] = new_end
+        while True:
+            index = msg.find(url_escape)
+            if index < 0:
                break
-            if rune != ":":
-                receiver += rune
-        target = sender
-        if receiver != nickname:
-            target = receiver
-        msg = str.join(" ", tokens[3:])[1:]
-        url_check(msg)
-
-    while True:
-        line = io.recv_line()
-        if not line:
-            continue
-        tokens = line.split(" ")
-        if len(tokens) > 1:
-            if tokens[1] == "PRIVMSG":
-                act_on_privmsg(tokens)
-            if tokens[0] == "PING":
-                io.send_line("PONG " + tokens[1])
+            msg = msg.replace(url_escape, choice(urls), 1)
+        notice(msg + "malkovich.")
+
+    def twt():
+        # Append `argument` as a timestamped twtxt entry to session.twtfile.
+        def try_open(mode):
+            try:
+                twtfile = open(session.twtfile, mode)
+            except (PermissionError, FileNotFoundError) as err:
+                notice("CAN'T ACCESS OR CREATE TWT FILE: " + str(err))
+                return None
+            return twtfile
+
+        from datetime import datetime
+        # Create the file first if it does not exist yet.
+        if not os.access(session.twtfile, os.F_OK):
+            twtfile = try_open("w")
+            if None == twtfile:
+                return
+            twtfile.close()
+        twtfile = try_open("a")
+        if None == twtfile:
+            return
+        # twtxt format: ISO timestamp, TAB, text.
+        twtfile.write(datetime.utcnow().isoformat() + "\t" + argument + "\n")
+        twtfile.close()
+        notice("WROTE TWT.")
+
+    # Dispatch; unknown commands are silently ignored.
+    if "addquote" == command:
+        addquote()
+    elif "quote" == command:
+        quote()
+    elif "markov" == command:
+        markov()
+    elif "twt" == command:
+        twt()
+def handle_url(url, notice, show_url=False):
+    # Fetch `url` and NOTICE its HTML <title> to the channel. With
+    # show_url=True the URL itself is included in the notice (used when the
+    # URL was rewritten by the mobile-Twitter redirect below).
+
+    def mobile_twitter_hack(url):
+        # mobile.twitter.com pages carry no useful <title>; rewrite to the
+        # desktop URL and handle that instead. Returns True when it took over.
+        re1 = r'https?://(mobile.twitter.com/)[^/]+(/status/)'
+        re2 = r'https?://mobile.twitter.com/([^/]+)/status/([^\?/]+)'
+        m = re.search(re1, url)
+        if m and m.group(1) == 'mobile.twitter.com/' \
+                and m.group(2) == '/status/':
+            m = re.search(re2, url)
+            url = 'https://twitter.com/' + m.group(1) + '/status/' + m.group(2)
+            handle_url(url, notice, True)
+            return True
+
+    try:
+        r = requests.get(url, timeout=15)
+    except (requests.exceptions.TooManyRedirects,
+            requests.exceptions.ConnectionError,
+            requests.exceptions.InvalidURL,
+            UnicodeError,
+            requests.exceptions.Timeout,
+            requests.exceptions.InvalidSchema) as error:
+        # Timeout was previously not caught even though timeout=15 is set,
+        # so a slow server killed the whole bot loop.
+        notice("TROUBLE FOLLOWING URL: " + str(error))
+        return
+    if mobile_twitter_hack(url):
+        return
+    title = bs4.BeautifulSoup(r.text, "html.parser").title
+    # title.string is None when the tag contains nested markup; guard it so
+    # .strip() cannot raise AttributeError.
+    if title and title.string:
+        prefix = "PAGE TITLE: "
+        if show_url:
+            prefix = "PAGE TITLE FOR <" + url + ">: "
+        notice(prefix + title.string.strip())
+    else:
+        notice("PAGE HAS NO TITLE TAG")
+
+
+class Session:
+    # One IRC session: registers nick and user, joins the channel, then
+    # loops over incoming lines dispatching PRIVMSGs, PINGs, and user-list
+    # bookkeeping (353 / JOIN / PART).
+
+    def __init__(self, io, username, nickname, channel, twtfile):
+        self.io = io
+        self.nickname = nickname
+        self.channel = channel
+        self.users_in_chan = []  # nicks currently believed to be present
+        self.twtfile = twtfile
+        self.io.send_line("NICK " + self.nickname)
+        self.io.send_line("USER " + username + " 0 * : ")
+        self.io.send_line("JOIN " + self.channel)
+
+    def loop(self):
+        # Main dispatch loop; runs until the connection breaks (recv_line
+        # raises ExceptionForRestart upstream).
+
+        def handle_privmsg(tokens):
+
+            def handle_input(msg, target):
+
+                def notice(msg):
+                    self.io.send_line("NOTICE " + target + " :" + msg)
+
+                matches = re.findall(r"(https?://[^\s>]+)", msg)
+                for i in range(len(matches)):
+                    handle_url(matches[i], notice)
+                # startswith() instead of msg[0]: an empty trailing part
+                # ("PRIVMSG #chan :") used to raise IndexError here.
+                if msg.startswith("!"):
+                    tokens = msg[1:].split()
+                    # A bare "!" yields no command token; ignore it instead
+                    # of crashing on tokens[0].
+                    if tokens:
+                        argument = str.join(" ", tokens[1:])
+                        handle_command(tokens[0], argument, notice, target,
+                                       self)
+                    return
+                # Non-command text feeds the per-target markov corpus.
+                hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+                markovfeed_name = "markovfeed_" + hash_string
+                file = open(markovfeed_name, "a")
+                file.write(msg + "\n")
+                file.close()
+
+            # Sender nick from the ":nick!user@host" prefix.
+            sender = ""
+            for rune in tokens[0]:
+                if rune == "!":
+                    break
+                if rune != ":":
+                    sender += rune
+            receiver = ""
+            for rune in tokens[2]:
+                if rune == "!":
+                    break
+                if rune != ":":
+                    receiver += rune
+            # Private messages (receiver == own nick) are answered to the
+            # sender; channel messages to the channel.
+            target = sender
+            if receiver != self.nickname:
+                target = receiver
+            msg = str.join(" ", tokens[3:])[1:]
+            handle_input(msg, target)
+
+        def name_from_join_or_part(tokens):
+            # Extract the bare nick from a ":nick!user@host" prefix.
+            token = tokens[0][1:]
+            index_cut = token.find("@")
+            index_ex = token.find("!")
+            if index_ex > 0 and index_ex < index_cut:
+                index_cut = index_ex
+            return token[:index_cut]
+
+        while True:
+            line = self.io.recv_line()
+            if not line:
+                continue
+            tokens = line.split(" ")
+            if len(tokens) > 1:
+                if tokens[0] == "PING":
+                    self.io.send_line("PONG " + tokens[1])
+                elif tokens[1] == "PRIVMSG":
+                    handle_privmsg(tokens)
+                elif tokens[1] == "353":
+                    # NAMES reply; first name carries the leading ":".
+                    # NOTE(review): op/voice prefixes (@, +) are not
+                    # stripped here — confirm whether that is intended.
+                    names = tokens[5:]
+                    names[0] = names[0][1:]
+                    self.users_in_chan += names
+                elif tokens[1] == "JOIN":
+                    name = name_from_join_or_part(tokens)
+                    if name != self.nickname:
+                        self.users_in_chan += [name]
+                elif tokens[1] == "PART":
+                    name = name_from_join_or_part(tokens)
+                    # Guard: a PART from an untracked nick used to raise
+                    # ValueError via list.index() and kill the loop.
+                    if name in self.users_in_chan:
+                        self.users_in_chan.remove(name)
+
def parse_command_line_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-s, --server", action="store", dest="server",
parser.add_argument("-p, --port", action="store", dest="port", type=int,
default=PORT, help="port to connect to (default : "
+ str(PORT) + ")")
- parser.add_argument("-t, --timeout", action="store", dest="timeout",
+ parser.add_argument("-w, --wait", action="store", dest="timeout",
type=int, default=TIMEOUT,
help="timeout in seconds after which to attempt " +
"reconnect (default: " + str(TIMEOUT) + ")")
parser.add_argument("-n, --nickname", action="store", dest="nickname",
default=NICKNAME, help="nickname to use (default: "
+ NICKNAME + ")")
+ parser.add_argument("-t, --twtfile", action="store", dest="twtfile",
+ default=TWTFILE, help="twtfile to use (default: "
+ + TWTFILE + ")")
parser.add_argument("CHANNEL", action="store", help="channel to join")
opts, unknown = parser.parse_known_args()
return opts
+
opts = parse_command_line_arguments()
while True:
try:
- io = init_session(opts.server, opts.port, opts.timeout, opts.nickname,
- opts.username, opts.CHANNEL)
- lineparser_loop(io, opts.nickname)
+ io = IO(opts.server, opts.port, opts.timeout)
+ session = Session(io, opts.username, opts.nickname, opts.CHANNEL,
+ opts.twtfile)
+ session.loop()
except ExceptionForRestart:
io.socket.close()
continue