import random
import hashlib
import os
+import signal
import plomsearch
+import irclog
# Defaults, may be overwritten by command line arguments.
SERVER = "irc.freenode.net"
+CHANNEL = "#plomlombot-test"
PORT = 6667
TIMEOUT = 240
USERNAME = "plomlombot"
NICKNAME = USERNAME
+TWTFILE = ""
+DBDIR = os.path.expanduser("~/plomlombot_db")
+
+
+def write_to_file(path, mode, text):
+ f = open(path, mode)
+ f.write(text)
+ f.close()
class ExceptionForRestart(Exception):
pass
+class Line:
+
+ def __init__(self, line):
+ self.line = line
+ self.tokens = line.split(" ")
+ self.sender = ""
+ if self.tokens[0][0] == ":":
+ for rune in self.tokens[0][1:]:
+ if rune in {"!", "@"}:
+ break
+ self.sender += rune
+ self.receiver = ""
+ if len(self.tokens) > 2:
+ for rune in self.tokens[2]:
+ if rune in {"!", "@"}:
+ break
+ if rune != ":":
+ self.receiver += rune
+
+
+class Log:
+
+ def __init__(self, chandir, nickname, username, channel, rmlogs):
+ self.nickname = nickname
+ self.username = username
+ self.channel = channel
+ self.chandir = chandir
+ self.rmlogcycle = rmlogs
+ self.rawlogdir = chandir + "raw_logs/"
+ self.logdir = chandir + "logs/"
+ if not os.path.exists(self.logdir):
+ os.makedirs(self.logdir)
+ if not os.path.exists(self.rawlogdir):
+ os.makedirs(self.rawlogdir)
+
+ def log(self, line, sent=False):
+ identity = ""
+ separator = " > "
+ if sent:
+ separator = " "
+ line = Line("< " + line)
+ line.sender = self.nickname
+ identity = self.username + "@localhost"
+ else:
+ if type(line) == str:
+ line = Line(line)
+ now = datetime.datetime.utcnow()
+ form = "%Y-%m-%d %H:%M:%S UTC"
+ write_to_file(self.rawlogdir + now.strftime("%Y-%m-%d") + ".txt", "a",
+ now.strftime(form) + separator + line.line + "\n")
+ to_log = irclog.format_logline(line, self.channel, identity)
+ if to_log != None:
+ write_to_file(self.logdir + now.strftime("%Y-%m-%d") + ".txt", "a",
+ now.strftime(form) + " " + to_log + "\n")
+
+ def rmlogs(self):
+ if self.rmlogcycle > 0:
+ for f in os.listdir(self.logdir):
+ f = os.path.join(self.logdir, f)
+ if os.path.isfile(f) and \
+ os.stat(f).st_mtime < time.time() - self.rmlogcycle:
+ os.remove(f)
+
+ def separator_line(self):
+ now = datetime.datetime.utcnow()
+ write_to_file(self.logdir + now.strftime("%Y-%m-%d") + ".txt", "a",
+ "-----------------------\n")
+
+
class IO:
def __init__(self, server, port, timeout):
+ self.log = None
self.timeout = timeout
self.socket = socket.socket()
- self.socket.connect((server, port))
+ try:
+ self.socket.connect((server, port))
+ except TimeoutError:
+ raise ExceptionForRestart
self.socket.setblocking(0)
self.line_buffer = []
self.rune_buffer = ""
self.last_pong = time.time()
- self.servername = self.recv_line(send_ping=False).split(" ")[0][1:]
+ line = self.recv_line(send_ping=False)
+ if not line or len(line) < 1:
+ raise ExceptionForRestart
+ self.servername = line.split(" ")[0][1:]
def _pingtest(self, send_ping=True):
if self.last_pong + self.timeout < time.time():
print("NOT SENT LINE TO SERVER (too long): " + msg)
print("LINE TO SERVER: "
+ str(datetime.datetime.now()) + ": " + msg)
+ if self.log != None:
+ self.log.log(msg, True)
msg = msg + "\r\n"
msg_len = len(msg)
total_sent_len = 0
def recv_line(self, send_ping=True):
line = self._recv_line_wrapped(send_ping)
if line:
+ if self.log != None:
+ self.log.log(line)
print("LINE FROM SERVER " + str(datetime.datetime.now()) + ": " +
line)
return line
def handle_command(command, argument, notice, target, session):
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- quotesfile_name = "quotes_" + hash_string
def addquote():
- if not os.access(quotesfile_name, os.F_OK):
- quotesfile = open(quotesfile_name, "w")
- quotesfile.write("QUOTES FOR " + target + ":\n")
- quotesfile.close()
- quotesfile = open(quotesfile_name, "a")
- quotesfile.write(argument + "\n")
- quotesfile.close()
- quotesfile = open(quotesfile_name, "r")
+ if not os.access(session.quotesfile, os.F_OK):
+ write_to_file(session.quotesfile, "w",
+ "QUOTES FOR " + target + ":\n")
+ write_to_file(session.quotesfile, "a", argument + "\n")
+ quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
quotesfile.close()
- notice("ADDED QUOTE #" + str(len(lines) - 1))
+ notice("added quote #" + str(len(lines) - 1))
def quote():
def help():
- notice("SYNTAX: !quote [int] OR !quote search QUERY")
+ notice("syntax: !quote [int] OR !quote search QUERY "
+ "OR !quote offset-search [int] QUERY")
notice("QUERY may be a boolean grouping of quoted or unquoted " +
"search terms, examples:")
notice("!quote search foo")
notice("!quote search foo AND (bar OR NOT baz)")
notice("!quote search \"foo\\\"bar\" AND ('NOT\"' AND \"'foo'\"" +
" OR 'bar\\'baz')")
+ notice("The offset-search int argument defines how many matches "
+ "to skip (useful if results are above maximum number to "
+ "display).")
if "" == argument:
tokens = []
else:
tokens = argument.split(" ")
- if (len(tokens) > 1 and tokens[0] != "search") or \
- (len(tokens) == 1 and
- (tokens[0] == "search" or not tokens[0].isdigit())):
+ if (len(tokens) == 1 and not tokens[0].isdigit()) or \
+ (len(tokens) > 1 and
+ (tokens[0] not in {"search", "offset-search"} or
+ (tokens[0] == "offset-search" and
+ ((not len(tokens) > 2) or (not tokens[1].isdigit()))))):
help()
return
- if not os.access(quotesfile_name, os.F_OK):
- notice("NO QUOTES AVAILABLE")
+ if not os.access(session.quotesfile, os.F_OK):
+ notice("no quotes available")
return
- quotesfile = open(quotesfile_name, "r")
+ quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
quotesfile.close()
lines = lines[1:]
if len(tokens) == 1:
i = int(tokens[0])
if i == 0 or i > len(lines):
- notice("THERE'S NO QUOTE OF THAT INDEX")
+ notice("there's no quote of that index")
return
i = i - 1
elif len(tokens) > 1:
- query = str.join(" ", tokens[1:])
+ to_skip = 0
+ if tokens[0] == "search":
+ query = str.join(" ", tokens[1:])
+ elif tokens[0] == "offset-search":
+ to_skip = int(tokens[1])
+ query = str.join(" ", tokens[2:])
try:
results = plomsearch.search(query, lines)
except plomsearch.LogicParserError as err:
- notice("FAILED QUERY PARSING: " + str(err))
+ notice("failed query parsing: " + str(err))
return
if len(results) == 0:
- notice("NO QUOTES MATCHING QUERY")
+ notice("no quotes matching query")
else:
- for result in results:
- notice("QUOTE #" + str(result[0] + 1) + " : " + result[1])
+ if to_skip >= len(results):
+ notice("skipped all quotes matching query")
+ else:
+ notice("found %s matches, showing max. 3, skipping %s"
+ % (len(results), to_skip))
+ for i in range(len(results)):
+ if i >= to_skip and i < to_skip + 3:
+ result = results[i]
+ notice("quote #" + str(result[0] + 1) + ": "
+ + result[1][:-1])
return
else:
i = random.randrange(len(lines))
- notice("QUOTE #" + str(i + 1) + ": " + lines[i])
+ notice("quote #" + str(i + 1) + ": " + lines[i][:-1])
def markov():
- from random import choice
- select_length = 2
- selections = []
+
+ def help():
+ notice("syntax: !markov [integer from 1 to infinite]")
def markov(snippet):
usable_selections = []
selection = choice(usable_selections)
return selection[select_length]
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- markovfeed_name = "markovfeed_" + hash_string
- if not os.access(markovfeed_name, os.F_OK):
- notice("NOT ENOUGH TEXT TO MARKOV.")
+ if "" == argument:
+ tokens = []
+ else:
+ tokens = argument.split(" ")
+ if (len(tokens) > 1 or (len(tokens) == 1 and not tokens[0].isdigit())):
+ help()
+ return
+
+ from random import choice, shuffle
+ select_length = 2
+ if len(tokens) == 1:
+ n = int(tokens[0])
+ if n > 0:
+ select_length = n
+ else:
+ notice("bad value, using default: " + str(select_length))
+ selections = []
+
+ if not os.access(session.markovfile, os.F_OK):
+ notice("not enough text to markov for selection length")
return
- file = open(markovfeed_name, "r")
+
+ # Lowercase incoming lines, ensure they end in a sentence end mark.
+ file = open(session.markovfile, "r")
lines = file.readlines()
file.close()
tokens = []
+ sentence_end_markers = ".!?)("
for line in lines:
- line = line.replace("\n", "").lower()
+ line = line.lower().replace("\n", "")
+ if line and line[-1] not in sentence_end_markers:
+ line += "."
tokens += line.split()
- if len(tokens) <= select_length:
- notice("NOT ENOUGH TEXT TO MARKOV.")
+ if len(tokens) - 1 <= select_length:
+ notice("not enough text to markov")
return
+
+ # Replace URLs with escape string for now, so that the Markov selector
+ # won't see them as different strings. Stash replaced URLs in urls.
urls = []
url_escape = "\nURL"
url_starts = ["http://", "https://", "<http://", "<https://"]
urls += [tokens[i][:length]]
tokens[i] = url_escape + tokens[i][length:]
break
+
+ # For each snippet of select_length, use markov() to find continuation
+ # token from selections. Replace present users' names with malkovich.
+ # Start snippets with the beginning of a sentence, if possible.
for i in range(len(tokens) - select_length):
token_list = []
for j in range(select_length + 1):
snippet = []
for i in range(select_length):
snippet += [""]
+ shuffle(selections)
+ for i in range(len(selections)):
+ if selections[i][0][-1] in sentence_end_markers:
+ for j in range(select_length):
+ snippet[j] = selections[i][j + 1]
+ break
msg = ""
+ malkovich = "malkovich"
while 1:
new_end = markov(snippet)
- for name in session.uses_in_chan:
+ for name in session.users_in_chan:
if new_end[:len(name)] == name.lower():
- new_end = "malkovich" + new_end[len(name):]
+ new_end = malkovich + new_end[len(name):]
break
if len(msg) + len(new_end) > 200:
break
for i in range(select_length - 1):
snippet[i] = snippet[i + 1]
snippet[select_length - 1] = new_end
+
+ # Replace occurences of url escape string with random choice from urls.
while True:
index = msg.find(url_escape)
if index < 0:
break
msg = msg.replace(url_escape, choice(urls), 1)
- notice(msg + "malkovich.")
+
+ # More meaningful ways to randomly end sentences.
+ notice(msg + malkovich + ".")
+
+ def twt():
+ def try_open(mode):
+ try:
+ twtfile = open(session.twtfile, mode)
+ except (PermissionError, FileNotFoundError) as err:
+ notice("can't access or create twt file: " + str(err))
+ return None
+ return twtfile
+
+ from datetime import datetime
+ if not os.access(session.twtfile, os.F_OK):
+ twtfile = try_open("w")
+ if None == twtfile:
+ return
+ twtfile.close()
+ twtfile = try_open("a")
+ if None == twtfile:
+ return
+ twtfile.write(datetime.utcnow().isoformat() + "\t" + argument + "\n")
+ twtfile.close()
+ notice("wrote twt.")
if "addquote" == command:
addquote()
quote()
elif "markov" == command:
markov()
+ elif "twt" == command:
+ twt()
def handle_url(url, notice, show_url=False):
handle_url(url, notice, True)
return True
+ class TimeOut(Exception):
+ pass
+
+ def timeout_handler(ignore1, ignore2):
+ raise TimeOut("timeout")
+
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(15)
try:
- r = requests.get(url, timeout=15)
+ r = requests.get(url, headers = {'User-Agent': 'plomlombot'}, stream=True)
+ r.raw.decode_content = True
+ text = r.raw.read(10000000+1)
+ if len(text) > 10000000:
+ raise ValueError('Too large a response')
except (requests.exceptions.TooManyRedirects,
requests.exceptions.ConnectionError,
requests.exceptions.InvalidURL,
+ TimeOut,
+ UnicodeError,
+ ValueError,
requests.exceptions.InvalidSchema) as error:
- notice("TROUBLE FOLLOWING URL: " + str(error))
- return
+ signal.alarm(0)
+ notice("trouble following url: " + str(error))
+ return False
+ signal.alarm(0)
if mobile_twitter_hack(url):
- return
- title = bs4.BeautifulSoup(r.text, "html.parser").title
- if title:
- prefix = "PAGE TITLE: "
+ return True
+ title = bs4.BeautifulSoup(text, "html5lib").title
+ if title and title.string:
+ prefix = "page title: "
if show_url:
- prefix = "PAGE TITLE FOR <" + url + ">: "
+ prefix = "page title for <" + url + ">: "
notice(prefix + title.string.strip())
else:
- notice("PAGE HAS NO TITLE TAG")
+ notice("page has no title tag")
+ return True
class Session:
- def __init__(self, io, username, nickname, channel):
+ def __init__(self, io, username, nickname, sasl, channel, twtfile, dbdir, rmlogs,
+ markov_input, no_show_page_titles):
+ import base64
self.io = io
self.nickname = nickname
- self.channel = channel
- self.uses_in_chan = []
+ self.users_in_chan = []
+ self.twtfile = twtfile
+ hash_channel = hashlib.md5(channel.encode("utf-8")).hexdigest()
+ chandir = dbdir + "/" + hash_channel + "/"
+ self.markov_input = markov_input
+ self.markovfile = chandir + "markovfeed"
+ self.quotesfile = chandir + "quotes"
+ self.log = Log(chandir, self.nickname, username, channel, rmlogs)
+ if sasl:
+ self.io.send_line("CAP REQ :sasl")
self.io.send_line("NICK " + self.nickname)
self.io.send_line("USER " + username + " 0 * : ")
- self.io.send_line("JOIN " + self.channel)
+ if sasl:
+ self.io.send_line("AUTHENTICATE PLAIN")
+ auth = username + '\0' + username + '\0' + sasl
+ auth_encoded = base64.b64encode(auth.encode()).decode().rstrip()
+ self.io.send_line("AUTHENTICATE " + auth_encoded)
+ self.io.send_line("CAP END")
+ self.io.send_line("JOIN " + channel)
+ self.io.log = self.log
+ self.log.separator_line()
+ self.show_page_titles = not no_show_page_titles
def loop(self):
- def handle_privmsg(tokens):
-
- def handle_input(msg, target):
+ def handle_privmsg(line):
- def notice(msg):
- self.io.send_line("NOTICE " + target + " :" + msg)
+ def notice(msg):
+ line = "NOTICE " + target + " :" + msg
+ self.io.send_line(line)
+ target = line.sender
+ if line.receiver != self.nickname:
+ target = line.receiver
+ msg = str.join(" ", line.tokens[3:])[1:]
+ if self.show_page_titles:
matches = re.findall("(https?://[^\s>]+)", msg)
+ url_count = 0
for i in range(len(matches)):
- handle_url(matches[i], notice)
- if "!" == msg[0]:
- tokens = msg[1:].split()
- argument = str.join(" ", tokens[1:])
- handle_command(tokens[0], argument, notice, target, self)
- return
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- markovfeed_name = "markovfeed_" + hash_string
- file = open(markovfeed_name, "a")
- file.write(msg + "\n")
- file.close()
-
- sender = ""
- for rune in tokens[0]:
- if rune == "!":
- break
- if rune != ":":
- sender += rune
- receiver = ""
- for rune in tokens[2]:
- if rune == "!":
- break
- if rune != ":":
- receiver += rune
- target = sender
- if receiver != self.nickname:
- target = receiver
- msg = str.join(" ", tokens[3:])[1:]
- handle_input(msg, target)
-
- def name_from_join_or_part(tokens):
- token = tokens[0][1:]
- index_cut = token.find("@")
- index_ex = token.find("!")
- if index_ex > 0 and index_ex < index_cut:
- index_cut = index_ex
- return token[:index_cut]
+ if handle_url(matches[i], notice):
+ url_count += 1
+ if url_count == 3:
+ notice("maximum number of urls to parse per "
+ "message reached")
+ break
+ if len(msg) > 1 and "!" == msg[0]:
+ tokens = msg[1:].split()
+ argument = str.join(" ", tokens[1:])
+ handle_command(tokens[0], argument, notice, target, self)
+ return
+ if self.markov_input:
+ write_to_file(self.markovfile, "a", msg + "\n")
while True:
+ self.log.rmlogs()
line = self.io.recv_line()
if not line:
continue
- tokens = line.split(" ")
- if len(tokens) > 1:
- if tokens[0] == "PING":
- self.io.send_line("PONG " + tokens[1])
- elif tokens[1] == "PRIVMSG":
- handle_privmsg(tokens)
- elif tokens[1] == "353":
- names = tokens[5:]
+ line = Line(line)
+ if len(line.tokens) > 1:
+ if line.tokens[0] == "PING":
+ self.io.send_line("PONG " + line.tokens[1])
+ elif line.tokens[1] == "PRIVMSG":
+ handle_privmsg(line)
+ elif line.tokens[1] == "353":
+ names = line.tokens[5:]
names[0] = names[0][1:]
- self.uses_in_chan += names
- elif tokens[1] == "JOIN":
- name = name_from_join_or_part(tokens)
- if name != self.nickname:
- self.uses_in_chan += [name]
- elif tokens[1] == "PART":
- name = name_from_join_or_part(tokens)
- del(self.uses_in_chan[self.uses_in_chan.index(name)])
+ for i in range(len(names)):
+ names[i] = names[i].replace("@", "").replace("+", "")
+ self.users_in_chan += names
+ elif line.tokens[1] == "JOIN" and line.sender != self.nickname:
+ self.users_in_chan += [line.sender]
+ elif line.tokens[1] == "PART":
+ del(self.users_in_chan[self.users_in_chan.index(line.sender)])
+ elif line.tokens[1] == "NICK":
+ del(self.users_in_chan[self.users_in_chan.index(line.sender)])
+ self.users_in_chan += [line.receiver]
+
def parse_command_line_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-p, --port", action="store", dest="port", type=int,
default=PORT, help="port to connect to (default : "
+ str(PORT) + ")")
- parser.add_argument("-t, --timeout", action="store", dest="timeout",
+ parser.add_argument("-c, --channel", action="store", dest="channel",
+ default=CHANNEL, help="channel to join")
+ parser.add_argument("-w, --wait", action="store", dest="timeout",
type=int, default=TIMEOUT,
- help="timeout in seconds after which to attempt " +
+ help="timeout in seconds after which to attempt "
"reconnect (default: " + str(TIMEOUT) + ")")
parser.add_argument("-u, --username", action="store", dest="username",
default=USERNAME, help="username to use (default: "
parser.add_argument("-n, --nickname", action="store", dest="nickname",
default=NICKNAME, help="nickname to use (default: "
+ NICKNAME + ")")
- parser.add_argument("CHANNEL", action="store", help="channel to join")
+ parser.add_argument("-a, --authenticate", action="store", dest="sasl",
+ default=None, help="SASL password (default: none)")
+ parser.add_argument("-t, --twtxtfile", action="store", dest="twtfile",
+ default=TWTFILE, help="twtxt file to use (default: "
+ + TWTFILE + ")")
+ parser.add_argument("-d, --dbdir", action="store", dest="dbdir",
+ default=DBDIR, help="directory to store DB files in")
+ parser.add_argument("-r, --rmlogs", action="store", dest="rmlogs",
+ type=int, default=0,
+ help="maximum age in seconds for logfiles in logs/ "
+ "(0 means: never delete, and is default)")
+ parser.add_argument("-m, --markov_store", action="store_true",
+ dest="markov_store",
+ help="log channel discussions for !markov input")
+ parser.add_argument("--no-show-page-titles", action="store_true",
+ dest="no_show_page_titles",
+ help="do not show page titles")
opts, unknown = parser.parse_known_args()
return opts
while True:
try:
io = IO(opts.server, opts.port, opts.timeout)
- session = Session(io, opts.username, opts.nickname, opts.CHANNEL)
+ hash_server = hashlib.md5(opts.server.encode("utf-8")).hexdigest()
+ dbdir = opts.dbdir + "/" + hash_server
+ session = Session(io, opts.username, opts.nickname, opts.sasl, opts.channel,
+ opts.twtfile, dbdir, opts.rmlogs, opts.markov_store,
+ opts.no_show_page_titles)
session.loop()
except ExceptionForRestart:
io.socket.close()