import os
import plomsearch
-URLREGEX = "(https?://[^\s>]+)"
-
# Defaults, may be overwritten by command line arguments.
SERVER = "irc.freenode.net"
PORT = 6667
TIMEOUT = 240
USERNAME = "plomlombot"
NICKNAME = USERNAME
+TWTFILE = ""
+DBDIR = os.path.expanduser("~/plomlombot_db")
class ExceptionForRestart(Exception):
def handle_command(command, argument, notice, target, session):
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- quotesfile_name = "quotes_" + hash_string
def addquote():
- if not os.access(quotesfile_name, os.F_OK):
- quotesfile = open(quotesfile_name, "w")
+ if not os.access(session.quotesfile, os.F_OK):
+ quotesfile = open(session.quotesfile, "w")
quotesfile.write("QUOTES FOR " + target + ":\n")
quotesfile.close()
- quotesfile = open(quotesfile_name, "a")
+ quotesfile = open(session.quotesfile, "a")
quotesfile.write(argument + "\n")
quotesfile.close()
- quotesfile = open(quotesfile_name, "r")
+ quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
quotesfile.close()
notice("ADDED QUOTE #" + str(len(lines) - 1))
(tokens[0] == "search" or not tokens[0].isdigit())):
help()
return
- if not os.access(quotesfile_name, os.F_OK):
+ if not os.access(session.quotesfile, os.F_OK):
notice("NO QUOTES AVAILABLE")
return
- quotesfile = open(quotesfile_name, "r")
+ quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
quotesfile.close()
lines = lines[1:]
notice("QUOTE #" + str(i + 1) + ": " + lines[i])
def markov():
- from random import shuffle
+ from random import choice, shuffle
select_length = 2
selections = []
for selection in selections:
add = True
for j in range(i):
- if snippet[j] != selection[j]:
+ j += 1
+ if snippet[-j] != selection[-(j+1)]:
add = False
break
if add:
break
if [] == usable_selections:
usable_selections = selections
- shuffle(usable_selections)
- return usable_selections[0][select_length]
-
- def purge_undesired(tokens):
- for token in tokens:
- if None != re.match("^" + URLREGEX, token):
- del(tokens[tokens.index(token)])
- for name in session.uses_in_chan:
- while True:
- try:
- del(tokens[tokens.index(name)])
- except ValueError:
- break
- return tokens
-
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- markovfeed_name = "markovfeed_" + hash_string
- if not os.access(markovfeed_name, os.F_OK):
+ selection = choice(usable_selections)
+ return selection[select_length]
+
+ if not os.access(session.markovfile, os.F_OK):
notice("NOT ENOUGH TEXT TO MARKOV.")
return
- file = open(markovfeed_name, "r")
+
+ # Lowercase incoming lines, ensure they end in a sentence end mark.
+ file = open(session.markovfile, "r")
lines = file.readlines()
file.close()
tokens = []
+ sentence_end_markers = ".!?)("
for line in lines:
- line = line.replace("\n", "")
+ line = line.lower().replace("\n", "")
+ if line[-1] not in sentence_end_markers:
+ line += "."
tokens += line.split()
- tokens = purge_undesired(tokens)
if len(tokens) <= select_length:
notice("NOT ENOUGH TEXT TO MARKOV.")
return
+
+ # Replace URLs with escape string for now, so that the Markov selector
+ # won't see them as different strings. Stash replaced URLs in urls.
+ urls = []
+ url_escape = "\nURL"
+ url_starts = ["http://", "https://", "<http://", "<https://"]
+ for i in range(len(tokens)):
+ for url_start in url_starts:
+ if tokens[i][:len(url_start)] == url_start:
+ length = len(tokens[i])
+ if url_start[0] == "<":
+ try:
+ length = tokens[i].index(">") + 1
+ except ValueError:
+ pass
+ urls += [tokens[i][:length]]
+ tokens[i] = url_escape + tokens[i][length:]
+ break
+
+ # For each snippet of select_length, use markov() to find continuation
+ # token from selections. Replace present users' names with malkovich.
+ # Start snippets with the beginning of a sentence, if possible.
for i in range(len(tokens) - select_length):
token_list = []
for j in range(select_length + 1):
snippet = []
for i in range(select_length):
snippet += [""]
+ shuffle(selections)
+ for i in range(len(selections)):
+ if selections[i][0][-1] in sentence_end_markers:
+ for i in range(select_length):
+ snippet[i] = selections[i][i + 1]
+ break
msg = ""
+ malkovich = "malkovich"
while 1:
new_end = markov(snippet)
+ for name in session.users_in_chan:
+ if new_end[:len(name)] == name.lower():
+ new_end = malkovich + new_end[len(name):]
+ break
if len(msg) + len(new_end) > 200:
break
msg += new_end + " "
for i in range(select_length - 1):
snippet[i] = snippet[i + 1]
snippet[select_length - 1] = new_end
- notice(msg.lower() + "malkovich.")
+
+    # Replace occurrences of url escape string with random choice from urls.
+ while True:
+ index = msg.find(url_escape)
+ if index < 0:
+ break
+ msg = msg.replace(url_escape, choice(urls), 1)
+
+ # More meaningful ways to randomly end sentences.
+ notice(msg + malkovich + ".")
+
+ def twt():
+ def try_open(mode):
+ try:
+ twtfile = open(session.twtfile, mode)
+ except (PermissionError, FileNotFoundError) as err:
+ notice("CAN'T ACCESS OR CREATE TWT FILE: " + str(err))
+ return None
+ return twtfile
+
+ from datetime import datetime
+ if not os.access(session.twtfile, os.F_OK):
+ twtfile = try_open("w")
+ if None == twtfile:
+ return
+ twtfile.close()
+ twtfile = try_open("a")
+ if None == twtfile:
+ return
+ twtfile.write(datetime.utcnow().isoformat() + "\t" + argument + "\n")
+ twtfile.close()
+ notice("WROTE TWT.")
if "addquote" == command:
addquote()
quote()
elif "markov" == command:
markov()
+ elif "twt" == command:
+ twt()
def handle_url(url, notice, show_url=False):
except (requests.exceptions.TooManyRedirects,
requests.exceptions.ConnectionError,
requests.exceptions.InvalidURL,
+ UnicodeError,
requests.exceptions.InvalidSchema) as error:
notice("TROUBLE FOLLOWING URL: " + str(error))
return
if mobile_twitter_hack(url):
return
- title = bs4.BeautifulSoup(r.text, "html.parser").title
- if title:
+ title = bs4.BeautifulSoup(r.text, "html5lib").title
+ if title and title.string:
prefix = "PAGE TITLE: "
if show_url:
prefix = "PAGE TITLE FOR <" + url + ">: "
class Session:
- def __init__(self, io, username, nickname, channel):
+ def __init__(self, io, username, nickname, channel, twtfile, dbdir):
self.io = io
self.nickname = nickname
self.channel = channel
- self.uses_in_chan = []
+ self.users_in_chan = []
+ self.twtfile = twtfile
+ self.dbdir = dbdir
self.io.send_line("NICK " + self.nickname)
self.io.send_line("USER " + username + " 0 * : ")
self.io.send_line("JOIN " + self.channel)
+ hash_channel = hashlib.md5(self.channel.encode("utf-8")).hexdigest()
+ self.chandir = self.dbdir + "/" + hash_channel + "/"
+ self.logdir = self.chandir + "logs/"
+ if not os.path.exists(self.logdir):
+ os.makedirs(self.logdir)
+ self.markovfile = self.chandir + "markovfeed"
+ self.quotesfile = self.chandir + "quotes"
def loop(self):
+ def log(line):
+ now = datetime.datetime.utcnow()
+ logfile = open(self.logdir + now.strftime("%Y-%m-%d") + ".txt", "a")
+ form = "%Y-%m-%d %H:%M:%S UTC\t"
+ logfile.write(now.strftime(form) + " " + line + "\n")
+ logfile.close()
+
def handle_privmsg(tokens):
def handle_input(msg, target):
def notice(msg):
self.io.send_line("NOTICE " + target + " :" + msg)
- matches = re.findall(URLREGEX, msg)
+ matches = re.findall("(https?://[^\s>]+)", msg)
for i in range(len(matches)):
handle_url(matches[i], notice)
if "!" == msg[0]:
argument = str.join(" ", tokens[1:])
handle_command(tokens[0], argument, notice, target, self)
return
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- markovfeed_name = "markovfeed_" + hash_string
- file = open(markovfeed_name, "a")
+ file = open(self.markovfile, "a")
file.write(msg + "\n")
file.close()
if receiver != self.nickname:
target = receiver
msg = str.join(" ", tokens[3:])[1:]
+ if target == self.channel:
+ log("<" + sender + "> " + msg)
handle_input(msg, target)
def name_from_join_or_part(tokens):
elif tokens[1] == "353":
names = tokens[5:]
names[0] = names[0][1:]
- self.uses_in_chan += names
+ for i in range(len(names)):
+ names[i] = names[i].replace("@", "").replace("+", "")
+ self.users_in_chan += names
+ log(line)
elif tokens[1] == "JOIN":
name = name_from_join_or_part(tokens)
if name != self.nickname:
- self.uses_in_chan += [name]
+ self.users_in_chan += [name]
+ log(line)
elif tokens[1] == "PART":
name = name_from_join_or_part(tokens)
- del(self.uses_in_chan[self.uses_in_chan.index(name)])
+ del(self.users_in_chan[self.users_in_chan.index(name)])
+ log(line)
+ else:
+ log(line)
+
def parse_command_line_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-p, --port", action="store", dest="port", type=int,
default=PORT, help="port to connect to (default : "
+ str(PORT) + ")")
- parser.add_argument("-t, --timeout", action="store", dest="timeout",
+ parser.add_argument("-w, --wait", action="store", dest="timeout",
type=int, default=TIMEOUT,
help="timeout in seconds after which to attempt " +
"reconnect (default: " + str(TIMEOUT) + ")")
parser.add_argument("-n, --nickname", action="store", dest="nickname",
default=NICKNAME, help="nickname to use (default: "
+ NICKNAME + ")")
+ parser.add_argument("-t, --twtxtfile", action="store", dest="twtfile",
+ default=TWTFILE, help="twtxt file to use (default: "
+ + TWTFILE + ")")
+ parser.add_argument("-d, --dbdir", action="store", dest="dbdir",
+ default=DBDIR, help="directory to store DB files in")
parser.add_argument("CHANNEL", action="store", help="channel to join")
opts, unknown = parser.parse_known_args()
return opts
while True:
try:
io = IO(opts.server, opts.port, opts.timeout)
- session = Session(io, opts.username, opts.nickname, opts.CHANNEL)
+ hash_server = hashlib.md5(opts.server.encode("utf-8")).hexdigest()
+ dbdir = opts.dbdir + "/" + hash_server
+ session = Session(io, opts.username, opts.nickname, opts.CHANNEL,
+ opts.twtfile, dbdir)
session.loop()
except ExceptionForRestart:
io.socket.close()