import random
import hashlib
import os
+import signal
import plomsearch
import irclog
DBDIR = os.path.expanduser("~/plomlombot_db")
+def write_to_file(path, mode, text):
+ f = open(path, mode)
+ f.write(text)
+ f.close()
+
+
class ExceptionForRestart(Exception):
pass
def __init__(self, server, port, timeout):
self.timeout = timeout
self.socket = socket.socket()
- self.socket.connect((server, port))
+ try:
+ self.socket.connect((server, port))
+ except TimeoutError:
+ raise ExceptionForRestart
self.socket.setblocking(0)
self.line_buffer = []
self.rune_buffer = ""
def addquote():
if not os.access(session.quotesfile, os.F_OK):
- quotesfile = open(session.quotesfile, "w")
- quotesfile.write("QUOTES FOR " + target + ":\n")
- quotesfile.close()
- quotesfile = open(session.quotesfile, "a")
- quotesfile.write(argument + "\n")
- quotesfile.close()
+ write_to_file(session.quotesfile, "w",
+ "QUOTES FOR " + target + ":\n")
+ write_to_file(session.quotesfile, "a", argument + "\n")
quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
quotesfile.close()
- notice("ADDED QUOTE #" + str(len(lines) - 1))
+ notice("added quote #" + str(len(lines) - 1))
def quote():
def help():
- notice("SYNTAX: !quote [int] OR !quote search QUERY")
+ notice("syntax: !quote [int] OR !quote search QUERY")
notice("QUERY may be a boolean grouping of quoted or unquoted " +
"search terms, examples:")
notice("!quote search foo")
help()
return
if not os.access(session.quotesfile, os.F_OK):
- notice("NO QUOTES AVAILABLE")
+ notice("no quotes available")
return
quotesfile = open(session.quotesfile, "r")
lines = quotesfile.readlines()
if len(tokens) == 1:
i = int(tokens[0])
if i == 0 or i > len(lines):
- notice("THERE'S NO QUOTE OF THAT INDEX")
+ notice("there's no quote of that index")
return
i = i - 1
elif len(tokens) > 1:
try:
results = plomsearch.search(query, lines)
except plomsearch.LogicParserError as err:
- notice("FAILED QUERY PARSING: " + str(err))
+ notice("failed query parsing: " + str(err))
return
if len(results) == 0:
- notice("NO QUOTES MATCHING QUERY")
+ notice("no quotes matching query")
else:
- for result in results:
- notice("QUOTE #" + str(result[0] + 1) + " : "
- + result[1][-1])
+ if len(results) > 3:
+ notice("showing 3 of " + str(len(results)) + " quotes")
+ for result in results[:3]:
+ notice("quote #" + str(result[0] + 1) + ": "
+ + result[1][:-1])
return
else:
i = random.randrange(len(lines))
- notice("QUOTE #" + str(i + 1) + ": " + lines[i][:-1])
+ notice("quote #" + str(i + 1) + ": " + lines[i][:-1])
def markov():
- from random import choice, shuffle
- select_length = 2
- selections = []
+
+ def help():
+ notice("syntax: !markov [int]")
def markov(snippet):
usable_selections = []
selection = choice(usable_selections)
return selection[select_length]
+ if "" == argument:
+ tokens = []
+ else:
+ tokens = argument.split(" ")
+ if (len(tokens) > 1 or (len(tokens) == 1 and not tokens[0].isdigit())):
+ help()
+ return
+
+ from random import choice, shuffle
+ select_length = 2
+ if len(tokens) == 1:
+ n = int(tokens[0])
+ if n > 0:
+ select_length = n
+ else:
+ notice("bad value, using default: " + str(select_length))
+ selections = []
+
if not os.access(session.markovfile, os.F_OK):
- notice("NOT ENOUGH TEXT TO MARKOV.")
+ notice("not enough text to markov")
return
# Lowercase incoming lines, ensure they end in a sentence end mark.
if line[-1] not in sentence_end_markers:
line += "."
tokens += line.split()
- if len(tokens) <= select_length:
- notice("NOT ENOUGH TEXT TO MARKOV.")
+ if len(tokens) - 1 <= select_length:
+ notice("not enough text to markov")
return
# Replace URLs with escape string for now, so that the Markov selector
shuffle(selections)
for i in range(len(selections)):
if selections[i][0][-1] in sentence_end_markers:
- for i in range(select_length):
- snippet[i] = selections[i][i + 1]
+            for j in range(select_length):
+                snippet[j] = selections[i][j + 1]
break
msg = ""
malkovich = "malkovich"
try:
twtfile = open(session.twtfile, mode)
except (PermissionError, FileNotFoundError) as err:
- notice("CAN'T ACCESS OR CREATE TWT FILE: " + str(err))
+ notice("can't access or create twt file: " + str(err))
return None
return twtfile
return
twtfile.write(datetime.utcnow().isoformat() + "\t" + argument + "\n")
twtfile.close()
- notice("WROTE TWT.")
+ notice("wrote twt.")
if "addquote" == command:
addquote()
handle_url(url, notice, True)
return True
+ class TimeOut(Exception):
+ pass
+
+ def timeout_handler(ignore1, ignore2):
+ raise TimeOut("timeout")
+
+ signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(15)
try:
- r = requests.get(url, timeout=15)
+        r = requests.get(url, headers={'User-Agent': 'plomlombot'}, stream=True)
+ r.raw.decode_content = True
+ text = r.raw.read(10000000+1)
+ if len(text) > 10000000:
+ raise ValueError('Too large a response')
except (requests.exceptions.TooManyRedirects,
requests.exceptions.ConnectionError,
requests.exceptions.InvalidURL,
+ TimeOut,
UnicodeError,
+ ValueError,
requests.exceptions.InvalidSchema) as error:
- notice("TROUBLE FOLLOWING URL: " + str(error))
- return
+ signal.alarm(0)
+ notice("trouble following url: " + str(error))
+ return False
+ signal.alarm(0)
if mobile_twitter_hack(url):
- return
- title = bs4.BeautifulSoup(r.text, "html5lib").title
+ return True
+ title = bs4.BeautifulSoup(text, "html5lib").title
if title and title.string:
- prefix = "PAGE TITLE: "
+ prefix = "page title: "
if show_url:
- prefix = "PAGE TITLE FOR <" + url + ">: "
+ prefix = "page title for <" + url + ">: "
notice(prefix + title.string.strip())
else:
- notice("PAGE HAS NO TITLE TAG")
+ notice("page has no title tag")
+ return True
class Session:
- def __init__(self, io, username, nickname, channel, twtfile, dbdir):
+ def __init__(self, io, username, nickname, channel, twtfile, dbdir, rmlogs):
self.io = io
self.nickname = nickname
self.username = username
self.users_in_chan = []
self.twtfile = twtfile
self.dbdir = dbdir
+ self.rmlogs = rmlogs
self.io.send_line("NICK " + self.nickname)
self.io.send_line("USER " + self.username + " 0 * : ")
self.io.send_line("JOIN " + self.channel)
hash_channel = hashlib.md5(self.channel.encode("utf-8")).hexdigest()
self.chandir = self.dbdir + "/" + hash_channel + "/"
+ self.rawlogdir = self.chandir + "raw_logs/"
self.logdir = self.chandir + "logs/"
if not os.path.exists(self.logdir):
os.makedirs(self.logdir)
+ if not os.path.exists(self.rawlogdir):
+ os.makedirs(self.rawlogdir)
self.markovfile = self.chandir + "markovfeed"
self.quotesfile = self.chandir + "quotes"
line = Line(":" + self.nickname + "!~" + self.username +
"@localhost" + " " + line)
now = datetime.datetime.utcnow()
- logfile = open(self.logdir + now.strftime("%Y-%m-%d") + ".raw_log", "a")
form = "%Y-%m-%d %H:%M:%S UTC\t"
- logfile.write(now.strftime(form) + " " + line.line + "\n")
- logfile.close()
+ write_to_file(self.rawlogdir + now.strftime("%Y-%m-%d") + ".txt",
+ "a", now.strftime(form) + " " + line.line + "\n")
to_log = irclog.format_logline(line, self.channel)
if to_log != None:
- logfile = open(self.logdir + now.strftime("%Y-%m-%d") + ".txt", "a")
- logfile.write(now.strftime(form) + " " + to_log + "\n")
- logfile.close()
+ write_to_file(self.logdir + now.strftime("%Y-%m-%d") + ".txt",
+ "a", now.strftime(form) + " " + to_log + "\n")
def handle_privmsg(line):
target = line.receiver
msg = str.join(" ", line.tokens[3:])[1:]
matches = re.findall("(https?://[^\s>]+)", msg)
+ url_count = 0
for i in range(len(matches)):
- handle_url(matches[i], notice)
+ if handle_url(matches[i], notice):
+ url_count += 1
+ if url_count == 3:
+ notice("maximum number of urls to parse per message "
+ "reached")
+ break
if "!" == msg[0]:
tokens = msg[1:].split()
argument = str.join(" ", tokens[1:])
handle_command(tokens[0], argument, notice, target, self)
return
- file = open(self.markovfile, "a")
- file.write(msg + "\n")
- file.close()
+ write_to_file(self.markovfile, "a", msg + "\n")
+ now = datetime.datetime.utcnow()
+ write_to_file(self.logdir + now.strftime("%Y-%m-%d") + ".txt", "a",
+ "-----------------------\n")
while True:
+ if self.rmlogs > 0:
+ for f in os.listdir(self.logdir):
+ f = os.path.join(self.logdir, f)
+ if os.path.isfile(f) and \
+ os.stat(f).st_mtime < time.time() - self.rmlogs:
+ os.remove(f)
line = self.io.recv_line()
if not line:
continue
+ str(PORT) + ")")
parser.add_argument("-w, --wait", action="store", dest="timeout",
type=int, default=TIMEOUT,
- help="timeout in seconds after which to attempt " +
+ help="timeout in seconds after which to attempt "
"reconnect (default: " + str(TIMEOUT) + ")")
parser.add_argument("-u, --username", action="store", dest="username",
default=USERNAME, help="username to use (default: "
+ TWTFILE + ")")
parser.add_argument("-d, --dbdir", action="store", dest="dbdir",
default=DBDIR, help="directory to store DB files in")
+ parser.add_argument("-r, --rmlogs", action="store", dest="rmlogs",
+ type=int, default=0,
+ help="maximum age in seconds for logfiles in logs/ "
+ "(0 means: never delete, and is default)")
parser.add_argument("CHANNEL", action="store", help="channel to join")
opts, unknown = parser.parse_known_args()
return opts
hash_server = hashlib.md5(opts.server.encode("utf-8")).hexdigest()
dbdir = opts.dbdir + "/" + hash_server
session = Session(io, opts.username, opts.nickname, opts.CHANNEL,
- opts.twtfile, dbdir)
+ opts.twtfile, dbdir, opts.rmlogs)
session.loop()
except ExceptionForRestart:
io.socket.close()