+ if "" == argument:
+ tokens = []
+ else:
+ tokens = argument.split(" ")
+ if len(tokens) != 0:
+ if (len(tokens) == 1 and not tokens[0].isdigit()) or \
+ tokens[0] not in {"search", "offset-search"} or \
+ (tokens[0] == "offset-search" and
+ ((not len(tokens) > 2) or (not tokens[1].isdigit()))):
+ help()
+ return
+ if not os.access(session.quotesfile, os.F_OK):
+ notice("no quotes available")
+ return
+ quotesfile = open(session.quotesfile, "r")
+ lines = quotesfile.readlines()
+ quotesfile.close()
+ lines = lines[1:]
+ if len(tokens) == 1:
+ i = int(tokens[0])
+ if i == 0 or i > len(lines):
+ notice("there's no quote of that index")
+ return
+ i = i - 1
+ elif len(tokens) > 1:
+ to_skip = 0
+ if tokens[0] == "search":
+ query = str.join(" ", tokens[1:])
+ elif tokens[0] == "offset-search":
+ to_skip = int(tokens[1])
+ query = str.join(" ", tokens[2:])
+ try:
+ results = plomsearch.search(query, lines)
+ except plomsearch.LogicParserError as err:
+ notice("failed query parsing: " + str(err))
+ return
+ if len(results) == 0:
+ notice("no quotes matching query")
+ else:
+ if to_skip >= len(results):
+ notice("skipped all quotes matching query")
+ else:
+ notice("found %s matches, showing max. 3, skipping %s"
+ % (len(results), to_skip))
+ for i in range(len(results)):
+ if i >= to_skip and i < to_skip + 3:
+ result = results[i]
+ notice("quote #" + str(result[0] + 1) + ": "
+ + result[1][:-1])
+ return
+ else:
+ i = random.randrange(len(lines))
+ notice("quote #" + str(i + 1) + ": " + lines[i][:-1])
+
def markov():
    """Generate a pseudo-random sentence from the channel's markov feed
    file and send it via notice().

    Reads from the enclosing scope: argument (the command's argument
    string), session (.markovfile, .users_in_chan) and notice().  An
    optional single positive-integer argument sets the selection
    (context) length; default is 2.
    """

    def help():
        # NOTE: shadows the builtin help() inside this function.
        notice("syntax: !markov [integer from 1 to infinite]")

    def markov(snippet):
        # Pick the next token: prefer selections whose leading tokens
        # match the tail of snippet (longest overlap first); fall back
        # to all selections when nothing matches.
        # NOTE: shadows the enclosing markov() while inside it.
        usable_selections = []
        for i in range(select_length, 0, -1):
            for selection in selections:
                add = True
                for j in range(i):
                    j += 1
                    # Compare the last j tokens of snippet against the
                    # matching positions of selection; the selection's
                    # final token is the candidate continuation, hence
                    # the -(j+1) offset.
                    if snippet[-j] != selection[-(j+1)]:
                        add = False
                        break
                if add:
                    usable_selections += [selection]
            if [] != usable_selections:
                break
        if [] == usable_selections:
            usable_selections = selections
        selection = choice(usable_selections)
        return selection[select_length]

    if "" == argument:
        tokens = []
    else:
        tokens = argument.split(" ")
    # Accept either no argument or exactly one non-negative integer.
    if (len(tokens) > 1 or (len(tokens) == 1 and not tokens[0].isdigit())):
        help()
        return

    from random import choice, shuffle
    select_length = 2  # number of context tokens per Markov state
    if len(tokens) == 1:
        n = int(tokens[0])
        if n > 0:
            select_length = n
        else:
            notice("bad value, using default: " + str(select_length))
    selections = []

    if not os.access(session.markovfile, os.F_OK):
        notice("not enough text to markov for selection length")
        return

    # Lowercase incoming lines, ensure they end in a sentence end mark.
    file = open(session.markovfile, "r")
    lines = file.readlines()
    file.close()
    tokens = []
    sentence_end_markers = ".!?)("
    for line in lines:
        line = line.lower().replace("\n", "")
        # NOTE(review): an empty line would crash on line[-1] here —
        # presumably the markovfile never contains empty lines; verify.
        if line[-1] not in sentence_end_markers:
            line += "."
        tokens += line.split()
    if len(tokens) - 1 <= select_length:
        notice("not enough text to markov")
        return

    # Replace URLs with escape string for now, so that the Markov selector
    # won't see them as different strings. Stash replaced URLs in urls.
    urls = []
    url_escape = "\nURL"
    url_starts = ["http://", "https://", "<http://", "<https://"]
    for i in range(len(tokens)):
        for url_start in url_starts:
            if tokens[i][:len(url_start)] == url_start:
                length = len(tokens[i])
                if url_start[0] == "<":
                    try:
                        # For <...>-wrapped URLs, replace only up to the
                        # closing ">" and keep any trailing text.
                        length = tokens[i].index(">") + 1
                    except ValueError:
                        pass
                urls += [tokens[i][:length]]
                tokens[i] = url_escape + tokens[i][length:]
                break

    # For each snippet of select_length, use markov() to find continuation
    # token from selections. Replace present users' names with malkovich.
    # Start snippets with the beginning of a sentence, if possible.
    for i in range(len(tokens) - select_length):
        token_list = []
        for j in range(select_length + 1):
            token_list += [tokens[i + j]]
        selections += [token_list]
    snippet = []
    for i in range(select_length):
        snippet += [""]
    shuffle(selections)
    for i in range(len(selections)):
        if selections[i][0][-1] in sentence_end_markers:
            # NOTE(review): this indexes selections[j], not
            # selections[i] — looks suspicious; confirm it is intended
            # before changing.
            for j in range(select_length):
                snippet[j] = selections[j][j + 1]
            break
    msg = ""
    malkovich = "malkovich"
    while 1:
        new_end = markov(snippet)
        # If the continuation starts with a present user's name,
        # replace that name with "malkovich".
        for name in session.users_in_chan:
            if new_end[:len(name)] == name.lower():
                new_end = malkovich + new_end[len(name):]
                break
        # Stop before the message would exceed 200 characters.
        if len(msg) + len(new_end) > 200:
            break
        msg += new_end + " "
        # Slide the context window one token forward.
        for i in range(select_length - 1):
            snippet[i] = snippet[i + 1]
        snippet[select_length - 1] = new_end

    # Replace occurences of url escape string with random choice from urls.
    while True:
        index = msg.find(url_escape)
        if index < 0:
            break
        msg = msg.replace(url_escape, choice(urls), 1)

    # More meaningful ways to randomly end sentences.
    notice(msg + malkovich + ".")
+
def twt():
    """Append the command's argument as a timestamped twt (microblog
    entry) to session.twtfile, creating the file if needed.

    Reads from the enclosing scope: argument (the text to post),
    session (.twtfile path) and notice() for user feedback.
    """

    def try_open(mode):
        # Open the twt file in the given mode; on failure notice the
        # error and return None instead of raising.
        try:
            twtfile = open(session.twtfile, mode)
        except (PermissionError, FileNotFoundError) as err:
            notice("can't access or create twt file: " + str(err))
            return None
        return twtfile

    from datetime import datetime
    # Create the file first if it does not exist yet, so the append
    # below cannot fail on a missing file.
    if not os.access(session.twtfile, os.F_OK):
        twtfile = try_open("w")
        if twtfile is None:
            return
        twtfile.close()
    twtfile = try_open("a")
    if twtfile is None:
        return
    # twtxt format: ISO-8601 UTC timestamp, tab, text, newline.
    twtfile.write(datetime.utcnow().isoformat() + "\t" + argument + "\n")
    twtfile.close()
    notice("wrote twt.")
+
+ if "addquote" == command:
+ addquote()
+ elif "quote" == command:
+ quote()
+ elif "markov" == command:
+ markov()
+ elif "twt" == command:
+ twt()
+
+
def handle_url(url, notice, show_url=False):
    """Fetch url, extract its HTML <title>, and report it via notice().

    :param url: the URL to fetch.
    :param notice: callable taking one string, used for all output.
    :param show_url: if True, include the URL itself in the title line.
    :return: False when fetching failed, True otherwise.
    """

    def mobile_twitter_hack(url):
        # mobile.twitter.com status pages carry no usable <title>;
        # rewrite to the desktop URL and re-handle it with the URL
        # shown (it differs from what the user posted).  Returns True
        # when the hack applied, None (falsy) otherwise.
        # Fix: raw strings (the old '[^\?/]' was an invalid escape
        # sequence) and escaped dots in the hostname.
        re1 = r'https?://(mobile\.twitter\.com/)[^/]+(/status/)'
        re2 = r'https?://mobile\.twitter\.com/([^/]+)/status/([^?/]+)'
        m = re.search(re1, url)
        if m and m.group(1) == 'mobile.twitter.com/' \
                and m.group(2) == '/status/':
            m = re.search(re2, url)
            url = 'https://twitter.com/' + m.group(1) + '/status/' + m.group(2)
            handle_url(url, notice, True)
            return True

    class TimeOut(Exception):
        pass

    def timeout_handler(ignore1, ignore2):
        raise TimeOut("timeout")

    # Guard the whole fetch with a 15s SIGALRM; requests' own timeouts
    # would not cover a slowly streaming response body.
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(15)
    try:
        r = requests.get(url, headers={'User-Agent': 'plomlombot'},
                         stream=True)
        r.raw.decode_content = True
        # Read at most 10 MB + 1 byte; the extra byte detects overflow.
        text = r.raw.read(10000000 + 1)
        if len(text) > 10000000:
            raise ValueError('Too large a response')
    except (requests.exceptions.TooManyRedirects,
            requests.exceptions.ConnectionError,
            requests.exceptions.InvalidURL,
            TimeOut,
            UnicodeError,
            ValueError,
            requests.exceptions.InvalidSchema) as error:
        signal.alarm(0)
        notice("trouble following url: " + str(error))
        return False
    signal.alarm(0)
    if mobile_twitter_hack(url):
        return True
    title = bs4.BeautifulSoup(text, "html5lib").title
    if title and title.string:
        prefix = "page title: "
        if show_url:
            prefix = "page title for <" + url + ">: "
        notice(prefix + title.string.strip())
    else:
        notice("page has no title tag")
    return True
+
+
+class Session:
+
def __init__(self, io, username, nickname, channel, twtfile, dbdir, rmlogs,
             markov_input):
    """Set up per-channel session state, then register with the IRC
    server and join the channel through the given io object."""
    self.io = io
    self.nickname = nickname
    self.users_in_chan = []
    self.twtfile = twtfile
    self.markov_input = markov_input
    # Per-channel storage directory, keyed by the channel name's MD5.
    channel_hash = hashlib.md5(channel.encode("utf-8")).hexdigest()
    channel_dir = dbdir + "/" + channel_hash + "/"
    self.markovfile = channel_dir + "markovfeed"
    self.quotesfile = channel_dir + "quotes"
    self.log = Log(channel_dir, self.nickname, username, channel, rmlogs)
    # Log in and enter the channel (NICK, then USER, then JOIN).
    self.io.send_line("NICK " + self.nickname)
    self.io.send_line("USER " + username + " 0 * : ")
    self.io.send_line("JOIN " + channel)
    self.io.log = self.log
    self.log.separator_line()
+
+ def loop(self):
+
+ def handle_privmsg(line):