- tokens = msg[1:].split()
- hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
- quotesfile_name = "quotes_" + hash_string
- if tokens[0] == "addquote":
- if not os.access(quotesfile_name, os.F_OK):
- quotesfile = open(quotesfile_name, "w")
- quotesfile.write("QUOTES FOR " + target + ":\n")
- quotesfile.close()
- quotesfile = open(quotesfile_name, "a")
- quotesfile.write(str.join(" ", tokens[1:]) + "\n")
- quotesfile.close()
- quotesfile = open(quotesfile_name, "r")
- lines = quotesfile.readlines()
- quotesfile.close()
- notice("ADDED QUOTE #" + str(len(lines) - 1))
- elif tokens[0] == "quote":
- if not os.access(quotesfile_name, os.F_OK):
- notice("NO QUOTES AVAILABLE")
- return
- quotesfile = open(quotesfile_name, "r")
- lines = quotesfile.readlines()
- quotesfile.close()
- lines = lines[1:]
- i = random.randrange(len(lines))
- notice("QUOTE #" + str(i + 1) + ": " + lines[i])
-
- sender = ""
- for rune in tokens[0]:
- if rune == "!":
+ i = i - 1
+ elif len(tokens) > 1:
+ query = str.join(" ", tokens[1:])
+ try:
+ results = plomsearch.search(query, lines)
+ except plomsearch.LogicParserError as err:
+ notice("FAILED QUERY PARSING: " + str(err))
+ return
+ if len(results) == 0:
+ notice("NO QUOTES MATCHING QUERY")
+ else:
+ for result in results:
+ notice("QUOTE #" + str(result[0] + 1) + " : " + result[1])
+ return
+ else:
+ i = random.randrange(len(lines))
+ notice("QUOTE #" + str(i + 1) + ": " + lines[i])
+
+ def markov():
+ from random import choice
+ select_length = 2
+ selections = []
+
def markov(snippet):
    """Pick a continuation token for snippet from the stored selections.

    Each entry of selections is a list of select_length context tokens
    plus one continuation token. We look for entries whose context ends
    like snippet does, preferring the longest possible suffix match
    (select_length tokens, then shorter), fall back to the whole pool if
    nothing matches, and return the continuation token of one randomly
    chosen candidate.

    Uses select_length, selections and choice from the enclosing scope.
    """
    usable_selections = []
    # Try the longest suffix first and shrink the match window until at
    # least one candidate turns up.
    for length in range(select_length, 0, -1):
        usable_selections = [
            selection for selection in selections
            # Compare the last `length` snippet tokens against the
            # selection's context tokens (its final token is the
            # continuation, hence the -(j + 1) offset).
            if all(snippet[-j] == selection[-(j + 1)]
                   for j in range(1, length + 1))
        ]
        if usable_selections:
            break
    if not usable_selections:
        # No context matched at all: draw from everything.
        usable_selections = selections
    return choice(usable_selections)[select_length]
+
+ hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
+ markovfeed_name = "markovfeed_" + hash_string
+ if not os.access(markovfeed_name, os.F_OK):
+ notice("NOT ENOUGH TEXT TO MARKOV.")
+ return
+
+ # Lowercase incoming lines, ensure they end in a sentence end mark.
+ file = open(markovfeed_name, "r")
+ lines = file.readlines()
+ file.close()
+ tokens = []
+ for line in lines:
+ line = line.lower().replace("\n", "")
+ if line[-1] not in ".!?":
+ line += "."
+ tokens += line.split()
+ if len(tokens) <= select_length:
+ notice("NOT ENOUGH TEXT TO MARKOV.")
+ return
+
+ # Replace URLs with escape string for now, so that the Markov selector
+ # won't see them as different strings. Stash replaced URLs in urls.
+ urls = []
+ url_escape = "\nURL"
+ url_starts = ["http://", "https://", "<http://", "<https://"]
+ for i in range(len(tokens)):
+ for url_start in url_starts:
+ if tokens[i][:len(url_start)] == url_start:
+ length = len(tokens[i])
+ if url_start[0] == "<":
+ try:
+ length = tokens[i].index(">") + 1
+ except ValueError:
+ pass
+ urls += [tokens[i][:length]]
+ tokens[i] = url_escape + tokens[i][length:]
+ break
+
+ # For each snippet of select_length, use markov() to find continuation
+ # token from selections. Replace present users' names with malkovich.
+ for i in range(len(tokens) - select_length):
+ token_list = []
+ for j in range(select_length + 1):
+ token_list += [tokens[i + j]]
+ selections += [token_list]
+ snippet = []
+ for i in range(select_length):
+ snippet += [""]
+ msg = ""
+ malkovich = "malkovich"
+ while 1:
+ new_end = markov(snippet)
+ for name in session.users_in_chan:
+ if new_end[:len(name)] == name.lower():
+ new_end = malkovich + new_end[len(name):]
+ break
+ if len(msg) + len(new_end) > 200: