def init_session(server, port, timeout, nickname, username, channel):
    """Connect to an IRC server, register nick/user, and join a channel.

    Returns the IO object wrapping the established connection.
    """
    print("CONNECTING TO " + server)
    conn = IO(server, port, timeout)
    registration = (
        "NICK " + nickname,
        "USER " + username + " 0 * : ",
        "JOIN " + channel,
    )
    for line in registration:
        conn.send_line(line)
    return conn
-
-
def lineparser_loop(io, nickname):
    """React to lines arriving on the IRC connection `io`.

    NOTE(review): this definition is truncated in the visible portion of
    the file — the PRIVMSG token parsing below breaks off mid-loop.
    """

    def act_on_privmsg(tokens):
        # Handle a single PRIVMSG, already split into IRC protocol tokens.

        def url_check(msg):
            # Scan msg for URLs and report each page's title via NOTICE.

            def notice(msg):
                # `target` is the reply destination; it is assigned in the
                # truncated part of act_on_privmsg — TODO confirm.
                io.send_line("NOTICE " + target + " :" + msg)

            matches = re.findall("(https?://[^\s>]+)", msg)
            for i in range(len(matches)):
                url = matches[i]
                request = urllib.request.Request(url, headers={
                    "User-Agent": "plomlombot"
                })
                try:
                    webpage = urllib.request.urlopen(request, timeout=15)
                except (urllib.error.HTTPError, urllib.error.URLError,
                        UnicodeError, http.client.BadStatusLine) as error:
                    notice("TROUBLE FOLLOWING URL: " + str(error))
                    continue
                # Without a declared charset we cannot decode the body.
                charset = webpage.info().get_content_charset()
                if not charset:
                    notice("TROUBLE READING PAGE TITLE: no charset in header")
                    continue
                # Only (X)HTML/XML responses are expected to carry a title.
                content_type = webpage.info().get_content_type()
                if content_type not in ('text/html', 'text/xml',
                                        'application/xhtml+xml'):
                    notice("TROUBLE READING PAGE TITLE: bad content type "
                           + content_type)
                    continue
                content = webpage.read().decode(charset)
                title = HTMLParser(content, "title").data
                title = html.unescape(title)
                notice("PAGE TITLE FOR URL: " + title)

        # Presumably collects the sender's nick from the ":nick!user@host"
        # prefix in tokens[0], stopping at "!" — the loop body continuing
        # after this point is cut off in this view.
        sender = ""
        for rune in tokens[0]:
            if rune == "!":
def handle_command(command, argument, notice, target, session):
    """Execute a bot command received from IRC.

    command/argument -- the parsed command name and its argument string
    notice -- callable used to send reply lines back to `target`
    target -- channel or nick the command concerns; also keys the DB files
    session -- carries .dbdir, the bot's database directory
    NOTE(review): the dispatch that invokes the nested helpers lies beyond
    the visible portion of this definition.
    """
    # md5 only derives a stable, filesystem-safe filename from the target
    # name here; it is not used for anything security-sensitive.
    hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
    quotesfile_name = session.dbdir + "/quotes_" + hash_string
+
+ def addquote():
+ if not os.access(quotesfile_name, os.F_OK):
+ quotesfile = open(quotesfile_name, "w")
+ quotesfile.write("QUOTES FOR " + target + ":\n")
+ quotesfile.close()
+ quotesfile = open(quotesfile_name, "a")
+ quotesfile.write(argument + "\n")
+ quotesfile.close()
+ quotesfile = open(quotesfile_name, "r")
+ lines = quotesfile.readlines()
+ quotesfile.close()
+ notice("ADDED QUOTE #" + str(len(lines) - 1))
+
+ def quote():
+
+ def help():
+ notice("SYNTAX: !quote [int] OR !quote search QUERY")
+ notice("QUERY may be a boolean grouping of quoted or unquoted " +
+ "search terms, examples:")
+ notice("!quote search foo")
+ notice("!quote search foo AND (bar OR NOT baz)")
+ notice("!quote search \"foo\\\"bar\" AND ('NOT\"' AND \"'foo'\"" +
+ " OR 'bar\\'baz')")
+
+ if "" == argument:
+ tokens = []
+ else:
+ tokens = argument.split(" ")
+ if (len(tokens) > 1 and tokens[0] != "search") or \
+ (len(tokens) == 1 and
+ (tokens[0] == "search" or not tokens[0].isdigit())):
+ help()
+ return
+ if not os.access(quotesfile_name, os.F_OK):
+ notice("NO QUOTES AVAILABLE")
+ return
+ quotesfile = open(quotesfile_name, "r")
+ lines = quotesfile.readlines()
+ quotesfile.close()
+ lines = lines[1:]
+ if len(tokens) == 1:
+ i = int(tokens[0])
+ if i == 0 or i > len(lines):
+ notice("THERE'S NO QUOTE OF THAT INDEX")
+ return
+ i = i - 1
+ elif len(tokens) > 1:
+ query = str.join(" ", tokens[1:])
+ try:
+ results = plomsearch.search(query, lines)
+ except plomsearch.LogicParserError as err:
+ notice("FAILED QUERY PARSING: " + str(err))
+ return
+ if len(results) == 0:
+ notice("NO QUOTES MATCHING QUERY")
+ else:
+ for result in results:
+ notice("QUOTE #" + str(result[0] + 1) + " : " + result[1])
+ return
+ else:
+ i = random.randrange(len(lines))
+ notice("QUOTE #" + str(i + 1) + ": " + lines[i])
+
    def markov():
        """Produce Markov-chain text from this target's markovfeed file.

        NOTE(review): truncated in the visible portion of the file — the
        sentence-assembly loop at the bottom continues past this view, so
        only the visible behavior is documented.
        """
        from random import choice, shuffle
        select_length = 2  # tokens of context used to choose each next token
        selections = []

        # NOTE(review): this helper shadows the enclosing markov() name once
        # defined; rename candidate — confirm nothing needs the outer name
        # from inside this scope first.
        def markov(snippet):
            # Choose a continuation token for `snippet`: prefer selections
            # whose leading tokens match the longest possible suffix of the
            # snippet; fall back to all selections if nothing matches.
            usable_selections = []
            for i in range(select_length, 0, -1):
                for selection in selections:
                    add = True
                    for j in range(i):
                        j += 1
                        if snippet[-j] != selection[-(j+1)]:
                            add = False
                            break
                    if add:
                        usable_selections += [selection]
                if [] != usable_selections:
                    break
            if [] == usable_selections:
                usable_selections = selections
            selection = choice(usable_selections)
            return selection[select_length]

        # NOTE(review): recomputes the same md5 as the enclosing function's
        # hash_string; could reuse it — TODO confirm they are always equal.
        hash_string = hashlib.md5(target.encode("utf-8")).hexdigest()
        markovfeed_name = session.dbdir + "/markovfeed_" + hash_string
        if not os.access(markovfeed_name, os.F_OK):
            notice("NOT ENOUGH TEXT TO MARKOV.")
            return

        # Lowercase incoming lines, ensure they end in a sentence end mark.
        # NOTE(review): an empty feed line would crash line[-1] below —
        # confirm the feed writer never stores blank lines.
        file = open(markovfeed_name, "r")
        lines = file.readlines()
        file.close()
        tokens = []
        sentence_end_markers = ".!?)("
        for line in lines:
            line = line.lower().replace("\n", "")
            if line[-1] not in sentence_end_markers:
                line += "."
            tokens += line.split()
        if len(tokens) <= select_length:
            notice("NOT ENOUGH TEXT TO MARKOV.")
            return

        # Replace URLs with escape string for now, so that the Markov selector
        # won't see them as different strings. Stash replaced URLs in urls.
        urls = []
        url_escape = "\nURL"
        url_starts = ["http://", "https://", "<http://", "<https://"]
        for i in range(len(tokens)):
            for url_start in url_starts:
                if tokens[i][:len(url_start)] == url_start:
                    length = len(tokens[i])
                    if url_start[0] == "<":
                        try:
                            length = tokens[i].index(">") + 1
                        except ValueError:
                            pass
                    urls += [tokens[i][:length]]
                    tokens[i] = url_escape + tokens[i][length:]
                    break

        # For each snippet of select_length, use markov() to find continuation
        # token from selections. Replace present users' names with malkovich.
        # Start snippets with the beginning of a sentence, if possible.
        for i in range(len(tokens) - select_length):
            token_list = []
            for j in range(select_length + 1):
                token_list += [tokens[i + j]]
            selections += [token_list]
        snippet = []
        for i in range(select_length):
            snippet += [""]
        shuffle(selections)
        # NOTE(review): the inner loop below rebinds `i`, so selections[i]
        # no longer refers to the sentence-ending selection found by the
        # outer loop — looks like a shadowing bug, but the loop is cut off
        # at this point in the view; confirm against the full file before
        # changing anything.
        for i in range(len(selections)):
            if selections[i][0][-1] in sentence_end_markers:
                for i in range(select_length):
                    snippet[i] = selections[i][i + 1]