# min/modules/nlp.py

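
# Markov-style chat module. rec() learns word transitions from incoming
# messages, getNoun() picks a topic word for a message, and genOut()
# grows a reply around that topic from the learned transitions. Tables
# live in shared.db from the bot core (a dataset database, per the
# import below).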
from bot import *
import dataset
import random
import time
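
# rec(): learn from one message. Records the first word as a sentence
# starter (beg), the last as an ender (end), every word in the word
# table (noun), and each adjacent pair as a pre -> pro transition (prew).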
async def rec(self, m):
    prew = shared.db['prew']
    noch = shared.db['noun']
    beg = shared.db['beg']
    end = shared.db['end']

    pre = ''
    words = m.split(' ')

    if words[0] == 'admin':
        return

    for w in words:
        if pre == '':
            beg.insert(dict(word=w))
        else:
            prew.insert_ignore(dict(pre=pre, pro=w), ['id'])
        pre = w
        noch.insert(dict(word=w))
    end.insert(dict(word=pre))
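
# getNoun(): pick the topic of a message for channel c. The least
# frequent word wins; the conver table can then steer the topic toward
# one that followed it in earlier conversations.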
async def getNoun(self, words, c):
    if c in shared.cstate:
        oldnoun = shared.cstate[c]
    else:
        oldnoun = None

    shared.db['remsg'].insert_ignore(dict(noun=oldnoun, msg=' '.join(words)), ['id'])

    nouns = [i['word'] for i in shared.db['noun'].find()]
    out = {}
    for i in words:
        out[i] = nouns.count(i)
    # the rarest word in the message is treated as its topic
    noun = min(out, key=out.get)

    conversation = shared.db['conver']
    if oldnoun is not None:
        print("adding", [oldnoun, noun])
        conversation.insert_ignore(dict(pre=oldnoun, pro=noun), ['id'])
    nextnoun = [i['pro'] for i in conversation.find(pre=noun)]
    print("nextnoun:", nextnoun)
    if len(nextnoun) > 0:
        noun = random.choice(nextnoun)

    shared.cstate[c] = noun
    return noun
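
# genOut(): build a reply around a topic word. Prefer replaying a whole
# stored message that previously followed this topic (remsg); otherwise
# extend outward from the noun along the prew transitions.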
async def genOut(self, noun):
    oldresponses = [i['msg'] for i in shared.db['remsg'].find(noun=noun)]
    if len(oldresponses) > 0:
        return random.choice(oldresponses).split(' ')

    prew = shared.db['prew']
    beg = [i['word'] for i in shared.db['beg'].find()]
    end = [i['word'] for i in shared.db['end'].find()]
    nouns = [i['word'] for i in shared.db['noun'].find()]

    iter = 0
    out = [noun]
    # extend leftward until the first word is a known sentence starter
    # that is not too common relative to progress (see enmul in init),
    # giving up after 7 steps
    while (out[0] not in beg or nouns.count(out[0]) - 1 > iter * shared.enmul) and iter < 7:
        try:
            out = [random.choice(list(prew.find(pro=out[0])))['pre']] + out
        except IndexError:
            # no predecessor recorded; push iter past the limit to bail out
            iter += 69
        iter += 1
    iter = 0
    # same walk to the right, toward a known sentence ender
    while (out[-1] not in end or nouns.count(out[-1]) - 1 > iter * shared.enmul) and iter < 7:
        try:
            out.append(random.choice(list(prew.find(pre=out[-1])))['pro'])
        except IndexError:
            # no successor recorded; push iter past the limit to bail out
            iter += 69
        iter += 1
    return out
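
# filter(): raw message hook. Replies when the bot is addressed by
# prefix, by nickname, or in a private message; otherwise it only
# learns from the message, rate-limited by learndelay.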
async def filter(self, c, n, m):
    # respect a per-channel quiet timer, if one is set
    if c in shared.qtime and shared.qtime[c] > time.time():
        return

    if m.startswith(shared.prefix):
        # explicit prefix: strip it and reply
        m = m[len(shared.prefix):]
        await go(self, c, n, m)
    elif m.startswith(self.nickname + ' '):
        # addressed by nickname: strip it and reply
        m = m[len(self.nickname)+1:]
        await go(self, c, n, m)
    elif '#' not in c and n != self.nickname:
        # private message from someone else: always reply
        await go(self, c, n, m)
    else:
        # otherwise just learn passively, at most once per learndelay
        if len(m.split(' ')) > 1:
            if shared.learntime + shared.learndelay < time.time():
                await rec(self, m)
                shared.learntime = time.time()
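
# go(): learn from a message addressed to the bot and send back a
# generated reply, ignoring 'admin' commands.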
async def go(self, c, n, m):
    await rec(self, m)
    words = m.split(' ')
    if words[0] == 'admin':
        return
    await self.message(c, ' '.join(await genOut(self, await getNoun(self, words, c))))
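
# init(): module entry point; set up shared state and tunables, then
# register the message filter.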
async def init(self):
    shared.qtime = {}
    shared.learntime = 0

    # delay, in seconds, between passive-learning events on messages
    # not addressed to the bot.
    shared.learndelay = 1

    # sentence-ending weight: lower means longer sentences, higher
    # means shorter ones. this will need to grow slowly as the
    # database grows.
    shared.enmul = 6
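    # e.g. with enmul = 6, an ending word seen 13 times in the noun
    # table (count - 1 = 12) only lets genOut's extension loop stop
    # once 12 <= iter * 6, i.e. from iter = 2 onward.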

    # hook filter() into the bot's raw message handlers
    shared.rawm['nlp'] = filter
    shared.cstate = {}