m4pl1mp/plugins/openai_plugin.py

# -*- coding: utf-8 -*-
import os
import re
import urllib.parse
from difflib import SequenceMatcher
from glob import glob
from random import choices
from random import randint as rint
import irc3
import openai
import requests
from irc3.plugins.command import command
# absolute path of this plugin's directory, used to locate the ../personalities databases
dir_path = os.path.dirname(os.path.realpath(__file__))
###########################################################################################
OPENAPI_KEY = os.environ['OPENAPI_KEY']
###########################################################################################
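# IRC formatting prefixes: \x02 is bold, \x03NN selects a mIRC colour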
DREY="\x02\x0315"
GREY="\x02\x0314"
DRED="\x02\x0302"
LRED="\x02\x0312"
###########################################################################################
class OPENAI_MESSAGE_HISTORY():
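    # keeps a rolling buffer of recent OpenAI responses plus raw user messages,
    # with a helper for fuzzy similarity checks used by the (currently disabled) filters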
#######################################################################################
    def __init__(self):
        # per-instance history buffers rather than shared mutable class attributes
        self.openai_messages = []
        self.user_messages = []
        self.user_users = []
        self.processing=0
#######################################################################################
def push_openai_messages(self,data):
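        # rotate the fixed-length list right by one and overwrite the head,
        # so only the most recent responses are kept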
self.openai_messages = self.openai_messages[-1:] + self.openai_messages[:-1]
self.openai_messages[0] = data
#######################################################################################
def push_user_messages(self,user,data):
self.user_users.append(user)
self.user_messages.append(data)
#######################################################################################
def similar(self,a,b):
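        # fuzzy similarity ratio in [0, 1] between two strings, via difflib.SequenceMatcher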
return SequenceMatcher(None,a,b).ratio()
###########################################################################################
###########################################################################################
@irc3.plugin
class Plugin:
#######################################################################################
#######################################################################################
def __init__(self, bot):
self.bot = bot
self.bot.openai_history=OPENAI_MESSAGE_HISTORY()
#############################################
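        # pre-seed the response history with five empty slots so the rotate-and-overwrite buffer has a fixed length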
for _ in range(5):
self.bot.openai_history.openai_messages.append("")
#############################################
self.openai_io=[]
self.start_chat_log=""
self.lastterm=""
self.lastresponse=""
self.default_model="text-davinci-002"
self.temperature=1.1
self.max_tokens=2000
self.top_p=1.0
self.frequency_penalty=0.0
self.presence_penalty=0.0
self.flipcolor=False
self.default_load()
#######################################################################################
#######################################################################################
@command(permission='view')
def ai(self, mask, target, args):
"""OpenAi Question A Term
%%ai <term>...
"""
term=' '.join(args['<term>'])
if not term[-1] == ".": term+="."
openai.api_key = OPENAPI_KEY
######################################################################################
print(f"<<< openai: processing {target} {mask.nick.lower()} message: {term.lower()}")
######################################################################################
MESSAGE_OK=True
TRAP_OK=True
LOOP_COUNT_LIMIT=5
LOOP_COUNT=0
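        # keep re-querying until a response passes the (currently disabled) filters or the retry limit is reached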
while MESSAGE_OK:
LOOP_COUNT+=1
print(f'<<< loop: {LOOP_COUNT} >>>')
prompt_text=f'{term}'
self.lastterm=f'{term}'
response=openai.Completion.create(
model=self.default_model,
prompt=prompt_text,
temperature=self.temperature,
max_tokens=self.max_tokens,
top_p=self.top_p,
frequency_penalty=self.frequency_penalty,
presence_penalty=self.presence_penalty
)
self.lastresponse=response
##################################################################################
openai_message=response.choices[0].text
USER=mask.nick
MESSAGE=term
# ################################################################################### REPROCESSOR SOF
# # SIMILARITY - QUERY SENT VS QUERY ANSWER
# if MESSAGE_OK:
# if self.bot.openai_history.similar(openai_message.lower(),term.lower())>0.4:
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
# print(f'openai - logic ! rejected // openai similarity - response too similar to query')
# TRAP_OK=False
# ################################################################################### REPROCESSOR SOF
# # SIMILARITY - QUERY ANSWER VS PERSONALITY PROFILE DATABASE
# if MESSAGE_OK:
# if self.bot.openai_history.similar(openai_message.lower().strip(),' '.join(self.start_chat_log.strip().splitlines())[:len(openai_message.strip())])>0.4:
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
            # print(f'openai - logic ! rejected // openai similarity - response too similar to personality profile database')
# TRAP_OK=False
# ###################################################################################
# # SIMILARITY - HISTORY
# if MESSAGE_OK:
# for i in range(len(self.bot.openai_history.openai_messages)):
# if self.bot.openai_history.similar(openai_message,str(self.bot.openai_history.openai_messages[i]))>0.8:
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
# print(f'openai - logic ! rejected // openai similarity - repeat of previous response')
# TRAP_OK=False
# ###################################################################################
# # MOCK / DUPE
# if MESSAGE_OK:
# if self.bot.openai_history.similar(openai_message,MESSAGE)>0.8:
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
# print(f'openai - logic ! rejected // human mock - openai response same as human')
# TRAP_OK=False
# ###################################################################################
# # GPT LOOP GLITCH
# if MESSAGE_OK:
# n=len(openai_message.split())
# i=len(set(openai_message.split()))
# if i<int(n/2):
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
# print(f'openai - logic ! rejected // gpt loop glitch - reiterating same thing in multiples')
# TRAP_OK=False
# ###################################################################################
# # LIMITED RESPONSE
# if MESSAGE_OK:
# n=len(openai_message.split())
            # if n<3:
# self.openai_io.append({'user':USER,'message':MESSAGE,'target':target})
# print(f'openai - logic ! rejected // limited response - skip an unfinished token chain')
# TRAP_OK=False
###################################################################################
if MESSAGE_OK and TRAP_OK:
self.bot.openai_history.push_openai_messages(openai_message)
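                # split the reply into chunks of at most 400 characters so each fits in a single IRC line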
_msg = re.findall(r'.{1,400}(?:\s+|$)', openai_message)
#if len(_msg) > 1:
# if len(_msg[0]) < len(_msg[1])//2:
# print(f'openai - discovered and removed a preface glitch: {_msg[0].strip()}')
# _msg.reverse()
# _msg.pop()
# _msg.reverse()
COLOR=""
self.flipcolor = not self.flipcolor
if self.flipcolor:
COLOR=DREY
else:
COLOR=GREY
for i,_ in enumerate(_msg):
if i==0:
self.bot.privmsg(target, f"\x02\x0302{USER:}\x0F\x02\x0309 ▶ {COLOR}{_.strip()}\x0F")
else:
self.bot.privmsg(target, f"{COLOR}{_.strip()}\x0F")
MESSAGE_OK=False
print('<<< openai finished >>>')
###################################################################################
if LOOP_COUNT > LOOP_COUNT_LIMIT:
print(f"<<< openai failed: bouncing to mapleai >>>")
self.bot.history.bounce={'user':USER,'message':term,'target':target}
#MESSAGE=f"{GREY}<<< {DRED}i got nothing to say {GREY}>>>"
#self.bot.privmsg(target, f"{USER}: {MESSAGE}")
break
################################################################################### REPROCESSOR EOF
#######################################################################################
def random_float(self,n):
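        # return a random multiple of 0.1 between 0 and n/10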
i=float(rint(0,n))
i/=10
return i
#######################################################################################
def print_response_properties(self,target):
self.bot.privmsg(target, f"{DRED} model{GREY}: {LRED}{self.default_model}")
self.bot.privmsg(target, f"{DRED} temperature{GREY}: {LRED}{self.temperature}")
self.bot.privmsg(target, f"{DRED} max_tokens{GREY}: {LRED}{self.max_tokens}")
self.bot.privmsg(target, f"{DRED} top_p{GREY}: {LRED}{self.top_p}")
self.bot.privmsg(target, f"{DRED}frequency_penalty{GREY}: {LRED}{self.frequency_penalty}")
self.bot.privmsg(target, f"{DRED} presence_penalty{GREY}: {LRED}{self.presence_penalty}")
#######################################################################################
@command(permission='admin')
def airand(self, mask, target, args):
"""OpenAi Randomize Response Properties
%%airand
"""
MODELS=["text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"]
MODEL=choices(MODELS)[0]
TOKEN_CEILING=1000
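        # davinci gets a larger completion budget than the smaller models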
if MODEL==MODELS[0]:
TOKEN_CEILING=2000
self.default_model=MODEL
self.temperature=self.random_float(20)
self.max_tokens=rint(1,TOKEN_CEILING)
self.top_p=self.random_float(10)
        self.frequency_penalty=self.random_float(20)  # keep the randomized penalty in the same 0-2 range as the other penalties
self.presence_penalty=self.random_float(20)
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}randomizing personality properties {GREY}>>>"))
self.print_response_properties(target)
#######################################################################################
    def default_load(self):
        # load the default personality profile shipped in ../personalities/default.db
        FILE='%s/../personalities/default.db' % dir_path
        with open(FILE,'r') as f:
            self.start_chat_log=f.read()
        if self.start_chat_log.startswith('\n'):
            self.start_chat_log=self.start_chat_log[1:]
#######################################################################################
@command(permission='admin')
def airead(self, mask, target, args):
"""OpenAi Read Current Personality
%%airead
"""
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}reading current personality profile {GREY}>>>"))
        if self.start_chat_log is None:
self.bot.privmsg(target,"<NULL>")
else:
for _ in self.start_chat_log.splitlines():
msg = re.findall(r'.{1,400}(?:\s+|$)', _)
for __ in msg:
self.bot.privmsg(target, f'{__.strip()}')
#######################################################################################
@command(permission='admin')
def aishow(self, mask, target, args):
"""OpenAi Show Current Personality Properties and Values.
%%aishow
"""
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}showing current personality properties {GREY}>>>"))
self.print_response_properties(target)
#######################################################################################
@command(permission='admin')
def aiterm(self, mask, target, args):
"""OpenAi Show Last Term.
%%aiterm
"""
self.bot.privmsg(target, self.bot.emo(f'{GREY}<<< {DRED}showing last term query {GREY}>>>'))
for _ in self.lastterm.splitlines():
msg = re.findall(r'.{1,400}(?:\s+|$)', _)
for __ in msg:
self.bot.privmsg(target, f'{__.strip()}')
#######################################################################################
@command(permission='admin')
def airesponse(self, mask, target, args):
"""OpenAi Show Last Server Response.
%%airesponse
"""
self.bot.privmsg(target, self.bot.emo(f'{GREY}<<< {DRED}showing last openai server response {GREY}>>>'))
msg=[]
FINISH_REASON=self.lastresponse['choices'][0]['finish_reason']
INDEX=self.lastresponse['choices'][0]['index']
LOGPROBS=self.lastresponse['choices'][0]['logprobs']
TEXT=self.lastresponse['choices'][0]['text'].strip()
MODEL=self.lastresponse['model']
        OBJECT=self.lastresponse['object']
        COMPLETION_TOKENS=self.lastresponse['usage']['completion_tokens']
        PROMPT_TOKENS=self.lastresponse['usage']['prompt_tokens']
        TOTAL_TOKENS=self.lastresponse['usage']['total_tokens']
_TEXT=re.findall(r'.{1,400}(?:\s+|$)', TEXT)
#msg.append(f'{GREY}[{DRED}usage{GREY}]')
msg.append(f'{DRED}completion_tokens{GREY}: {LRED}{COMPLETION_TOKENS}')
msg.append(f' {DRED}prompt_tokens{GREY}: {LRED}{PROMPT_TOKENS}')
msg.append(f' {DRED}total_tokens{GREY}: {LRED}{TOTAL_TOKENS}')
#msg.append(f'{GREY}[{DRED}choices{GREY}]')
msg.append(f' {DRED}index{GREY}: {LRED}{INDEX}')
msg.append(f' {DRED}logprobs{GREY}: {LRED}{LOGPROBS}')
        if len(_TEXT) > 1:
            # drop a short "preface glitch" chunk when the first chunk is much shorter than the second
            if len(_TEXT[0]) < len(_TEXT[1])//2:
                print(f'discovered and removed a preface glitch: {_TEXT[0].strip()}')
                del _TEXT[0]
for i,_ in enumerate(_TEXT):
if i == 0:
msg.append(f' {DRED}text{GREY}: {LRED}{_.strip()}')
else:
msg.append(f'{LRED}{_.strip()}')
for _ in msg:
self.bot.privmsg(target, _)
#######################################################################################
@command(permission='admin')
def ailist(self, mask, target, args):
"""OpenAi List Personalities
%%ailist
"""
PATH='%s/../personalities' % dir_path
FILES=glob(f'{PATH}/*.db')
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}listing personality databases {GREY}>>>"))
for i,_ in enumerate(FILES):
FILE=_.split('/')[-1].replace('.db','')
self.bot.privmsg(target, f'{DRED}{i}{GREY}: {LRED}{FILE}')
#######################################################################################
@command(permission='admin')
def aiload(self, mask, target, args):
"""OpenAi Load Personalities
%%aiload <msg>...
"""
msg = ''.join(args['<msg>'])
        try:
            i=int(msg)
        except ValueError:
self.bot.privmsg(target, self.bot.emo(f'{GREY}<<< {DRED}error{GREY}: {LRED}not an integer, use only numbers of the personality databases {GREY}>>>'))
return
PATH='%s/../personalities' % dir_path
FILES=glob(f'{PATH}/*.db')
        try:
            with open(FILES[i],'r') as f:
                buffer=f.read().splitlines()
            self.start_chat_log='\n'.join(buffer)
            if self.start_chat_log.startswith('\n'):
                self.start_chat_log=self.start_chat_log[1:]
            FILE=FILES[i].split('/')[-1].replace('.db', '')
            self.bot.privmsg(target, self.bot.emo(f'{GREY}<<< {DRED}loaded {FILE} personality database {GREY}>>>'))
        except (IndexError, OSError):
            self.bot.privmsg(target, self.bot.emo(f'{GREY}<<< {DRED}error{GREY}: {LRED}could not load this personality database, maybe invalid index number {GREY}>>>'))
            return
#######################################################################################
@command(permission='admin')
def aiwrite(self, mask, target, args):
"""OpenAi List Personalities
%%aiwrite <msg>...
"""
msg = ''.join(args['<msg>'])
if self.start_chat_log.find('None\n')==0:
self.start_chat_log=self.start_chat_log.replace('None\n','')
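        # strip dots and slashes so the supplied name cannot escape the personalities directory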
msg=msg.replace('.','').replace('/','')
PATH='%s/../personalities' % dir_path
FILE=f'{PATH}/{msg}.db'
if os.path.exists(FILE):
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}personality database already exists, choose a different filename {GREY}>>>"))
return
f=open(FILE, "a")
f.write(f'{self.start_chat_log}\n')
f.close()
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}created {msg} personality database {GREY}>>>"))
#######################################################################################
@command(permission='admin')
def aitrain(self, mask, target, args):
"""OpenAi Question A Term
%%aitrain <term>...
"""
term = ' '.join(args['<term>'])
        if term[-1]==',': term=term[:-1]
if not term[-1] == ".": term+="."
        FILE='%s/../personalities/trained.db' % dir_path
        with open(FILE, "a") as f:
            f.write(f'{term}\n')
self.start_chat_log=f'{self.start_chat_log}\n{term}'
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}trained {GREY}>>>"))
#######################################################################################
@command(permission='admin')
def aidefault(self, mask, target, args):
"""OpenAi Return to Defaults
%%aidefault
"""
self.default_model="text-davinci-002"
self.temperature=1.1
self.max_tokens=2000
self.top_p=1.0
self.frequency_penalty=0.0
        self.presence_penalty=0.0
        # also restore the default personality profile so the announcement below is accurate
        self.default_load()
        self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}setting personality and properties to defaults {GREY}>>>"))
self.print_response_properties(target)
#######################################################################################
@command(permission='admin')
def aiset(self, mask, target, args):
"""OpenAi Set Response Properties. Properties are default_model, temperature, max_tokens, top_p, frequency_penalty, presence_penalty. Example Usage: ?aiset top_p 1.0
%%aiset <msg>...
"""
msg= ' '.join(args['<msg>'])
PROPERTIES=['model','temperature','max_tokens','top_p','frequency_penalty','presence_penalty']
MODELS=["text-davinci-002","text-curie-001","text-babbage-001","text-ada-001"]
prop=""
val=""
try:
prop=msg.split()[0].lower()
val=msg.split()[1].lower()
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}not enough parameters {GREY}- {DRED}property choices{GREY}: {LRED}{PROPERTIES} {GREY}- {DRED}model choices{GREY}: {LRED}{MODELS} {GREY}- {DRED}usage examples{GREY}: {LRED}?aiset model text-davinci-002, ?aiset max_tokens 2000, ?aiset model text-davinci-002, ?aiset temperature 0.7, ?aiset top_p 1.0, ?aiset frequency_penalty 0.0, ?aiset presence_penalty 0.0 {GREY}>>>"))
return
if prop in PROPERTIES:
if prop == "model":
try:
if val in MODELS:
self.default_model=val
                        if val==MODELS[0]:
self.max_tokens = 2000
else:
self.max_tokens = 1000
else:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property model value should be a string {GREY}- {DRED}choice of models{GREY}: {LRED}{MODELS} {GREY}- {DRED}example{GREY}: {LRED}?aiset model text-davinci-002 {GREY}>>>"))
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property model value should be a string {GREY}- {DRED}choice of models{GREY}: {LRED}{MODELS} {GREY}- {DRED}example{GREY}: {LRED}?aiset model text-davinci-002 {GREY}>>>"))
return
elif prop == "temperature":
try:
if float(val) <= 2 and float(val) >= 0:
self.temperature=float(val)
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property temperature value should be a float {GREY}- {DRED}example{GREY}: {LRED}?aiset temperature 0.7 {GREY}>>>"))
return
elif prop == "max_tokens":
try:
if int(val) <= 2000 and int(val) >= 100:
self.max_tokens=int(val)
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property max_tokens value should be an integer not greater than 2000 {GREY}- {DRED}example{GREY}: {LRED}?aiset max_tokens 2000 {GREY}>>>"))
return
elif prop == "top_p":
try:
if float(val) <= 1.0 and float(val) >= 0.0:
self.top_p=float(val)
else:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: property model should be a float no greater than 1.0 {GREY}- {DRED}example{GREY}: {LRED}?aiset top_p 0.7 {GREY}>>>"))
return
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: property model should be a float no greater than 1.0 {GREY}- {DRED}example{GREY}: {LRED}?aiset top_p 0.7 {GREY}>>>"))
return
elif prop == "frequency_penalty":
try:
                    # range check instead of a truthiness test, so 0.0 is accepted
                    if 0.0 <= float(val) <= 2.0:
                        self.frequency_penalty=float(val)
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property frequency_penalty should be a float {GREY}- {DRED}example{GREY}: {LRED}?aiset frequency_penalty 0.0 {GREY}>>>"))
return
elif prop == "presence_penalty":
try:
if float(val) <= 2.0 and float(val) >= 0.0:
self.presence_penalty=float(val)
else:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property presence_penalty should be a float no greater than 2.0 {GREY}- {DRED}example{GREY}: {LRED}?aiset presence_penalty 0.0 {GREY}>>>"))
return
except:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}property presence_penalty should be a float no greater than 2.0 {GREY}- {DRED}example{GREY}: {LRED}?aiset presence_penalty 0.0 {GREY}>>>"))
return
else:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}no properties were set, they remain the same {GREY}>>>"))
self.print_response_properties(target)
return
else:
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}used an invalid property identifier {GREY}- {DRED}property identifiers are {LRED}{PROPERTIES} {GREY}>>>"))
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}error{GREY}: {LRED}no properties were set, they remain the same {GREY}>>>"))
self.print_response_properties(target)
return
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}{prop} property set to the value {val} {GREY}>>>"))
self.print_response_properties(target)
#######################################################################################
@command(permission='admin')
def aiclear(self, mask, target, args):
"""OpenAi Clear Term
%%aiclear
"""
FILE='%s/../personalities/trained.db' % dir_path
f=open(FILE, "w")
f.write("")
f.close()
self.start_chat_log = ""
self.bot.privmsg(target, self.bot.emo(f"{GREY}<<< {DRED}cleared {GREY}>>>"))
#######################################################################################
###########################################################################################
###########################################################################################