Merge branch 'main' into patch-2
commit ef9127d876
10 changed files with 168 additions and 19 deletions
@@ -98,12 +98,15 @@ class StreamingCompletion:
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
         model='gpt-4',
+        proxy=None
     ) -> Generator[ForeFrontResponse, None, None]:
         if not token:
             raise Exception('Token is required!')
         if not chat_id:
             chat_id = str(uuid4())

+        proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None
+
         headers = {
             'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
             'accept': '*/*',
@@ -135,6 +138,7 @@ class StreamingCompletion:
         for chunk in post(
             'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
             headers=headers,
+            proxies=proxies,
             json=json_data,
             stream=True,
         ).iter_lines():
@@ -169,6 +173,7 @@ class Completion:
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
         model='gpt-4',
+        proxy=None
     ) -> ForeFrontResponse:
         text = ''
         final_response = None
@@ -179,6 +184,7 @@ class Completion:
             action_type=action_type,
             default_persona=default_persona,
             model=model,
+            proxy=proxy
         ):
             if response:
                 final_response = response
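These hunks thread an optional `proxy` argument from the forefront `StreamingCompletion.create` and `Completion.create` entry points down to the underlying `post(..., proxies=proxies)` call. A minimal usage sketch, based on the call signatures that appear in the query-helper hunks later in this diff; the proxy address is a placeholder:

```python
from gpt4free import forefront

proxy = '127.0.0.1:8080'  # placeholder; the module prefixes it with 'http://'

token = forefront.Account.create(logging=False, proxy=proxy)
response = forefront.Completion.create(
    token=token,
    prompt='hello world',
    model='gpt-4',
    proxy=proxy,  # forwarded as proxies= to the streaming POST request
)
print(response.text)
```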
@@ -187,7 +187,7 @@ class Account:
         enable_bot_creation: bool = False,
     ):
         client = TLS(client_identifier='chrome110')
-        client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
+        client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else {}

         mail_client = Emailnator()
         mail_address = mail_client.get_mail()
@@ -293,10 +293,13 @@ class StreamingCompletion:
         custom_model: bool = None,
         prompt: str = 'hello world',
         token: str = '',
+        proxy: Optional[str] = None
     ) -> Generator[PoeResponse, None, None]:
         _model = MODELS[model] if not custom_model else custom_model

+        proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else False
         client = PoeClient(token)
+        client.proxy = proxies

         for chunk in client.send_message(_model, prompt):
             yield PoeResponse(
@@ -330,10 +333,13 @@ class Completion:
         custom_model: str = None,
         prompt: str = 'hello world',
         token: str = '',
+        proxy: Optional[str] = None
     ) -> PoeResponse:
         _model = MODELS[model] if not custom_model else custom_model

+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
         client = PoeClient(token)
+        client.proxy = proxies

         chunk = None
         for response in client.send_message(_model, prompt):
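The quora hunks follow the same pattern: an optional `proxy` parameter builds a `proxies` mapping that is then attached to the `PoeClient` as `client.proxy`. A usage sketch mirroring the call sites in the query helpers later in this diff; the proxy address is a placeholder:

```python
from gpt4free import quora

proxy = '127.0.0.1:8080'  # placeholder host:port

token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
response = quora.Completion.create(
    model='gpt-4',
    prompt='hello world',
    token=token,
    proxy=proxy,  # ends up on the PoeClient as client.proxy
)
print(response.text)
```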
gpt4free/quora/backup-mail.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+from requests import Session
+from time import sleep
+from json import loads
+from re import findall
+class Mail:
+    def __init__(self) -> None:
+        self.client = Session()
+        self.client.post("https://etempmail.com/")
+        self.cookies = {'acceptcookie': 'true'}
+        self.cookies["ci_session"] = self.client.cookies.get_dict()["ci_session"]
+        self.email = None
+    def get_mail(self):
+        respone=self.client.post("https://etempmail.com/getEmailAddress")
+        #cookies
+        self.cookies["lisansimo"] = eval(respone.text)["recover_key"]
+        self.email = eval(respone.text)["address"]
+        return self.email
+    def get_message(self):
+        print("Waiting for message...")
+        while True:
+            sleep(5)
+            respone=self.client.post("https://etempmail.com/getInbox")
+            mail_token=loads(respone.text)
+            print(self.client.cookies.get_dict())
+            if len(mail_token) == 1:
+                break
+
+        params = {'id': '1',}
+        self.mail_context = self.client.post("https://etempmail.com/getInbox",params=params)
+        self.mail_context = eval(self.mail_context.text)[0]["body"]
+        return self.mail_context
+    #,cookies=self.cookies
+    def get_verification_code(self):
+        message = self.mail_context
+        code = findall(r';">(\d{6,7})</div>', message)[0]
+        print(f"Verification code: {code}")
+        return code
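The new `Mail` helper wraps the etempmail.com API: it reserves a throwaway inbox, polls it every five seconds, and pulls a 6-7 digit verification code out of the message body. Because the file name contains a hyphen it cannot be imported with a plain `import` statement, so this hedged sketch loads it by path; the path and variable names are illustrative, not part of the commit:

```python
import importlib.util

# Load gpt4free/quora/backup-mail.py by path, since "backup-mail" is not a valid module name.
spec = importlib.util.spec_from_file_location('backup_mail', 'gpt4free/quora/backup-mail.py')
backup_mail = importlib.util.module_from_spec(spec)
spec.loader.exec_module(backup_mail)

mail = backup_mail.Mail()
address = mail.get_mail()            # reserve a temporary inbox on etempmail.com
print(f'Register with: {address}')

mail.get_message()                   # poll every 5 s until a message arrives
print(mail.get_verification_code())  # extract the 6-7 digit code from the HTML body
```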
@@ -2,7 +2,7 @@ from json import loads
 from queue import Queue, Empty
 from re import findall
 from threading import Thread
-from typing import Generator
+from typing import Generator, Optional

 from curl_cffi import requests
 from fake_useragent import UserAgent
@@ -19,7 +19,7 @@ class Completion:
     stream_completed = False

     @staticmethod
-    def request(prompt: str):
+    def request(prompt: str, proxy: Optional[str]=None):
         headers = {
             'authority': 'chatbot.theb.ai',
             'content-type': 'application/json',
@@ -27,9 +27,12 @@ class Completion:
             'user-agent': UserAgent().random,
         }

+        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
+
         requests.post(
             'https://chatbot.theb.ai/api/chat-process',
             headers=headers,
+            proxies=proxies,
             content_callback=Completion.handle_stream_response,
             json={'prompt': prompt, 'options': {}},
         )
@@ -37,8 +40,8 @@ class Completion:
         Completion.stream_completed = True

     @staticmethod
-    def create(prompt: str) -> Generator[str, None, None]:
-        Thread(target=Completion.request, args=[prompt]).start()
+    def create(prompt: str, proxy: Optional[str]=None) -> Generator[str, None, None]:
+        Thread(target=Completion.request, args=[prompt, proxy]).start()

         while not Completion.stream_completed or not Completion.message_queue.empty():
             try:
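In the theb provider the proxy is handed to the background request thread, while `create()` keeps yielding chunks from the message queue. A short sketch of consuming the stream with a proxy, matching how the query helpers later in this diff join the generator; the proxy address is a placeholder:

```python
from gpt4free import theb

proxy = '127.0.0.1:8080'  # placeholder host:port

# create() starts the request in a Thread and yields chunks as they arrive.
for chunk in theb.Completion.create(prompt='hello world', proxy=proxy):
    print(chunk, end='', flush=True)
```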
@@ -30,12 +30,16 @@ class Completion:
         include_links: bool = False,
         detailed: bool = False,
         debug: bool = False,
+        proxy: Optional[str] = None
     ) -> PoeResponse:
         if chat is None:
             chat = []

+        proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else {}
+
         client = Session(client_identifier='chrome_108')
         client.headers = Completion.__get_headers()
+        client.proxies = proxies

         response = client.get(
             f'https://you.com/api/streamingSearch',
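The you provider gains the same optional `proxy` parameter and attaches the resulting mapping to the TLS `Session` as `client.proxies`. A minimal call sketch, using the `result.text` access that the query helpers below switch to; the proxy address is a placeholder:

```python
from gpt4free import you

proxy = '127.0.0.1:8080'  # placeholder host:port

result = you.Completion.create(prompt='hello world', proxy=proxy)
print(result.text)
```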
@@ -1,5 +1,6 @@
 import os
 import sys
+from typing import Optional

 sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

@@ -7,14 +8,14 @@ from gpt4free import quora, forefront, theb, you
 import random


-def query_forefront(question: str) -> str:
+def query_forefront(question: str, proxy: Optional[str] = None) -> str:
     # create an account
-    token = forefront.Account.create(logging=False)
+    token = forefront.Account.create(logging=False, proxy=proxy)

     response = ""
     # get a response
     try:
-        return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4').text
+        return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4', proxy=proxy).text
     except Exception as e:
         # Return error message if an exception occurs
         return (
@@ -22,16 +23,16 @@ def query_forefront(question: str) -> str:
     )


-def query_quora(question: str) -> str:
-    token = quora.Account.create(logging=False, enable_bot_creation=True)
-    return quora.Completion.create(model='gpt-4', prompt=question, token=token).text
+def query_quora(question: str, proxy: Optional[str] = None) -> str:
+    token = quora.Account.create(logging=False, enable_bot_creation=True, proxy=proxy)
+    return quora.Completion.create(model='gpt-4', prompt=question, token=token, proxy=proxy).text


-def query_theb(question: str) -> str:
+def query_theb(question: str, proxy: Optional[str] = None) -> str:
     # Set cloudflare clearance cookie and get answer from GPT-4 model
     response = ""
     try:
-        return ''.join(theb.Completion.create(prompt=question))
+        return ''.join(theb.Completion.create(prompt=question, proxy=proxy))

     except Exception as e:
         # Return error message if an exception occurs
@@ -40,11 +41,11 @@ def query_theb(question: str) -> str:
     )


-def query_you(question: str) -> str:
+def query_you(question: str, proxy: Optional[str] = None) -> str:
     # Set cloudflare clearance cookie and get answer from GPT-4 model
     try:
-        result = you.Completion.create(prompt=question)
-        return result["response"]
+        result = you.Completion.create(prompt=question, proxy=proxy)
+        return result.text

     except Exception as e:
         # Return error message if an exception occurs
@@ -66,11 +67,11 @@ avail_query_methods = {
 }


-def query(user_input: str, selected_method: str = "Random") -> str:
+def query(user_input: str, selected_method: str = "Random", proxy: Optional[str] = None) -> str:
     # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
     if selected_method != "Random" and selected_method in avail_query_methods:
         try:
-            return avail_query_methods[selected_method](user_input)
+            return avail_query_methods[selected_method](user_input, proxy=proxy)
         except Exception as e:
             print(f"Error with {selected_method}: {e}")
             return "😵 Sorry, some error occurred please try again."
@@ -89,7 +90,7 @@ def query(user_input: str, selected_method: str = "Random") -> str:
     chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
     try:
         # Try to call the chosen method with the user input
-        result = chosen_query(user_input)
+        result = chosen_query(user_input, proxy=proxy)
         success = True
     except Exception as e:
         print(f"Error with {chosen_query_name}: {e}")
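With every backend helper now accepting `proxy`, the `query()` dispatcher can forward one optional proxy string to whichever provider is chosen. A sketch of calling it with and without a proxy; the import path of this helper module is not shown in the diff and is assumed here:

```python
from query_methods import query  # assumed module name for the helpers above

print(query("What is the capital of France?", selected_method="Random"))

# Same call, routed through a (placeholder) HTTP proxy.
print(query("What is the capital of France?", selected_method="Random", proxy="127.0.0.1:8080"))
```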
@@ -75,8 +75,10 @@ user_input = input_placeholder.text_input(
 )
 submit_button = st.button("Submit")
+
+
 if (user_input and user_input != st.session_state['input_text']) or submit_button:
     output = query(user_input, st.session_state['query_method'])

     escaped_output = output.encode('utf-8').decode('unicode-escape')

     st.session_state.current_conversation['user_inputs'].append(user_input)
@@ -95,6 +97,9 @@ if st.sidebar.button("New Conversation"):

 st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)

+# Proxy
+st.session_state['proxy'] = st.sidebar.text_input("Proxy: ")
+
 # Sidebar
 st.sidebar.header("Conversation History")

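The new sidebar field stores whatever the user types in `st.session_state['proxy']`. A hedged sketch of how that value could be forwarded to `query()`, reusing the `user_input` and `query_method` state from the hunk above; the wiring below is illustrative and not part of this commit:

```python
# Illustrative wiring only.
proxy = st.session_state.get('proxy') or None   # empty string -> no proxy
output = query(user_input, st.session_state['query_method'], proxy=proxy)
```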
testing/usesless_test.py (new file, 13 lines)
@@ -0,0 +1,13 @@
+import usesless
+
+question1 = "Who won the world series in 2020?"
+req = usesless.Completion.create(prompt=question1)
+answer = req["text"]
+message_id = req["parentMessageId"]
+
+question2 = "Where was it played?"
+req2 = usesless.Completion.create(prompt=question2, parentMessageId=message_id)
+answer2 = req2["text"]
+
+print(answer)
+print(answer2)
unfinished/usesless/README.md (new file, 23 lines)
@@ -0,0 +1,23 @@
+ai.usesless.com
+
+to do:
+
+- use random user agent in header
+- make the code better I guess (?)
+
+### Example: `usesless` <a name="example-usesless"></a>
+
+```python
+import usesless
+
+message_id = ""
+while True:
+    prompt = input("Question: ")
+    if prompt == "!stop":
+        break
+
+    req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)
+
+    print(f"Answer: {req['text']}")
+    message_id = req["id"]
+```
unfinished/usesless/__init__.py (new file, 51 lines)
@@ -0,0 +1,51 @@
+import requests
+import json
+
+
+class Completion:
+    headers = {
+        "authority": "ai.usesless.com",
+        "accept": "application/json, text/plain, */*",
+        "accept-language": "en-US,en;q=0.5",
+        "cache-control": "no-cache",
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0",
+    }
+
+    @staticmethod
+    def create(
+        systemMessage: str = "You are a helpful assistant",
+        prompt: str = "",
+        parentMessageId: str = "",
+        presence_penalty: float = 1,
+        temperature: float = 1,
+        model: str = "gpt-3.5-turbo",
+    ):
+        json_data = {
+            "openaiKey": "",
+            "prompt": prompt,
+            "options": {
+                "parentMessageId": parentMessageId,
+                "systemMessage": systemMessage,
+                "completionParams": {
+                    "presence_penalty": presence_penalty,
+                    "temperature": temperature,
+                    "model": model,
+                },
+            },
+        }
+
+        url = "https://ai.usesless.com/api/chat-process"
+        request = requests.post(url, headers=Completion.headers, json=json_data)
+        content = request.content
+        response = Completion.__response_to_json(content)
+        return response
+
+    @classmethod
+    def __response_to_json(cls, text) -> dict:
+        text = str(text.decode("utf-8"))
+        split_text = text.rsplit("\n", 1)[1]
+        to_json = json.loads(split_text)
+        return to_json
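`__response_to_json` keeps only the text after the last newline of the response body and parses that as JSON, which suggests the endpoint returns several newline-separated snapshots with the final line carrying the complete answer. A tiny self-contained illustration of that parsing step with a made-up payload:

```python
import json

# Made-up example body: two progressive snapshots separated by a newline.
raw = b'{"id": "abc", "text": "Hel"}\n{"id": "abc", "text": "Hello!"}'

last_line = raw.decode('utf-8').rsplit('\n', 1)[1]  # same split as __response_to_json
print(json.loads(last_line)['text'])  # -> Hello!
```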