1
0
Fork 0
This commit is contained in:
t.me/xtekky 2023-04-29 23:48:54 +01:00
commit 9489dda310
70 changed files with 725 additions and 546 deletions

View file

@ -1,12 +0,0 @@
# Build a container that serves the gpt4free Streamlit GUI on port 8501.
FROM python:3.10-slim
# git is required to fetch the application source in the next step
RUN apt-get update && apt-get install -y git
# NOTE(review): cloning at build time makes the image non-reproducible —
# its contents depend on the repository state at build time.
RUN git clone https://github.com/xtekky/gpt4free.git
WORKDIR /gpt4free
RUN pip install --no-cache-dir -r requirements.txt
# the Streamlit entry point is copied to the repo root so it is run from there
RUN cp gui/streamlit_app.py .
EXPOSE 8501
CMD ["streamlit", "run", "streamlit_app.py"]

18
Dockerfile Normal file
View file

@ -0,0 +1,18 @@
# Build a container that serves the gpt4free Streamlit GUI on port 8501,
# using the local checkout (COPY) instead of a build-time git clone.
FROM python:3.10
# git may be needed by pip for VCS requirements — TODO confirm it is still used
RUN apt-get update && apt-get install -y git
RUN mkdir -p /usr/src/gpt4free
WORKDIR /usr/src/gpt4free
# Optional pip mirror configuration; uncomment when the default index is slow
# RUN pip config set global.index-url https://mirrors.aliyun.com/pypi/simple/
# RUN pip config set global.trusted-host mirrors.aliyun.com
# Copy requirements.txt first so the dependency layer is cached across code edits
COPY requirements.txt /usr/src/gpt4free/
RUN pip install --no-cache-dir -r requirements.txt
COPY . /usr/src/gpt4free
# the Streamlit entry point is copied to the project root so it is run from there
RUN cp gui/streamlit_app.py .
EXPOSE 8501
CMD ["streamlit", "run", "streamlit_app.py"]

View file

@ -56,7 +56,6 @@ Till the long bitter end, will this boy live to fight.
_____________________________ _____________________________
# GPT4free - use ChatGPT, for free!!
##### You may join our discord server for updates and support ; ) ##### You may join our discord server for updates and support ; )
- [Discord Link](https://discord.gg/gpt4free) - [Discord Link](https://discord.gg/gpt4free)
@ -92,10 +91,10 @@ Please note the following:
| **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - | | **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - |
| **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - | | **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - |
| **Usage Examples** | | | | | **Usage Examples** | | | |
| `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](openai_rev/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | || | `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | ||
| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| **Try it Out** | | | | | **Try it Out** | | | |
| Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - | | Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
| replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - | | replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - |
@ -127,10 +126,10 @@ Please note the following:
## Best sites <a name="best-sites"></a> ## Best sites <a name="best-sites"></a>
#### gpt-4 #### gpt-4
- [`/forefront`](./forefront/README.md) - [`/forefront`](gpt4free/forefront/README.md)
#### gpt-3.5 #### gpt-3.5
- [`/you`](./you/README.md) - [`/you`](gpt4free/you/README.md)
## Install <a name="install"></a> ## Install <a name="install"></a>
Download or clone this GitHub repo Download or clone this GitHub repo

View file

@ -1,57 +0,0 @@
import requests
class Completion:
    """Client for the unofficial CoCalc ChatGPT endpoint.

    Sends a prompt to https://cocalc.com/api/v2/openai/chatgpt, authenticating
    with the caller-supplied session cookie.
    """

    @staticmethod
    def create(prompt: str, cookieInput: str) -> dict:
        """Submit *prompt* and return the parsed result.

        :param prompt: user input forwarded to the model
        :param cookieInput: raw ``Cookie`` header value of an authenticated
            cocalc.com session
        :return: dict with keys ``response`` (model output) and ``success``
        """
        # Initialize a session with custom headers
        session = Completion._initialize_session(cookieInput)
        # Set the data that will be submitted
        payload = Completion._create_payload(prompt, "ASSUME I HAVE FULL ACCESS TO COCALC. ")
        # Submit the request and return the results
        return Completion._submit_request(session, payload)

    @classmethod
    def _initialize_session(cls, conversationCookie) -> requests.Session:
        """Initialize a session with custom headers for the request."""
        session = requests.Session()
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://cocalc.com',
            'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
            'Cookie': conversationCookie,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
        }
        session.headers.update(headers)
        return session

    @classmethod
    def _create_payload(
            cls,
            prompt: str,
            system_prompt: str
    ) -> dict:
        """Build the JSON body expected by the CoCalc chat API."""
        # 'tag' appears to identify the calling page to the backend — TODO confirm
        return {
            "input": prompt,
            "system": system_prompt,
            "tag": "next:index"
        }

    @classmethod
    def _submit_request(
            cls,
            session: requests.Session,
            payload: dict
    ) -> dict:
        """POST the payload and map the JSON reply to the public result dict.

        Fixed: this and ``create`` were annotated ``-> str`` but always
        returned a dict.
        """
        response = session.post(
            "https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
        return {
            "response": response["output"],
            "success": response["success"]
        }

View file

@ -1,16 +0,0 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>
```python
import forefront
# create an account
token = forefront.Account.create(logging=False)
print(token)
# get a response
for response in forefront.StreamingCompletion.create(token = token,
prompt = 'hello world', model='gpt-4'):
print(response.completion.choices[0].text, end = '')
print("")
```

View file

@ -1,154 +0,0 @@
from json import loads
from re import match
from time import time, sleep
from uuid import uuid4
from requests import post
from tls_client import Session
from forefront.mail import Mail
from forefront.typing import ForeFrontResponse
class Account:
    @staticmethod
    def create(proxy=None, logging=False):
        """Create a throwaway forefront.ai account and return its JWT.

        Signs up through the Clerk API using a temporary mail.tm address,
        polls that inbox for the emailed verification code, and appends the
        resulting credentials to ``accounts.txt``.

        :param proxy: optional "host:port" proxy used for all requests
        :param logging: when True, print progress/debug information
        :return: the session JWT on success, or an error message string
        """
        # NOTE(review): `False` (not None) marks "no proxy" — the downstream
        # clients appear to accept any falsy value; TODO confirm.
        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy} if proxy else False
        start = time()
        mail = Mail(proxies)
        mail_token = None
        mail_adress = mail.get_mail()
        # print(mail_adress)
        # tls_client session impersonating Chrome 110 (TLS fingerprinting)
        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            "origin": "https://accounts.forefront.ai",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        }
        # Step 1: start the sign-up with the temporary email address
        response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
                               data={
                                   "email_address": mail_adress
                               }
                               )
        try:
            trace_token = response.json()['response']['id']
            if logging: print(trace_token)
        except KeyError:
            return 'Failed to create account!'
        # Step 2: ask Clerk to email the verification code
        response = client.post(
            f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
            data={
                "strategy": "email_code",
            }
        )
        if logging: print(response.text)
        if not 'sign_up_attempt' in response.text:
            return 'Failed to create account!'
        # Step 3: poll the inbox until a 5-6 digit code arrives.
        # NOTE(review): loops forever if the mail never arrives, and
        # match(...).group(0) raises AttributeError on a non-matching mail —
        # assumes the inbox only ever receives the verification email.
        while True:
            sleep(1)
            for _ in mail.fetch_inbox():
                if logging: print(mail.get_message_content(_["id"]))
                mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)
            if mail_token:
                break
        if logging: print(mail_token)
        # Step 4: submit the code to complete verification
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
            data={
                'code': mail_token,
                'strategy': 'email_code'
            })
        if logging: print(response.json())
        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
        # Persist the credentials for later reuse
        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_adress}:{token}\n')
        if logging: print(time() - start)
        return token
class StreamingCompletion:
    @staticmethod
    def create(
            token=None,
            chatId=None,
            prompt='',
            actionType='new',
            defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
            model='gpt-4') -> ForeFrontResponse:
        """Stream a completion from the forefront chat backend.

        NOTE(review): despite the annotation, this is a generator — it
        yields one ForeFrontResponse per received text delta.

        :param token: JWT obtained from Account.create (required)
        :param chatId: conversation id; a fresh uuid4 is used when omitted
        :param prompt: user input to send
        :param actionType: chat action; 'new' starts a conversation
        :param defaultPersona: persona id expected by the backend
        :param model: model name, e.g. 'gpt-4'
        :raises Exception: when no token is supplied
        """
        if not token: raise Exception('Token is required!')
        if not chatId: chatId = str(uuid4())
        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }
        json_data = {
            'text': prompt,
            'action': actionType,
            'parentId': chatId,
            'workspaceId': chatId,
            'messagePersona': defaultPersona,
            'model': model
        }
        # The backend streams SSE-style lines; chunks whose finish_reason is
        # still null carry incremental text deltas.
        for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
                          headers=headers, json=json_data, stream=True).iter_lines():
            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                # NOTE: rebinds the `token` parameter to the delta text
                token = data['choices'][0]['delta'].get('content')
                if token != None:
                    yield ForeFrontResponse({
                        'id': chatId,
                        'object': 'text_completion',
                        'created': int(time()),
                        'model': model,
                        'choices': [{
                            'text': token,
                            'index': 0,
                            'logprobs': None,
                            'finish_reason': 'stop'
                        }],
                        'usage': {
                            # character counts, not real token counts
                            'prompt_tokens': len(prompt),
                            'completion_tokens': len(token),
                            'total_tokens': len(prompt) + len(token)
                        }
                    })

View file

@ -1,36 +0,0 @@
class ForeFrontResponse:
    """Object wrapper around the raw forefront completion payload."""

    class Completion:
        """Parsed ``choices`` portion of a response."""

        class Choices:
            """A single completion choice and its metadata."""

            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            wrapped = []
            for entry in choices:
                wrapped.append(self.Choices(entry))
            self.choices = wrapped

    class Usage:
        """Token accounting reported with the response."""

        def __init__(self, usage_dict: dict) -> None:
            self.completion_tokens = usage_dict['completion_tokens']
            self.prompt_tokens = usage_dict['prompt_tokens']
            self.total_tokens = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        # keep the raw payload so json() can hand it back untouched
        self.response_dict = response_dict
        self.model = response_dict['model']
        self.created = response_dict['created']
        self.object = response_dict['object']
        self.id = response_dict['id']
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the original payload dict unchanged."""
        return self.response_dict

116
gpt4free/README.md Normal file
View file

@ -0,0 +1,116 @@
# gpt4free package
### What is it?
gpt4free is a Python package that provides access to several language-model APIs
### Main Features
- It's free to use
- Easy access
### Installation:
```bash
pip install gpt4free
```
#### Usage:
```python
import gpt4free
from gpt4free import Provider, quora, forefront
# usage You
response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
print(response)
# usage Poe
token = quora.Account.create(logging=False)
response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
print(response)
# usage forefront
token = forefront.Account.create(logging=False)
response = gpt4free.Completion.create(
Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
)
print(response)
print('END')
# usage theb
response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
print(response)
# usage cocalc
response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
print(response)
```
### Invocation Arguments
`gpt4free.Completion.create()` method has two required arguments
1. Provider: This is an enum representing different providers
2. prompt: This is the user input
#### Keyword Arguments
Some of the keyword arguments are optional, while others are required.
- You:
- `safe_search`: boolean - default value is `False`
- `include_links`: boolean - default value is `False`
- `detailed`: boolean - default value is `False`
- Quora:
- `token`: str - this needs to be provided by the user
- `model`: str - default value is `gpt-4`.
(Available models: `['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']`)
- ForeFront:
  - `token`: str - this needs to be provided by the user
- Theb:
(no keyword arguments required)
- CoCalc:
- `cookie_input`: str - this needs to be provided by user
#### Token generation of quora
```python
from gpt4free import quora
token = quora.Account.create(logging=False)
```
### Token generation of ForeFront
```python
from gpt4free import forefront
token = forefront.Account.create(logging=False)
```
## Copyright:
This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt)
### Copyright Notice: <a name="copyright"></a>
```
xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry.
Copyright (C) 2023 xtekky
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
```

64
gpt4free/__init__.py Normal file
View file

@ -0,0 +1,64 @@
from enum import Enum
from gpt4free import cocalc
from gpt4free import forefront
from gpt4free import quora
from gpt4free import theb
from gpt4free import you
class Provider(Enum):
    """An enum representing different providers."""

    You = 'you'                # you.com backend, no credentials needed
    Poe = 'poe'                # quora/poe backend, requires a token
    ForeFront = 'fore_front'   # forefront.ai backend, requires a token
    Theb = 'theb'              # theb backend, streamed then joined
    CoCalc = 'cocalc'          # cocalc.com backend, requires a session cookie
class Completion:
    """This class will be used for invoking the given provider"""

    @staticmethod
    def create(provider: Provider, prompt: str, **kwargs) -> str:
        """
        Invokes the given provider with given prompt and additional arguments and returns the string response

        :param provider: an enum representing the provider to use while invoking
        :param prompt: input provided by the user
        :param kwargs: Additional keyword arguments to pass to the provider while invoking
        :return: A string representing the response from the provider
        :raises ValueError: if *provider* is not a known Provider member
        """
        if provider == Provider.Poe:
            return Completion.__poe_service(prompt, **kwargs)
        elif provider == Provider.You:
            return Completion.__you_service(prompt, **kwargs)
        elif provider == Provider.ForeFront:
            return Completion.__fore_front_service(prompt, **kwargs)
        elif provider == Provider.Theb:
            return Completion.__theb_service(prompt, **kwargs)
        elif provider == Provider.CoCalc:
            return Completion.__cocalc_service(prompt, **kwargs)
        else:
            # Fixed the ungrammatical message; ValueError is more precise than
            # bare Exception and is still caught by `except Exception`.
            raise ValueError('Provider does not exist, please try again')

    @staticmethod
    def __you_service(prompt: str, **kwargs) -> str:
        """Delegate to the you.com backend and return its text."""
        return you.Completion.create(prompt, **kwargs).text

    @staticmethod
    def __poe_service(prompt: str, **kwargs) -> str:
        """Delegate to the quora/poe backend and return its text."""
        return quora.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __fore_front_service(prompt: str, **kwargs) -> str:
        """Delegate to the forefront backend and return its text."""
        return forefront.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __theb_service(prompt: str, **kwargs) -> str:
        """Join the theb streaming chunks into one string (consistency: added -> str)."""
        return ''.join(theb.Completion.create(prompt=prompt))

    @staticmethod
    def __cocalc_service(prompt: str, **kwargs) -> str:
        """Delegate to the cocalc backend and return its text (consistency: added -> str)."""
        return cocalc.Completion.create(prompt, cookie_input=kwargs.get('cookie_input', '')).text

View file

@ -0,0 +1,47 @@
import requests
from fake_useragent import UserAgent
from pydantic import BaseModel
class CoCalcResponse(BaseModel):
    """Validated result of a CoCalc chat request."""

    # the model's output string (backend 'output' field)
    text: str
    # True when the backend reported success
    status: bool
class Completion:
    """Client for the unofficial CoCalc ChatGPT endpoint.

    Sends a prompt to https://cocalc.com/api/v2/openai/chatgpt, authenticating
    with the caller-supplied session cookie.
    """

    @staticmethod
    def create(prompt: str, cookie_input: str) -> CoCalcResponse:
        """Submit *prompt* and return the parsed response.

        :param prompt: user input forwarded to the model
        :param cookie_input: raw ``Cookie`` header value of an authenticated
            cocalc.com session
        :return: CoCalcResponse with the output text and a success flag
        """
        # Initialize a session with custom headers
        session = Completion._initialize_session(cookie_input)
        # Set the data that will be submitted
        payload = Completion._create_payload(prompt, 'ASSUME I HAVE FULL ACCESS TO COCALC. ')
        # Submit the request and return the results
        return Completion._submit_request(session, payload)

    @staticmethod
    def _initialize_session(conversation_cookie) -> requests.Session:
        """Initialize a session with custom headers for the request.

        Consistency fix: was a @classmethod with an unused ``cls``; the
        sibling helpers are already @staticmethod.
        """
        session = requests.Session()
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://cocalc.com',
            'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
            'Cookie': conversation_cookie,
            # random browser UA to avoid trivial request filtering
            'User-Agent': UserAgent().random,
        }
        session.headers.update(headers)
        return session

    @staticmethod
    def _create_payload(prompt: str, system_prompt: str) -> dict:
        """Build the JSON body expected by the CoCalc chat API."""
        return {'input': prompt, 'system': system_prompt, 'tag': 'next:index'}

    @staticmethod
    def _submit_request(session: requests.Session, payload: dict) -> CoCalcResponse:
        """POST the payload and map the JSON reply onto CoCalcResponse."""
        response = session.post('https://cocalc.com/api/v2/openai/chatgpt', json=payload).json()
        return CoCalcResponse(text=response['output'], status=response['success'])

View file

@ -1,11 +1,10 @@
### Example: `cocalc` <a name="example-cocalc"></a> ### Example: `cocalc` <a name="example-cocalc"></a>
```python ```python
# import library # import library
import cocalc from gpt4free import cocalc
cocalc.Completion.create(prompt="How are you!", cookieInput="cookieinput") ## Tutorial cocalc.Completion.create(prompt="How are you!", cookie_input="cookieinput") ## Tutorial
``` ```
### How to grab cookie input ### How to grab cookie input

View file

@ -0,0 +1,16 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>
```python
from gpt4free import forefront
# create an account
token = forefront.Account.create(logging=False)
print(token)
# get a response
for response in forefront.StreamingCompletion.create(token=token,
prompt='hello world', model='gpt-4'):
print(response.completion.choices[0].text, end='')
print("")
```

View file

@ -0,0 +1,192 @@
from json import loads
from re import match
from time import time, sleep
from typing import Generator, Optional
from uuid import uuid4
from fake_useragent import UserAgent
from requests import post
from tls_client import Session
from .mail import Mail
from .typing import ForeFrontResponse
class Account:
    @staticmethod
    def create(proxy: Optional[str] = None, logging: bool = False):
        """Create a throwaway forefront.ai account and return its JWT.

        Signs up through the Clerk API using a temporary mail.tm address,
        polls that inbox for the emailed verification code, and appends the
        resulting credentials to ``accounts.txt``.

        :param proxy: optional "host:port" proxy used for all requests
        :param logging: when True, print progress/debug information
        :return: the session JWT on success, or an error message string
        """
        # NOTE(review): `False` (not None) marks "no proxy" — downstream
        # clients appear to accept any falsy value; TODO confirm.
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
        start = time()
        mail_client = Mail(proxies)
        mail_token = None
        mail_address = mail_client.get_mail()
        # print(mail_address)
        # tls_client session impersonating Chrome 110 (TLS fingerprinting)
        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            'origin': 'https://accounts.forefront.ai',
            'user-agent': UserAgent().random,
        }
        # Step 1: start the sign-up with the temporary email address
        response = client.post(
            'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
            data={'email_address': mail_address},
        )
        try:
            trace_token = response.json()['response']['id']
            if logging:
                print(trace_token)
        except KeyError:
            return 'Failed to create account!'
        # Step 2: ask Clerk to email the verification code
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6',
            data={
                'strategy': 'email_code',
            },
        )
        if logging:
            print(response.text)
        if 'sign_up_attempt' not in response.text:
            return 'Failed to create account!'
        # Step 3: poll the inbox until a 5-6 digit code arrives.
        # NOTE(review): loops forever if the mail never arrives, and
        # match(...).group(0) raises AttributeError on a non-matching mail —
        # assumes the inbox only ever receives the verification email.
        while True:
            sleep(1)
            for _ in mail_client.fetch_inbox():
                if logging:
                    print(mail_client.get_message_content(_['id']))
                mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)
            if mail_token:
                break
        if logging:
            print(mail_token)
        # Step 4: submit the code to complete verification
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
            data={'code': mail_token, 'strategy': 'email_code'},
        )
        if logging:
            print(response.json())
        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
        # Persist the credentials for later reuse
        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_address}:{token}\n')
        if logging:
            print(time() - start)
        return token
class StreamingCompletion:
    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
        model='gpt-4',
    ) -> Generator[ForeFrontResponse, None, None]:
        """Stream a completion from the forefront chat backend.

        Yields one ForeFrontResponse per incremental text delta.

        :param token: JWT obtained from Account.create (required)
        :param chat_id: conversation id; a fresh uuid4 is used when omitted
        :param prompt: user input to send
        :param action_type: chat action; 'new' starts a conversation
        :param default_persona: persona id expected by the backend
        :param model: model name, e.g. 'gpt-4'
        :raises Exception: when no token is supplied
        """
        if not token:
            raise Exception('Token is required!')
        if not chat_id:
            chat_id = str(uuid4())
        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            # random browser UA to avoid trivial request filtering
            'user-agent': UserAgent().random,
        }
        json_data = {
            'text': prompt,
            'action': action_type,
            'parentId': chat_id,
            'workspaceId': chat_id,
            'messagePersona': default_persona,
            'model': model,
        }
        # The backend streams SSE-style lines; chunks whose finish_reason is
        # still null carry incremental text deltas.
        for chunk in post(
            'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
            headers=headers,
            json=json_data,
            stream=True,
        ).iter_lines():
            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                # NOTE: rebinds the `token` parameter to the delta text
                token = data['choices'][0]['delta'].get('content')
                if token is not None:
                    yield ForeFrontResponse(
                        **{
                            'id': chat_id,
                            'object': 'text_completion',
                            'created': int(time()),
                            'text': token,
                            'model': model,
                            'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
                            'usage': {
                                # character counts, not real token counts
                                'prompt_tokens': len(prompt),
                                'completion_tokens': len(token),
                                'total_tokens': len(prompt) + len(token),
                            },
                        }
                    )
class Completion:
    """Blocking wrapper that drains StreamingCompletion into one response."""

    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # backend default
        model='gpt-4',
    ) -> ForeFrontResponse:
        """Consume the whole stream and return the last chunk with its
        ``text`` replaced by the concatenated output of every chunk.
        """
        pieces = []
        last_chunk = None
        stream = StreamingCompletion.create(
            token=token,
            chat_id=chat_id,
            prompt=prompt,
            action_type=action_type,
            default_persona=default_persona,
            model=model,
        )
        for chunk in stream:
            if not chunk:
                continue
            last_chunk = chunk
            pieces.append(chunk.text)
        if last_chunk is None:
            # nothing arrived from the backend
            raise Exception('Unable to get the response, Please try again')
        last_chunk.text = ''.join(pieces)
        return last_chunk

View file

@ -23,21 +23,17 @@ class Mail:
"sec-fetch-dest": "empty", "sec-fetch-dest": "empty",
"referer": "https://mail.tm/", "referer": "https://mail.tm/",
"accept-encoding": "gzip, deflate, br", "accept-encoding": "gzip, deflate, br",
"accept-language": "en-GB,en-US;q=0.9,en;q=0.8" "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
} }
def get_mail(self) -> str: def get_mail(self) -> str:
token = ''.join(choices(ascii_letters, k=14)).lower() token = ''.join(choices(ascii_letters, k=14)).lower()
init = self.client.post("https://api.mail.tm/accounts", json={ init = self.client.post(
"address": f"{token}@bugfoo.com", "https://api.mail.tm/accounts", json={"address": f"{token}@bugfoo.com", "password": token}
"password": token )
})
if init.status_code == 201: if init.status_code == 201:
resp = self.client.post("https://api.mail.tm/token", json={ resp = self.client.post("https://api.mail.tm/token", json={**init.json(), "password": token})
**init.json(),
"password": token
})
self.client.headers['authorization'] = 'Bearer ' + resp.json()['token'] self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']

View file

@ -0,0 +1,26 @@
from typing import Any, List
from pydantic import BaseModel
class Choice(BaseModel):
    """One completion alternative returned by the provider."""

    # generated text for this choice
    text: str
    # position of the choice within the response
    index: int
    # token log-probabilities; the forefront client always sends None
    logprobs: Any
    # why generation stopped (e.g. 'stop')
    finish_reason: str
class Usage(BaseModel):
    """Token accounting for a completion (character counts in this client)."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class ForeFrontResponse(BaseModel):
    """Structured response produced by the forefront provider."""

    id: str
    object: str
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
    # latest delta while streaming; the blocking wrapper overwrites this
    # with the full concatenated output
    text: str

View file

@ -21,26 +21,25 @@ models = {
```python ```python
# import quora (poe) package # import quora (poe) package
import quora from gpt4free import quora
# create account # create account
# make sure to set enable_bot_creation to True # make sure to set enable_bot_creation to True
token = quora.Account.create(logging = True, enable_bot_creation=True) token = quora.Account.create(logging=True, enable_bot_creation=True)
model = quora.Model.create( model = quora.Model.create(
token = token, token=token,
model = 'gpt-3.5-turbo', # or claude-instant-v1.0 model='gpt-3.5-turbo', # or claude-instant-v1.0
system_prompt = 'you are ChatGPT a large language model ...' system_prompt='you are ChatGPT a large language model ...'
) )
print(model.name) # gptx.... print(model.name) # gptx....
# streaming response # streaming response
for response in quora.StreamingCompletion.create( for response in quora.StreamingCompletion.create(
custom_model = model.name, custom_model=model.name,
prompt ='hello world', prompt='hello world',
token = token): token=token):
print(response.completion.choices[0].text) print(response.completion.choices[0].text)
``` ```
@ -56,7 +55,7 @@ print(response.completion.choices[0].text)
### Update Use This For Poe ### Update Use This For Poe
```python ```python
from quora import Poe from gpt4free.quora import Poe
# available models: ['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI'] # available models: ['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']

View file

@ -6,11 +6,12 @@ from pathlib import Path
from random import choice, choices, randint from random import choice, choices, randint
from re import search, findall from re import search, findall
from string import ascii_letters, digits from string import ascii_letters, digits
from typing import Optional, Union from typing import Optional, Union, List, Any, Generator
from urllib.parse import unquote from urllib.parse import unquote
import selenium.webdriver.support.expected_conditions as EC import selenium.webdriver.support.expected_conditions as EC
from fake_useragent import UserAgent from fake_useragent import UserAgent
from pydantic import BaseModel
from pypasser import reCaptchaV3 from pypasser import reCaptchaV3
from requests import Session from requests import Session
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
@ -18,8 +19,8 @@ from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.support.wait import WebDriverWait
from tls_client import Session as TLS from tls_client import Session as TLS
from quora.api import Client as PoeClient from .api import Client as PoeClient
from quora.mail import Emailnator from .mail import Emailnator
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
@ -67,42 +68,27 @@ def extract_formkey(html):
return formkey return formkey
class PoeResponse: class Choice(BaseModel):
class Completion: text: str
class Choices: index: int
def __init__(self, choice: dict) -> None: logprobs: Any
self.text = choice['text'] finish_reason: str
self.content = self.text.encode()
self.index = choice['index']
self.logprobs = choice['logprobs']
self.finish_reason = choice['finish_reason']
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
def __init__(self, choices: dict) -> None: class Usage(BaseModel):
self.choices = [self.Choices(choice) for choice in choices] prompt_tokens: int
completion_tokens: int
total_tokens: int
class Usage:
def __init__(self, usage_dict: dict) -> None:
self.prompt_tokens = usage_dict['prompt_tokens']
self.completion_tokens = usage_dict['completion_tokens']
self.total_tokens = usage_dict['total_tokens']
def __repr__(self): class PoeResponse(BaseModel):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>''' id: int
object: str
def __init__(self, response_dict: dict) -> None: created: int
self.response_dict = response_dict model: str
self.id = response_dict['id'] choices: List[Choice]
self.object = response_dict['object'] usage: Usage
self.created = response_dict['created'] text: str
self.model = response_dict['model']
self.completion = self.Completion(response_dict['choices'])
self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
return self.response_dict
class ModelResponse: class ModelResponse:
@ -116,18 +102,12 @@ class ModelResponse:
class Model: class Model:
@staticmethod @staticmethod
def create( def create(
token: str, token: str,
model: str = 'gpt-3.5-turbo', # claude-instant model: str = 'gpt-3.5-turbo', # claude-instant
system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible', system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
description: str = 'gpt-3.5 language model from openai, skidded by poe.com', description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
handle: str = None, handle: str = None,
) -> ModelResponse: ) -> ModelResponse:
models = {
'gpt-3.5-turbo': 'chinchilla',
'claude-instant-v1.0': 'a2',
'gpt-4': 'beaver',
}
if not handle: if not handle:
handle = f'gptx{randint(1111111, 9999999)}' handle = f'gptx{randint(1111111, 9999999)}'
@ -162,7 +142,7 @@ class Model:
obj={ obj={
'queryName': 'CreateBotMain_poeBotCreate_Mutation', 'queryName': 'CreateBotMain_poeBotCreate_Mutation',
'variables': { 'variables': {
'model': models[model], 'model': MODELS[model],
'handle': handle, 'handle': handle,
'prompt': system_prompt, 'prompt': system_prompt,
'isPromptPublic': True, 'isPromptPublic': True,
@ -202,9 +182,9 @@ class Model:
class Account: class Account:
@staticmethod @staticmethod
def create( def create(
proxy: Optional[str] = None, proxy: Optional[str] = None,
logging: bool = False, logging: bool = False,
enable_bot_creation: bool = False, enable_bot_creation: bool = False,
): ):
client = TLS(client_identifier='chrome110') client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@ -309,22 +289,23 @@ class Account:
class StreamingCompletion: class StreamingCompletion:
@staticmethod @staticmethod
def create( def create(
model: str = 'gpt-4', model: str = 'gpt-4',
custom_model: bool = None, custom_model: bool = None,
prompt: str = 'hello world', prompt: str = 'hello world',
token: str = '', token: str = '',
): ) -> Generator[PoeResponse, None, None]:
_model = MODELS[model] if not custom_model else custom_model _model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token) client = PoeClient(token)
for chunk in client.send_message(_model, prompt): for chunk in client.send_message(_model, prompt):
yield PoeResponse( yield PoeResponse(
{ **{
'id': chunk['messageId'], 'id': chunk['messageId'],
'object': 'text_completion', 'object': 'text_completion',
'created': chunk['creationTime'], 'created': chunk['creationTime'],
'model': _model, 'model': _model,
'text': chunk['text_new'],
'choices': [ 'choices': [
{ {
'text': chunk['text_new'], 'text': chunk['text_new'],
@ -343,33 +324,28 @@ class StreamingCompletion:
class Completion: class Completion:
@staticmethod
def create( def create(
model: str = 'gpt-4', model: str = 'gpt-4',
custom_model: str = None, custom_model: str = None,
prompt: str = 'hello world', prompt: str = 'hello world',
token: str = '', token: str = '',
): ) -> PoeResponse:
models = { _model = MODELS[model] if not custom_model else custom_model
'sage': 'capybara',
'gpt-4': 'beaver',
'claude-v1.2': 'a2_2',
'claude-instant-v1.0': 'a2',
'gpt-3.5-turbo': 'chinchilla',
}
_model = models[model] if not custom_model else custom_model
client = PoeClient(token) client = PoeClient(token)
for chunk in client.send_message(_model, prompt): chunk = None
pass for response in client.send_message(_model, prompt):
chunk = response
return PoeResponse( return PoeResponse(
{ **{
'id': chunk['messageId'], 'id': chunk['messageId'],
'object': 'text_completion', 'object': 'text_completion',
'created': chunk['creationTime'], 'created': chunk['creationTime'],
'model': _model, 'model': _model,
'text': chunk['text'],
'choices': [ 'choices': [
{ {
'text': chunk['text'], 'text': chunk['text'],
@ -389,22 +365,22 @@ class Completion:
class Poe: class Poe:
def __init__( def __init__(
self, self,
model: str = 'ChatGPT', model: str = 'ChatGPT',
driver: str = 'firefox', driver: str = 'firefox',
download_driver: bool = False, download_driver: bool = False,
driver_path: Optional[str] = None, driver_path: Optional[str] = None,
cookie_path: str = './quora/cookie.json', cookie_path: str = './quora/cookie.json',
): ):
# validating the model # validating the model
if model and model not in MODELS: if model and model not in MODELS:
raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.') raise RuntimeError('Sorry, the model you provided does not exist. Please check and try again.')
self.model = MODELS[model] self.model = MODELS[model]
self.cookie_path = cookie_path self.cookie_path = cookie_path
self.cookie = self.__load_cookie(driver, download_driver, driver_path=driver_path) self.cookie = self.__load_cookie(driver, driver_path=driver_path)
self.client = PoeClient(self.cookie) self.client = PoeClient(self.cookie)
def __load_cookie(self, driver: str, download_driver: bool, driver_path: Optional[str] = None) -> str: def __load_cookie(self, driver: str, driver_path: Optional[str] = None) -> str:
if (cookie_file := Path(self.cookie_path)).exists(): if (cookie_file := Path(self.cookie_path)).exists():
with cookie_file.open() as fp: with cookie_file.open() as fp:
cookie = json.load(fp) cookie = json.load(fp)
@ -451,8 +427,8 @@ class Poe:
driver.close() driver.close()
return cookie return cookie
@classmethod @staticmethod
def __resolve_driver(cls, driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]: def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
options = FirefoxOptions() if driver == 'firefox' else ChromeOptions() options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
options.add_argument('-headless') options.add_argument('-headless')
@ -473,12 +449,12 @@ class Poe:
return response return response
def create_bot( def create_bot(
self, self,
name: str, name: str,
/, /,
prompt: str = '', prompt: str = '',
base_model: str = 'ChatGPT', base_model: str = 'ChatGPT',
description: str = '', description: str = '',
) -> None: ) -> None:
if base_model not in MODELS: if base_model not in MODELS:
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.') raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')

View file

@ -225,7 +225,7 @@ class Client:
r = request_with_retries(self.session.post, self.gql_url, data=payload, headers=headers) r = request_with_retries(self.session.post, self.gql_url, data=payload, headers=headers)
data = r.json() data = r.json()
if data["data"] == None: if data["data"] is None:
logger.warn(f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i + 1}/20)') logger.warn(f'{query_name} returned an error: {data["errors"][0]["message"]} | Retrying ({i + 1}/20)')
time.sleep(2) time.sleep(2)
continue continue
@ -316,7 +316,7 @@ class Client:
return return
# indicate that the response id is tied to the human message id # indicate that the response id is tied to the human message id
elif key != "pending" and value == None and message["state"] != "complete": elif key != "pending" and value is None and message["state"] != "complete":
self.active_messages[key] = message["messageId"] self.active_messages[key] = message["messageId"]
self.message_queues[key].put(message) self.message_queues[key].put(message)
return return
@ -384,7 +384,7 @@ class Client:
continue continue
# update info about response # update info about response
message["text_new"] = message["text"][len(last_text):] message["text_new"] = message["text"][len(last_text) :]
last_text = message["text"] last_text = message["text"]
message_id = message["messageId"] message_id = message["messageId"]
@ -402,7 +402,7 @@ class Client:
logger.info(f"Downloading {count} messages from {chatbot}") logger.info(f"Downloading {count} messages from {chatbot}")
messages = [] messages = []
if cursor == None: if cursor is None:
chat_data = self.get_bot(self.bot_names[chatbot]) chat_data = self.get_bot(self.bot_names[chatbot])
if not chat_data["messagesConnection"]["edges"]: if not chat_data["messagesConnection"]["edges"]:
return [] return []
@ -456,21 +456,21 @@ class Client:
logger.info(f"No more messages left to delete.") logger.info(f"No more messages left to delete.")
def create_bot( def create_bot(
self, self,
handle, handle,
prompt="", prompt="",
base_model="chinchilla", base_model="chinchilla",
description="", description="",
intro_message="", intro_message="",
api_key=None, api_key=None,
api_bot=False, api_bot=False,
api_url=None, api_url=None,
prompt_public=True, prompt_public=True,
pfp_url=None, pfp_url=None,
linkification=False, linkification=False,
markdown_rendering=True, markdown_rendering=True,
suggested_replies=False, suggested_replies=False,
private=False, private=False,
): ):
result = self.send_query( result = self.send_query(
"PoeBotCreateMutation", "PoeBotCreateMutation",
@ -499,21 +499,21 @@ class Client:
return data return data
def edit_bot( def edit_bot(
self, self,
bot_id, bot_id,
handle, handle,
prompt="", prompt="",
base_model="chinchilla", base_model="chinchilla",
description="", description="",
intro_message="", intro_message="",
api_key=None, api_key=None,
api_url=None, api_url=None,
private=False, private=False,
prompt_public=True, prompt_public=True,
pfp_url=None, pfp_url=None,
linkification=False, linkification=False,
markdown_rendering=True, markdown_rendering=True,
suggested_replies=False, suggested_replies=False,
): ):
result = self.send_query( result = self.send_query(
"PoeBotEditMutation", "PoeBotEditMutation",

View file

@ -42,9 +42,7 @@ class Emailnator:
while True: while True:
sleep(2) sleep(2)
mail_token = self.client.post( mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})
"https://www.emailnator.com/message-list", json={"email": self.email}
)
mail_token = loads(mail_token.text)["messageData"] mail_token = loads(mail_token.text)["messageData"]

View file

@ -1,9 +1,8 @@
### Example: `theb` (use like openai pypi package) <a name="example-theb"></a> ### Example: `theb` (use like openai pypi package) <a name="example-theb"></a>
```python ```python
# import library # import library
import theb from gpt4free import theb
# simple streaming completion # simple streaming completion
for token in theb.Completion.create('hello world'): for token in theb.Completion.create('hello world'):

View file

@ -1,8 +1,12 @@
from re import findall
from json import loads from json import loads
from queue import Queue, Empty from queue import Queue, Empty
from re import findall
from threading import Thread from threading import Thread
from typing import Generator
from curl_cffi import requests from curl_cffi import requests
from fake_useragent import UserAgent
class Completion: class Completion:
# experimental # experimental
@ -14,29 +18,29 @@ class Completion:
message_queue = Queue() message_queue = Queue()
stream_completed = False stream_completed = False
@staticmethod
def request(prompt: str): def request(prompt: str):
headers = { headers = {
'authority': 'chatbot.theb.ai', 'authority': 'chatbot.theb.ai',
'content-type': 'application/json', 'content-type': 'application/json',
'origin': 'https://chatbot.theb.ai', 'origin': 'https://chatbot.theb.ai',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', 'user-agent': UserAgent().random,
} }
requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers, requests.post(
content_callback = Completion.handle_stream_response, 'https://chatbot.theb.ai/api/chat-process',
json = { headers=headers,
'prompt': prompt, content_callback=Completion.handle_stream_response,
'options': {} json={'prompt': prompt, 'options': {}},
}
) )
Completion.stream_completed = True Completion.stream_completed = True
@staticmethod @staticmethod
def create(prompt: str): def create(prompt: str) -> Generator[str, None, None]:
Thread(target=Completion.request, args=[prompt]).start() Thread(target=Completion.request, args=[prompt]).start()
while Completion.stream_completed != True or not Completion.message_queue.empty(): while not Completion.stream_completed or not Completion.message_queue.empty():
try: try:
message = Completion.message_queue.get(timeout=0.01) message = Completion.message_queue.get(timeout=0.01)
for message in findall(Completion.regex, message): for message in findall(Completion.regex, message):

View file

@ -1,4 +1,4 @@
import theb import theb
for token in theb.Completion.create('hello world'): for token in theb.Completion.create('hello world'):
print(token, end='', flush=True) print(token, end='', flush=True)

View file

@ -1,7 +1,8 @@
### Example: `you` (use like openai pypi package) <a name="example-you"></a> ### Example: `you` (use like openai pypi package) <a name="example-you"></a>
```python ```python
import you
from gpt4free import you
# simple request with links and details # simple request with links and details
response = you.Completion.create( response = you.Completion.create(

View file

@ -1,28 +1,36 @@
import json
import re import re
from json import loads from typing import Optional, List, Dict, Any
from uuid import uuid4 from uuid import uuid4
from fake_useragent import UserAgent from fake_useragent import UserAgent
from pydantic import BaseModel
from tls_client import Session from tls_client import Session
class PoeResponse(BaseModel):
text: Optional[str] = None
links: List[str] = []
extra: Dict[str, Any] = {}
class Completion: class Completion:
@staticmethod @staticmethod
def create( def create(
prompt: str, prompt: str,
page: int = 1, page: int = 1,
count: int = 10, count: int = 10,
safe_search: str = 'Moderate', safe_search: str = 'Moderate',
on_shopping_page: bool = False, on_shopping_page: bool = False,
mkt: str = '', mkt: str = '',
response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches', response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
domain: str = 'youchat', domain: str = 'youchat',
query_trace_id: str = None, query_trace_id: str = None,
chat: list = None, chat: list = None,
include_links: bool = False, include_links: bool = False,
detailed: bool = False, detailed: bool = False,
debug: bool = False, debug: bool = False,
) -> dict: ) -> PoeResponse:
if chat is None: if chat is None:
chat = [] chat = []
@ -57,26 +65,28 @@ class Completion:
r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
).group() ).group()
third_party_search_results = re.search( third_party_search_results = re.search(
r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group() r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
).group()
# slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0] # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text)) text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
extra = { extra = {
'youChatSerpResults': loads(you_chat_serp_results), 'youChatSerpResults': json.loads(you_chat_serp_results),
# 'slots' : loads(slots) # 'slots' : loads(slots)
} }
return { response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'), if include_links:
'links': loads(third_party_search_results)['search']['third_party_search_results'] response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
if include_links
else None,
'extra': extra if detailed else None,
}
@classmethod if detailed:
def __get_headers(cls) -> dict: response.extra = extra
return response
@staticmethod
def __get_headers() -> dict:
return { return {
'authority': 'you.com', 'authority': 'you.com',
'accept': 'text/event-stream', 'accept': 'text/event-stream',
@ -93,6 +103,6 @@ class Completion:
'user-agent': UserAgent().random, 'user-agent': UserAgent().random,
} }
@classmethod @staticmethod
def __get_failure_response(cls) -> dict: def __get_failure_response() -> PoeResponse:
return dict(response='Unable to fetch the response, Please try again.', links=[], extra={}) return PoeResponse(text='Unable to fetch the response, Please try again.')

View file

@ -3,11 +3,10 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir)) sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import forefront, quora, theb, you from gpt4free import quora, forefront, theb, you
import random import random
def query_forefront(question: str) -> str: def query_forefront(question: str) -> str:
# create an account # create an account
token = forefront.Account.create(logging=False) token = forefront.Account.create(logging=False)
@ -15,65 +14,59 @@ def query_forefront(question: str) -> str:
response = "" response = ""
# get a response # get a response
try: try:
for i in forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4'): return forefront.Completion.create(token=token, prompt='hello world', model='gpt-4').text
response += i.completion.choices[0].text
return response
except Exception as e: except Exception as e:
# Return error message if an exception occurs # Return error message if an exception occurs
return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.' return (
f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
)
def query_quora(question: str) -> str: def query_quora(question: str) -> str:
token = quora.Account.create(logging=False, enable_bot_creation=True) token = quora.Account.create(logging=False, enable_bot_creation=True)
response = quora.Completion.create( return quora.Completion.create(model='gpt-4', prompt=question, token=token).text
model='gpt-4',
prompt=question,
token=token
)
return response.completion.choices[0].tex
def query_theb(question: str) -> str: def query_theb(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model # Set cloudflare clearance cookie and get answer from GPT-4 model
response = "" response = ""
try: try:
result = theb.Completion.create( return ''.join(theb.Completion.create(prompt=question))
prompt = question)
return result
except Exception as e: except Exception as e:
# Return error message if an exception occurs # Return error message if an exception occurs
return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.' return (
f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
)
def query_you(question: str) -> str: def query_you(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model # Set cloudflare clearance cookie and get answer from GPT-4 model
try: try:
result = you.Completion.create( result = you.Completion.create(prompt=question)
prompt = question)
return result["response"] return result["response"]
except Exception as e: except Exception as e:
# Return error message if an exception occurs # Return error message if an exception occurs
return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.' return (
f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
)
# Define a dictionary containing all query methods # Define a dictionary containing all query methods
avail_query_methods = { avail_query_methods = {
"Forefront": query_forefront, "Forefront": query_forefront,
"Poe": query_quora, "Poe": query_quora,
"Theb": query_theb, "Theb": query_theb,
"You": query_you, "You": query_you,
# "Writesonic": query_writesonic, # "Writesonic": query_writesonic,
# "T3nsor": query_t3nsor, # "T3nsor": query_t3nsor,
# "Phind": query_phind, # "Phind": query_phind,
# "Ora": query_ora, # "Ora": query_ora,
} }
def query(user_input: str, selected_method: str = "Random") -> str: def query(user_input: str, selected_method: str = "Random") -> str:
# If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
if selected_method != "Random" and selected_method in avail_query_methods: if selected_method != "Random" and selected_method in avail_query_methods:
try: try:
@ -104,4 +97,3 @@ def query(user_input: str, selected_method: str = "Random") -> str:
query_methods_list.remove(chosen_query) query_methods_list.remove(chosen_query)
return result return result

View file

@ -4,7 +4,7 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir)) sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import streamlit as st import streamlit as st
from openai_rev import you from gpt4free import you
def get_answer(question: str) -> str: def get_answer(question: str) -> str:

View file

@ -1,6 +1,6 @@
import atexit
import os import os
import sys import sys
import atexit
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir)) sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
@ -9,9 +9,9 @@ from streamlit_chat import message
from query_methods import query, avail_query_methods from query_methods import query, avail_query_methods
import pickle import pickle
conversations_file = "conversations.pkl" conversations_file = "conversations.pkl"
def load_conversations(): def load_conversations():
try: try:
with open(conversations_file, "rb") as f: with open(conversations_file, "rb") as f:
@ -31,11 +31,11 @@ def save_conversations(conversations, current_conversation):
break break
if not updated: if not updated:
conversations.append(current_conversation) conversations.append(current_conversation)
temp_conversations_file = "temp_" + conversations_file temp_conversations_file = "temp_" + conversations_file
with open(temp_conversations_file, "wb") as f: with open(temp_conversations_file, "wb") as f:
pickle.dump(conversations, f) pickle.dump(conversations, f)
os.replace(temp_conversations_file, conversations_file) os.replace(temp_conversations_file, conversations_file)
@ -44,10 +44,10 @@ def exit_handler():
# Perform cleanup operations here, like saving data or closing open files. # Perform cleanup operations here, like saving data or closing open files.
save_conversations(st.session_state.conversations, st.session_state.current_conversation) save_conversations(st.session_state.conversations, st.session_state.current_conversation)
# Register the exit_handler function to be called when the program is closing. # Register the exit_handler function to be called when the program is closing.
atexit.register(exit_handler) atexit.register(exit_handler)
st.header("Chat Placeholder") st.header("Chat Placeholder")
if 'conversations' not in st.session_state: if 'conversations' not in st.session_state:
@ -61,7 +61,7 @@ if 'selected_conversation' not in st.session_state:
if 'input_field_key' not in st.session_state: if 'input_field_key' not in st.session_state:
st.session_state['input_field_key'] = 0 st.session_state['input_field_key'] = 0
if 'query_method' not in st.session_state: if 'query_method' not in st.session_state:
st.session_state['query_method'] = query st.session_state['query_method'] = query
@ -69,19 +69,22 @@ if 'query_method' not in st.session_state:
if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None: if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []} st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
input_placeholder = st.empty() input_placeholder = st.empty()
user_input = input_placeholder.text_input('You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}') user_input = input_placeholder.text_input(
'You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}'
)
submit_button = st.button("Submit") submit_button = st.button("Submit")
if user_input or submit_button: if user_input or submit_button:
output = query(user_input, st.session_state['query_method']) output = query(user_input, st.session_state['query_method'])
escaped_output = output.encode('utf-8').decode('unicode-escape')
st.session_state.current_conversation['user_inputs'].append(user_input)
st.session_state.current_conversation['generated_responses'].append(output)
save_conversations(st.session_state.conversations, st.session_state.current_conversation)
user_input = input_placeholder.text_input('You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}') # Clear the input field
st.session_state.current_conversation['user_inputs'].append(user_input)
st.session_state.current_conversation['generated_responses'].append(escaped_output)
save_conversations(st.session_state.conversations, st.session_state.current_conversation)
user_input = input_placeholder.text_input(
'You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}'
) # Clear the input field
# Add a button to create a new conversation # Add a button to create a new conversation
if st.sidebar.button("New Conversation"): if st.sidebar.button("New Conversation"):
@ -89,11 +92,7 @@ if st.sidebar.button("New Conversation"):
st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []} st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
st.session_state['input_field_key'] += 1 st.session_state['input_field_key'] += 1
st.session_state['query_method'] = st.sidebar.selectbox( st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)
"Select API:",
options=avail_query_methods,
index=0
)
# Sidebar # Sidebar
st.sidebar.header("Conversation History") st.sidebar.header("Conversation History")

View file

@ -2,10 +2,10 @@
name = "openai-rev" name = "openai-rev"
version = "0.1.0" version = "0.1.0"
description = "" description = ""
authors = ["Raju Komati <komatiraju032@gmail.com>"] authors = []
license = "GPL-3.0" license = "GPL-3.0"
readme = "README.md" readme = "README.md"
packages = [{ include = "openai_rev" }] packages = [{ include = "gpt4free" }]
exclude = ["**/*.txt"] exclude = ["**/*.txt"]
[tool.poetry.dependencies] [tool.poetry.dependencies]

View file

@ -1,4 +1,4 @@
import theb from gpt4free import theb
for token in theb.Completion.create('hello world'): for token in theb.Completion.create('hello world'):
print(token, end='', flush=True) print(token, end='', flush=True)

View file

@ -1,4 +1,4 @@
from openai_rev import forefront from gpt4free import forefront
# create an account # create an account
token = forefront.Account.create(logging=True) token = forefront.Account.create(logging=True)

View file

@ -6,8 +6,8 @@ from typing import Optional
from tls_client import Session as TLS from tls_client import Session as TLS
from twocaptcha import TwoCaptcha from twocaptcha import TwoCaptcha
from openai_rev.quora import extract_formkey from gpt4free.quora import extract_formkey
from openai_rev.quora.mail import Emailnator from gpt4free.quora.mail import Emailnator
solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358') solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')

View file

@ -1,6 +1,6 @@
from time import sleep from time import sleep
from openai_rev import quora from gpt4free import quora
token = quora.Account.create(proxy=None, logging=True) token = quora.Account.create(proxy=None, logging=True)
print('token', token) print('token', token)

View file

@ -1,4 +1,4 @@
from openai_rev import quora from gpt4free import quora
token = quora.Account.create(logging=True, enable_bot_creation=True) token = quora.Account.create(logging=True, enable_bot_creation=True)

View file

@ -1,24 +1,27 @@
from openai_rev import openai_rev, Provider, quora, forefront import gpt4free
from gpt4free import Provider, quora, forefront
# usage You # usage You
response = openai_rev.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi') response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
print(response) print(response)
# usage Poe # usage Poe
token = quora.Account.create(logging=False) token = quora.Account.create(logging=False)
response = openai_rev.Completion.create( response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT'
)
print(response) print(response)
# usage forefront # usage forefront
token = forefront.Account.create(logging=False) token = forefront.Account.create(logging=False)
response = openai_rev.Completion.create( response = gpt4free.Completion.create(
Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
) )
print(response) print(response)
print(f'END') print(f'END')
# usage theb # usage theb
response = openai_rev.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi') response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
print(response)
# usage cocalc
response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
print(response) print(response)

View file

@ -1,4 +1,4 @@
from openai_rev import you from gpt4free import you
# simple request with links and details # simple request with links and details
response = you.Completion.create(prompt="hello world", detailed=True, include_links=True) response = you.Completion.create(prompt="hello world", detailed=True, include_links=True)
@ -22,6 +22,6 @@ while True:
response = you.Completion.create(prompt=prompt, chat=chat) response = you.Completion.create(prompt=prompt, chat=chat)
print("Bot:", response["response"]) print("Bot:", response.text)
chat.append({"question": prompt, "answer": response["response"]}) chat.append({"question": prompt, "answer": response.text})