1
0
Fork 0

added gpt4free package

This commit is contained in:
Raju Komati 2023-04-29 14:55:24 +05:30
parent 94b30306f0
commit 54b4c789a7
No known key found for this signature in database
GPG key ID: A581A5D67A8EB090
66 changed files with 492 additions and 422 deletions

View file

@ -92,9 +92,9 @@ Please note the following:
| **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - |
| **Usage Examples** | | | |
| `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](openai_rev/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
| **Try it Out** | | | |
| Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
| replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - |
@ -126,10 +126,10 @@ Please note the following:
## Best sites <a name="best-sites"></a>
#### gpt-4
- [`/forefront`](./forefront/README.md)
- [`/forefront`](gpt4free/forefront/README.md)
#### gpt-3.5
- [`/you`](./you/README.md)
- [`/you`](gpt4free/you/README.md)
## Install <a name="install"></a>
Download or clone this GitHub repo

View file

@ -1,57 +0,0 @@
import requests
class Completion:
    """Minimal client for CoCalc's ChatGPT proxy endpoint."""

    # System prompt sent along with every user prompt.
    SYSTEM_PROMPT = "ASSUME I HAVE FULL ACCESS TO COCALC. "

    @staticmethod
    def create(prompt: str, cookieInput: str) -> dict:
        """Submit *prompt* to CoCalc and return the parsed result.

        :param prompt: user input forwarded to the model
        :param cookieInput: Cookie header value of an authenticated CoCalc session
        :return: ``{'response': <model output>, 'success': <API success flag>}``
        """
        # Initialize a session with custom headers
        session = Completion._initialize_session(cookieInput)

        # Set the data that will be submitted
        payload = Completion._create_payload(prompt, Completion.SYSTEM_PROMPT)

        # Submit the request and return the results
        return Completion._submit_request(session, payload)

    @classmethod
    def _initialize_session(cls, conversationCookie) -> requests.Session:
        """Initialize a session with custom headers for the request."""
        session = requests.Session()
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.5',
            'Origin': 'https://cocalc.com',
            'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
            'Cookie': conversationCookie,
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
        }
        session.headers.update(headers)
        return session

    @classmethod
    def _create_payload(cls, prompt: str, system_prompt: str) -> dict:
        """Build the JSON body understood by the CoCalc endpoint."""
        return {
            "input": prompt,
            "system": system_prompt,
            "tag": "next:index"
        }

    @classmethod
    def _submit_request(cls, session: requests.Session, payload: dict) -> dict:
        """POST the payload and normalise the JSON reply.

        Return annotation fixed: the original declared ``-> str`` but a dict
        is returned (same for ``create`` above).
        """
        response = session.post(
            "https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
        return {
            "response": response["output"],
            "success": response["success"]
        }

View file

@ -1,16 +0,0 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>
```python
import forefront
# create an account
token = forefront.Account.create(logging=False)
print(token)
# get a response
for response in forefront.StreamingCompletion.create(token = token,
prompt = 'hello world', model='gpt-4'):
print(response.completion.choices[0].text, end = '')
print("")
```

View file

@ -1,154 +0,0 @@
from json import loads
from re import match
from time import time, sleep
from uuid import uuid4
from requests import post
from tls_client import Session
from forefront.mail import Mail
from forefront.typing import ForeFrontResponse
class Account:
    """Registers throwaway forefront.ai accounts through a disposable mailbox."""

    @staticmethod
    def create(proxy=None, logging=False):
        """Sign up a fresh account and return its session JWT.

        :param proxy: optional ``host:port`` used for both http and https
        :param logging: print intermediate API responses when True
        :return: the JWT string on success, otherwise the error string
            'Failed to create account!'
        """
        # NOTE(review): failures are reported as a *string* return value, not
        # an exception — callers must check for 'Failed to create account!'.
        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy} if proxy else False

        start = time()

        # Disposable inbox that will receive the verification code.
        mail = Mail(proxies)
        mail_token = None
        mail_adress = mail.get_mail()

        # print(mail_adress)
        # TLS-fingerprinted session so the Clerk API accepts the requests.
        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            "origin": "https://accounts.forefront.ai",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        }

        # Step 1: start the Clerk sign-up with the throwaway address.
        response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
                               data={
                                   "email_address": mail_adress
                               }
                               )

        try:
            trace_token = response.json()['response']['id']
            if logging: print(trace_token)
        except KeyError:
            return 'Failed to create account!'

        # Step 2: ask Clerk to e-mail the verification code.
        response = client.post(
            f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
            data={
                "strategy": "email_code",
            }
        )

        if logging: print(response.text)

        if not 'sign_up_attempt' in response.text:
            return 'Failed to create account!'

        # Step 3: poll the inbox until the 5-6 digit code shows up.
        while True:
            sleep(1)
            for _ in mail.fetch_inbox():
                if logging: print(mail.get_message_content(_["id"]))
                # NOTE(review): match(...) may return None when a mail has no
                # digit code, which would raise AttributeError here.
                mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)

            if mail_token:
                break

        if logging: print(mail_token)

        # Step 4: submit the code to finish the verification.
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
            data={
                'code': mail_token,
                'strategy': 'email_code'
            })

        if logging: print(response.json())

        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
        # Persist the credentials for later reuse.
        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_adress}:{token}\n')

        if logging: print(time() - start)

        return token
class StreamingCompletion:
    """Streams chat completions from the forefront.ai chat server."""

    @staticmethod
    def create(
            token=None,
            chatId=None,
            prompt='',
            actionType='new',
            defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
            model='gpt-4') -> ForeFrontResponse:
        """Yield a ForeFrontResponse for each streamed content delta.

        NOTE(review): this is a generator; the annotation describes the type
        of each yielded item, not the return value.

        :param token: session JWT from Account.create (required)
        :param chatId: conversation id; a fresh uuid4 is generated when omitted
        :param prompt: user input sent to the model
        :param actionType: message action; 'new' starts a conversation
        :param defaultPersona: persona id forwarded to the chat server
        :param model: model name, e.g. 'gpt-4'
        :raises Exception: when no token is supplied
        """
        if not token: raise Exception('Token is required!')
        if not chatId: chatId = str(uuid4())

        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        json_data = {
            'text': prompt,
            'action': actionType,
            'parentId': chatId,
            'workspaceId': chatId,
            'messagePersona': defaultPersona,
            'model': model
        }

        # The endpoint streams SSE-style lines; only lines still carrying
        # finish_reason:null contain new content deltas.
        for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
                          headers=headers, json=json_data, stream=True).iter_lines():
            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                # NOTE(review): `token` is reused here for the content delta,
                # shadowing the auth-token parameter (already consumed above).
                token = data['choices'][0]['delta'].get('content')

                if token != None:
                    yield ForeFrontResponse({
                        'id': chatId,
                        'object': 'text_completion',
                        'created': int(time()),
                        'model': model,
                        'choices': [{
                            'text': token,
                            'index': 0,
                            'logprobs': None,
                            'finish_reason': 'stop'
                        }],
                        'usage': {
                            # NOTE(review): these are character counts via
                            # len(), not real token counts.
                            'prompt_tokens': len(prompt),
                            'completion_tokens': len(token),
                            'total_tokens': len(prompt) + len(token)
                        }
                    })

View file

@ -1,36 +0,0 @@
class ForeFrontResponse:
    """Attribute-style wrapper around a raw completion response dict."""

    class Completion:
        """Holds the list of choice objects from a response."""

        class Choices:
            """One choice entry; mirrors the keys of the underlying dict."""

            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                # Pre-encoded bytes form of the text, kept for convenience.
                self.content = self.text.encode()
                for field in ('index', 'logprobs', 'finish_reason'):
                    setattr(self, field, choice[field])

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = list(map(self.Choices, choices))

    class Usage:
        """Token accounting block of a response."""

        def __init__(self, usage_dict: dict) -> None:
            for field in ('prompt_tokens', 'completion_tokens', 'total_tokens'):
                setattr(self, field, usage_dict[field])

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        # Keep the raw dict around so json() can hand it back untouched.
        self.response_dict = response_dict
        for field in ('id', 'object', 'created', 'model'):
            setattr(self, field, response_dict[field])
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        """Return the original, unmodified response dict."""
        return self.response_dict

64
gpt4free/__init__.py Normal file
View file

@ -0,0 +1,64 @@
from enum import Enum
from gpt4free import cocalc
from gpt4free import forefront
from gpt4free import quora
from gpt4free import theb
from gpt4free import you
class Provider(Enum):
    """An enum representing different providers."""

    You = 'you'  # you.com
    Poe = 'poe'  # poe.com via the quora client
    ForeFront = 'fore_front'  # forefront.ai
    Theb = 'theb'  # theb.ai
    CoCalc = 'cocalc'  # cocalc.com ChatGPT proxy
class Completion:
    """This class will be used for invoking the given provider"""

    @staticmethod
    def create(provider: Provider, prompt: str, **kwargs) -> str:
        """
        Invokes the given provider with given prompt and addition arguments and returns the string response

        :param provider: an enum representing the provider to use while invoking
        :param prompt: input provided by the user
        :param kwargs: Additional keyword arguments to pass to the provider while invoking
        :return: A string representing the response from the provider
        """
        # Table-driven dispatch instead of an if/elif ladder.
        services = {
            Provider.Poe: Completion.__poe_service,
            Provider.You: Completion.__you_service,
            Provider.ForeFront: Completion.__fore_front_service,
            Provider.Theb: Completion.__theb_service,
            Provider.CoCalc: Completion.__cocalc_service,
        }
        service = services.get(provider)
        if service is None:
            raise Exception('Provider not exist, Please try again')
        return service(prompt, **kwargs)

    @staticmethod
    def __you_service(prompt: str, **kwargs) -> str:
        """Delegate to the you.com client and return its text."""
        return you.Completion.create(prompt, **kwargs).text

    @staticmethod
    def __poe_service(prompt: str, **kwargs) -> str:
        """Delegate to the quora (poe) client and return its text."""
        return quora.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __fore_front_service(prompt: str, **kwargs) -> str:
        """Delegate to the forefront client and return its text."""
        return forefront.Completion.create(prompt=prompt, **kwargs).text

    @staticmethod
    def __theb_service(prompt: str, **kwargs):
        """Join the theb streaming tokens into one string."""
        return ''.join(theb.Completion.create(prompt=prompt))

    @staticmethod
    def __cocalc_service(prompt: str, **kwargs):
        """Delegate to the cocalc client using the cookie from kwargs."""
        return cocalc.Completion.create(prompt, cookie_input=kwargs.get('cookie_input', '')).text

View file

@ -0,0 +1,47 @@
import requests
from fake_useragent import UserAgent
from pydantic import BaseModel
class CoCalcResponse(BaseModel):
    """Typed response returned by the cocalc Completion client."""

    # Model output text.
    text: str
    # Success flag reported by the CoCalc API.
    status: bool
class Completion:
    """Client for CoCalc's ChatGPT relay; wraps replies in CoCalcResponse."""

    @staticmethod
    def create(prompt: str, cookie_input: str) -> CoCalcResponse:
        """Run *prompt* through the CoCalc endpoint using the given session cookie."""
        http = Completion._initialize_session(cookie_input)
        body = Completion._create_payload(prompt, 'ASSUME I HAVE FULL ACCESS TO COCALC. ')
        return Completion._submit_request(http, body)

    @classmethod
    def _initialize_session(cls, conversation_cookie) -> requests.Session:
        """Build a requests session carrying the CoCalc cookie and browser-like headers."""
        http = requests.Session()
        http.headers.update(
            {
                'Accept': '*/*',
                'Accept-Language': 'en-US,en;q=0.5',
                'Origin': 'https://cocalc.com',
                'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
                'Cookie': conversation_cookie,
                'User-Agent': UserAgent().random,
            }
        )
        return http

    @staticmethod
    def _create_payload(prompt: str, system_prompt: str) -> dict:
        """Assemble the JSON body expected by the endpoint."""
        return {'input': prompt, 'system': system_prompt, 'tag': 'next:index'}

    @staticmethod
    def _submit_request(session: requests.Session, payload: dict) -> CoCalcResponse:
        """POST the payload and convert the raw JSON reply into a CoCalcResponse."""
        reply = session.post('https://cocalc.com/api/v2/openai/chatgpt', json=payload).json()
        return CoCalcResponse(text=reply['output'], status=reply['success'])

View file

@ -1,11 +1,10 @@
### Example: `cocalc` <a name="example-cocalc"></a>
```python
# import library
import cocalc
from gpt4free import cocalc
cocalc.Completion.create(prompt="How are you!", cookieInput="cookieinput") ## Tutorial
cocalc.Completion.create(prompt="How are you!", cookie_input="cookieinput") ## Tutorial
```
### How to grab cookie input

View file

@ -0,0 +1,16 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>
```python
from gpt4free import forefront
# create an account
token = forefront.Account.create(logging=False)
print(token)
# get a response
for response in forefront.StreamingCompletion.create(token=token,
prompt='hello world', model='gpt-4'):
print(response.completion.choices[0].text, end='')
print("")
```

View file

@ -0,0 +1,192 @@
from json import loads
from re import match
from time import time, sleep
from typing import Generator, Optional
from uuid import uuid4
from fake_useragent import UserAgent
from requests import post
from tls_client import Session
from .mail import Mail
from .typing import ForeFrontResponse
class Account:
    """Registers throwaway forefront.ai accounts via a disposable mailbox."""

    @staticmethod
    def create(proxy: Optional[str] = None, logging: bool = False):
        """Sign up a fresh account and return its session JWT.

        :param proxy: optional ``host:port`` used for both http and https
        :param logging: print intermediate API responses when True
        :return: the JWT string on success, otherwise the error string
            'Failed to create account!'
        """
        # NOTE(review): failures are reported as a *string* return value, not
        # an exception — callers must check for 'Failed to create account!'.
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False

        start = time()

        # Disposable inbox that will receive the verification code.
        mail_client = Mail(proxies)
        mail_token = None
        mail_address = mail_client.get_mail()

        # print(mail_address)
        # TLS-fingerprinted session so the Clerk API accepts the requests.
        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            'origin': 'https://accounts.forefront.ai',
            'user-agent': UserAgent().random,
        }

        # Step 1: start the Clerk sign-up with the throwaway address.
        response = client.post(
            'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
            data={'email_address': mail_address},
        )

        try:
            trace_token = response.json()['response']['id']
            if logging:
                print(trace_token)
        except KeyError:
            return 'Failed to create account!'

        # Step 2: ask Clerk to e-mail the verification code.
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6',
            data={
                'strategy': 'email_code',
            },
        )

        if logging:
            print(response.text)

        if 'sign_up_attempt' not in response.text:
            return 'Failed to create account!'

        # Step 3: poll the inbox until the 5-6 digit code shows up.
        while True:
            sleep(1)
            for _ in mail_client.fetch_inbox():
                if logging:
                    print(mail_client.get_message_content(_['id']))
                # NOTE(review): match(...) may return None when a mail has no
                # digit code, which would raise AttributeError here.
                mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)

            if mail_token:
                break

        if logging:
            print(mail_token)

        # Step 4: submit the code to finish the verification.
        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
            data={'code': mail_token, 'strategy': 'email_code'},
        )

        if logging:
            print(response.json())

        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
        # Persist the credentials for later reuse.
        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_address}:{token}\n')

        if logging:
            print(time() - start)

        return token
class StreamingCompletion:
    """Streams chat completions from the forefront.ai chat server."""

    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
        model='gpt-4',
    ) -> Generator[ForeFrontResponse, None, None]:
        """Yield one ForeFrontResponse per streamed content delta.

        :param token: session JWT from Account.create (required)
        :param chat_id: conversation id; a fresh uuid4 is generated when omitted
        :param prompt: user input sent to the model
        :param action_type: message action; 'new' starts a conversation
        :param default_persona: persona id forwarded to the chat server
        :param model: model name, e.g. 'gpt-4'
        :raises Exception: when no token is supplied
        """
        if not token:
            raise Exception('Token is required!')
        if not chat_id:
            chat_id = str(uuid4())

        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': UserAgent().random,
        }

        json_data = {
            'text': prompt,
            'action': action_type,
            'parentId': chat_id,
            'workspaceId': chat_id,
            'messagePersona': default_persona,
            'model': model,
        }

        # The endpoint streams SSE-style lines; only lines still carrying
        # finish_reason:null contain new content deltas.
        for chunk in post(
            'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
            headers=headers,
            json=json_data,
            stream=True,
        ).iter_lines():
            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                # NOTE(review): `token` is reused here for the content delta,
                # shadowing the auth-token parameter (already consumed above).
                token = data['choices'][0]['delta'].get('content')
                if token is not None:
                    yield ForeFrontResponse(
                        **{
                            'id': chat_id,
                            'object': 'text_completion',
                            'created': int(time()),
                            'text': token,
                            'model': model,
                            'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
                            'usage': {
                                # NOTE(review): these are character counts via
                                # len(), not real token counts.
                                'prompt_tokens': len(prompt),
                                'completion_tokens': len(token),
                                'total_tokens': len(prompt) + len(token),
                            },
                        }
                    )
class Completion:
    """Blocking wrapper that drains StreamingCompletion into one response."""

    @staticmethod
    def create(
        token=None,
        chat_id=None,
        prompt='',
        action_type='new',
        default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
        model='gpt-4',
    ) -> ForeFrontResponse:
        """Collect every streamed chunk and return the final one carrying the
        full concatenated text.

        :raises Exception: when the stream produced no usable chunk
        """
        pieces = []
        last_chunk = None
        stream = StreamingCompletion.create(
            token=token,
            chat_id=chat_id,
            prompt=prompt,
            action_type=action_type,
            default_persona=default_persona,
            model=model,
        )
        for chunk in stream:
            if chunk:
                last_chunk = chunk
                pieces.append(chunk.text)
        if last_chunk is None:
            raise Exception('Unable to get the response, Please try again')
        # Replace the last delta with the whole accumulated text.
        last_chunk.text = ''.join(pieces)
        return last_chunk

View file

@ -0,0 +1,26 @@
from typing import Any, List
from pydantic import BaseModel
class Choice(BaseModel):
    """One completion alternative within a forefront response."""

    # Generated text of this choice.
    text: str
    # Position of the choice in the choices list.
    index: int
    # Log-probability info; the streaming client in this package sends None.
    logprobs: Any
    # Why generation stopped (the client always sends 'stop').
    finish_reason: str
class Usage(BaseModel):
    """Token accounting for a completion.

    NOTE(review): the client fills these via len(), so the values are
    character counts, not true token counts.
    """

    # Length of the prompt.
    prompt_tokens: int
    # Length of the generated text.
    completion_tokens: int
    # Sum of the two counts above.
    total_tokens: int
class ForeFrontResponse(BaseModel):
    """Typed response emitted by the forefront streaming/blocking clients."""

    # Conversation/chat id.
    id: str
    # Response kind; the client emits 'text_completion'.
    object: str
    # Unix timestamp of creation.
    created: int
    # Model name used for the request.
    model: str
    # Completion alternatives; the client emits exactly one per chunk.
    choices: List[Choice]
    # Character-based token accounting (see Usage).
    usage: Usage
    # Delta text of this chunk; Completion.create overwrites it with the
    # full concatenated text before returning.
    text: str

View file

@ -21,26 +21,25 @@ models = {
```python
# import quora (poe) package
import quora
from gpt4free import quora
# create account
# make sure to set enable_bot_creation to True
token = quora.Account.create(logging = True, enable_bot_creation=True)
token = quora.Account.create(logging=True, enable_bot_creation=True)
model = quora.Model.create(
token = token,
model = 'gpt-3.5-turbo', # or claude-instant-v1.0
system_prompt = 'you are ChatGPT a large language model ...'
token=token,
model='gpt-3.5-turbo', # or claude-instant-v1.0
system_prompt='you are ChatGPT a large language model ...'
)
print(model.name) # gptx....
print(model.name) # gptx....
# streaming response
for response in quora.StreamingCompletion.create(
custom_model = model.name,
prompt ='hello world',
token = token):
custom_model=model.name,
prompt='hello world',
token=token):
print(response.completion.choices[0].text)
```

View file

@ -6,11 +6,12 @@ from pathlib import Path
from random import choice, choices, randint
from re import search, findall
from string import ascii_letters, digits
from typing import Optional, Union
from typing import Optional, Union, List, Any, Generator
from urllib.parse import unquote
import selenium.webdriver.support.expected_conditions as EC
from fake_useragent import UserAgent
from pydantic import BaseModel
from pypasser import reCaptchaV3
from requests import Session
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
@ -18,8 +19,8 @@ from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from tls_client import Session as TLS
from quora.api import Client as PoeClient
from quora.mail import Emailnator
from .api import Client as PoeClient
from .mail import Emailnator
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
@ -67,42 +68,27 @@ def extract_formkey(html):
return formkey
class PoeResponse:
class Completion:
class Choices:
def __init__(self, choice: dict) -> None:
self.text = choice['text']
self.content = self.text.encode()
self.index = choice['index']
self.logprobs = choice['logprobs']
self.finish_reason = choice['finish_reason']
class Choice(BaseModel):
text: str
index: int
logprobs: Any
finish_reason: str
def __repr__(self) -> str:
return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
def __init__(self, choices: dict) -> None:
self.choices = [self.Choices(choice) for choice in choices]
class Usage(BaseModel):
prompt_tokens: int
completion_tokens: int
total_tokens: int
class Usage:
def __init__(self, usage_dict: dict) -> None:
self.prompt_tokens = usage_dict['prompt_tokens']
self.completion_tokens = usage_dict['completion_tokens']
self.total_tokens = usage_dict['total_tokens']
def __repr__(self):
return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
def __init__(self, response_dict: dict) -> None:
self.response_dict = response_dict
self.id = response_dict['id']
self.object = response_dict['object']
self.created = response_dict['created']
self.model = response_dict['model']
self.completion = self.Completion(response_dict['choices'])
self.usage = self.Usage(response_dict['usage'])
def json(self) -> dict:
return self.response_dict
class PoeResponse(BaseModel):
id: int
object: str
created: int
model: str
choices: List[Choice]
usage: Usage
text: str
class ModelResponse:
@ -116,18 +102,12 @@ class ModelResponse:
class Model:
@staticmethod
def create(
token: str,
model: str = 'gpt-3.5-turbo', # claude-instant
system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
handle: str = None,
token: str,
model: str = 'gpt-3.5-turbo', # claude-instant
system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
handle: str = None,
) -> ModelResponse:
models = {
'gpt-3.5-turbo': 'chinchilla',
'claude-instant-v1.0': 'a2',
'gpt-4': 'beaver',
}
if not handle:
handle = f'gptx{randint(1111111, 9999999)}'
@ -162,7 +142,7 @@ class Model:
obj={
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
'variables': {
'model': models[model],
'model': MODELS[model],
'handle': handle,
'prompt': system_prompt,
'isPromptPublic': True,
@ -202,9 +182,9 @@ class Model:
class Account:
@staticmethod
def create(
proxy: Optional[str] = None,
logging: bool = False,
enable_bot_creation: bool = False,
proxy: Optional[str] = None,
logging: bool = False,
enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@ -309,22 +289,23 @@ class Account:
class StreamingCompletion:
@staticmethod
def create(
model: str = 'gpt-4',
custom_model: bool = None,
prompt: str = 'hello world',
token: str = '',
):
model: str = 'gpt-4',
custom_model: bool = None,
prompt: str = 'hello world',
token: str = '',
) -> Generator[PoeResponse, None, None]:
_model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token)
for chunk in client.send_message(_model, prompt):
yield PoeResponse(
{
**{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
'text': chunk['text_new'],
'choices': [
{
'text': chunk['text_new'],
@ -343,33 +324,28 @@ class StreamingCompletion:
class Completion:
@staticmethod
def create(
model: str = 'gpt-4',
custom_model: str = None,
prompt: str = 'hello world',
token: str = '',
):
models = {
'sage': 'capybara',
'gpt-4': 'beaver',
'claude-v1.2': 'a2_2',
'claude-instant-v1.0': 'a2',
'gpt-3.5-turbo': 'chinchilla',
}
_model = models[model] if not custom_model else custom_model
model: str = 'gpt-4',
custom_model: str = None,
prompt: str = 'hello world',
token: str = '',
) -> PoeResponse:
_model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token)
for chunk in client.send_message(_model, prompt):
pass
chunk = None
for response in client.send_message(_model, prompt):
chunk = response
return PoeResponse(
{
**{
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
'text': chunk['text'],
'choices': [
{
'text': chunk['text'],
@ -389,12 +365,12 @@ class Completion:
class Poe:
def __init__(
self,
model: str = 'ChatGPT',
driver: str = 'firefox',
download_driver: bool = False,
driver_path: Optional[str] = None,
cookie_path: str = './quora/cookie.json',
self,
model: str = 'ChatGPT',
driver: str = 'firefox',
download_driver: bool = False,
driver_path: Optional[str] = None,
cookie_path: str = './quora/cookie.json',
):
# validating the model
if model and model not in MODELS:
@ -451,8 +427,8 @@ class Poe:
driver.close()
return cookie
@classmethod
def __resolve_driver(cls, driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
@staticmethod
def __resolve_driver(driver: str, driver_path: Optional[str] = None) -> Union[Firefox, Chrome]:
options = FirefoxOptions() if driver == 'firefox' else ChromeOptions()
options.add_argument('-headless')
@ -473,12 +449,12 @@ class Poe:
return response
def create_bot(
self,
name: str,
/,
prompt: str = '',
base_model: str = 'ChatGPT',
description: str = '',
self,
name: str,
/,
prompt: str = '',
base_model: str = 'ChatGPT',
description: str = '',
) -> None:
if base_model not in MODELS:
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
@ -492,4 +468,4 @@ class Poe:
print(f'Successfully created bot with name: {response["bot"]["displayName"]}')
def list_bots(self) -> list:
return list(self.client.bot_names.values())
return list(self.client.bot_names.values())

View file

@ -1,9 +1,8 @@
### Example: `theb` (use like openai pypi package) <a name="example-theb"></a>
```python
# import library
import theb
from gpt4free import theb
# simple streaming completion
for token in theb.Completion.create('hello world'):

View file

@ -1,7 +1,8 @@
### Example: `you` (use like openai pypi package) <a name="example-you"></a>
```python
import you
from gpt4free import you
# simple request with links and details
response = you.Completion.create(

View file

@ -1,28 +1,36 @@
import json
import re
from json import loads
from typing import Optional, List, Dict, Any
from uuid import uuid4
from fake_useragent import UserAgent
from pydantic import BaseModel
from tls_client import Session
class PoeResponse(BaseModel):
text: Optional[str] = None
links: List[str] = []
extra: Dict[str, Any] = {}
class Completion:
@staticmethod
def create(
prompt: str,
page: int = 1,
count: int = 10,
safe_search: str = 'Moderate',
on_shopping_page: bool = False,
mkt: str = '',
response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
domain: str = 'youchat',
query_trace_id: str = None,
chat: list = None,
include_links: bool = False,
detailed: bool = False,
debug: bool = False,
) -> dict:
prompt: str,
page: int = 1,
count: int = 10,
safe_search: str = 'Moderate',
on_shopping_page: bool = False,
mkt: str = '',
response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
domain: str = 'youchat',
query_trace_id: str = None,
chat: list = None,
include_links: bool = False,
detailed: bool = False,
debug: bool = False,
) -> PoeResponse:
if chat is None:
chat = []
@ -57,26 +65,28 @@ class Completion:
r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
).group()
third_party_search_results = re.search(
r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group()
r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
).group()
# slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
extra = {
'youChatSerpResults': loads(you_chat_serp_results),
'youChatSerpResults': json.loads(you_chat_serp_results),
# 'slots' : loads(slots)
}
return {
'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'),
'links': loads(third_party_search_results)['search']['third_party_search_results']
if include_links
else None,
'extra': extra if detailed else None,
}
response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
if include_links:
response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
@classmethod
def __get_headers(cls) -> dict:
if detailed:
response.extra = extra
return response
@staticmethod
def __get_headers() -> dict:
return {
'authority': 'you.com',
'accept': 'text/event-stream',
@ -93,6 +103,6 @@ class Completion:
'user-agent': UserAgent().random,
}
@classmethod
def __get_failure_response(cls) -> dict:
return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})
@staticmethod
def __get_failure_response() -> PoeResponse:
return PoeResponse(text='Unable to fetch the response, Please try again.')

View file

@ -3,7 +3,7 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import forefront, quora, theb, you
from gpt4free import quora, forefront, theb, you
import random
@ -15,7 +15,7 @@ def query_forefront(question: str) -> str:
response = ""
# get a response
try:
for i in forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4'):
for i in forefront.StreamingCompletion.create(token = token, prompt ='hello world', model='gpt-4'):
response += i.completion.choices[0].text
return response

View file

@ -4,7 +4,7 @@ import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import streamlit as st
import you
from gpt4free import you
def get_answer(question: str) -> str:

View file

@ -5,7 +5,7 @@ description = ""
authors = ["Raju Komati <komatiraju032@gmail.com>"]
license = "GPL-3.0"
readme = "README.md"
packages = [{ include = "openai_rev" }]
packages = [{ include = "gpt4free" }]
exclude = ["**/*.txt"]
[tool.poetry.dependencies]

View file

@ -1,4 +1,4 @@
import theb
from gpt4free import theb
for token in theb.Completion.create('hello world'):
print(token, end='', flush=True)

View file

@ -1,4 +1,4 @@
from openai_rev import forefront
from gpt4free import forefront
# create an account
token = forefront.Account.create(logging=True)

View file

@ -6,8 +6,8 @@ from typing import Optional
from tls_client import Session as TLS
from twocaptcha import TwoCaptcha
from openai_rev.quora import extract_formkey
from openai_rev.quora.mail import Emailnator
from gpt4free.quora import extract_formkey
from gpt4free.quora.mail import Emailnator
solver = TwoCaptcha('72747bf24a9d89b4dcc1b24875efd358')

View file

@ -1,6 +1,6 @@
from time import sleep
from openai_rev import quora
from gpt4free import quora
token = quora.Account.create(proxy=None, logging=True)
print('token', token)

View file

@ -1,4 +1,4 @@
from openai_rev import quora
from gpt4free import quora
token = quora.Account.create(logging=True, enable_bot_creation=True)

View file

@ -1,24 +1,28 @@
from openai_rev import openai_rev, Provider, quora, forefront
import gpt4free
from gpt4free import Provider, quora, forefront
# usage You
response = openai_rev.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
print(response)
# usage Poe
token = quora.Account.create(logging=False)
response = openai_rev.Completion.create(
Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT'
)
# token = quora.Account.create(logging=False)
token = 'GKzCahZYGKhp76LfE197xw=='
response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT')
print(response)
# usage forefront
token = forefront.Account.create(logging=False)
response = openai_rev.Completion.create(
response = gpt4free.Completion.create(
Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token
)
print(response)
print(f'END')
# usage theb
response = openai_rev.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi')
print(response)
# usage cocalc
response = gpt4free.Completion.create(Provider.CoCalc, prompt='Write a poem on Lionel Messi', cookie_input='')
print(response)

View file

@ -1,4 +1,4 @@
from openai_rev import you
from gpt4free import you
# simple request with links and details
response = you.Completion.create(prompt="hello world", detailed=True, include_links=True)
@ -22,6 +22,6 @@ while True:
response = you.Completion.create(prompt=prompt, chat=chat)
print("Bot:", response["response"])
print("Bot:", response.text)
chat.append({"question": prompt, "answer": response["response"]})
chat.append({"question": prompt, "answer": response.text})