remove phind
parent 10104774c1
commit 011d0babc2
9 changed files with 15 additions and 427 deletions
@@ -40,7 +40,6 @@ Please note the following:
 | **Usage Examples** | | | |
 | `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | |
 | `quora (poe)` | Example usage for quora | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./quora/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | |
-| `phind` | Example usage for phind | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./phind/README.md) | ![Inactive](https://img.shields.io/badge/Active-brightgreen) |
 | `you` | Example usage for you | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](./you/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) |
 | **Try it Out** | | | |
 | Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - |
@@ -65,7 +64,6 @@ Please note the following:
 | [writesonic.com](https://writesonic.com) | GPT-3.5 / Internet |
 | [t3nsor.com](https://t3nsor.com) | GPT-3.5 |
 | [you.com](https://you.com) | GPT-3.5 / Internet / good search|
-| [phind.com](https://phind.com) | GPT-4 / Internet / good search |
 | [sqlchat.ai](https://sqlchat.ai) | GPT-3.5 |
 | [chat.openai.com/chat](https://chat.openai.com/chat) | GPT-3.5 |
 | [bard.google.com](https://bard.google.com) | custom / search |
@@ -75,13 +73,10 @@ Please note the following:
 ## Best sites <a name="best-sites"></a>

 #### gpt-4
-- [`/phind`](./phind/README.md)
-- pro: only stable gpt-4 with streaming ( no limit )
-- contra: weird backend prompting
-- why not `ora` anymore ? gpt-4 requires login + limited
+- [`/forefront`](./forefront/README.md)

 #### gpt-3.5
-- looking for a stable api at the moment
+- [`/you`](./you/README.md)

 ## Install <a name="install"></a>
 download or clone this GitHub repo
@@ -1,9 +1,11 @@
 # gpt4free gui

+move `streamlit_app.py` into base folder to run
+
 preview:

 <img width="1125" alt="image" src="https://user-images.githubusercontent.com/98614666/234232398-09e9d3c5-08e6-4b8a-b4f2-0666e9790c7d.png">

 run:

 <img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">
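As a side note on the readme line added above: below is a minimal launch sketch, assuming Streamlit is installed (`pip install streamlit`) and `streamlit_app.py` has already been moved into the base folder. It simply wraps the usual `streamlit run streamlit_app.py` command.

```python
# Minimal launch sketch (assumption: Streamlit is installed and
# streamlit_app.py sits in the base folder, as the readme says).
import subprocess

# Equivalent to typing `streamlit run streamlit_app.py` in a terminal.
subprocess.run(["streamlit", "run", "streamlit_app.py"], check=True)
```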
@@ -4,25 +4,16 @@ import sys
 sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

 import streamlit as st
-import phind
+import you

-# Set cloudflare clearance and user agent
-phind.cloudflare_clearance = ''
-phind.phind_api = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'


 def get_answer(question: str) -> str:
     # Set cloudflare clearance cookie and get answer from GPT-4 model
     try:
-        result = phind.Completion.create(
-            model='gpt-4',
-            prompt=question,
-            results=phind.Search.create(question, actualSearch=True),
-            creative=False,
-            detailed=False,
-            codeContext=''
-        )
-        return result.completion.choices[0].text
+        result = you.Completion.create(
+            prompt = question)
+        return result['response']
     except Exception as e:
         # Return error message if an exception occurs
         return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
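Put together, the GUI's `get_answer` helper after this change reads roughly as below; this is a sketch assembled from the added lines above, assuming the repo's `you` provider package is importable from the base folder.

```python
import you  # gpt4free's you.com provider, replacing the removed phind module


def get_answer(question: str) -> str:
    try:
        # you.Completion.create returns a plain dict; the GUI only needs the text.
        result = you.Completion.create(prompt=question)
        return result['response']
    except Exception as e:
        # Same fallback message the app already used before this commit.
        return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
```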
@@ -1,34 +0,0 @@
-### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>
-
-```python
-import phind
-
-# set cf_clearance cookie (needed again)
-phind.cf_clearance = 'xx.xx-1682166681-0-160'
-phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' # same as the one from browser you got cf_clearance from
-
-prompt = 'who won the quatar world cup'
-
-# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
-# stream completion
-for result in phind.StreamingCompletion.create(
-    model = 'gpt-4',
-    prompt = prompt,
-    results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
-    creative = False,
-    detailed = False,
-    codeContext = ''): # up to 3000 chars of code
-
-    print(result.completion.choices[0].text, end='', flush=True)
-
-# normal completion
-result = phind.Completion.create(
-    model = 'gpt-4',
-    prompt = prompt,
-    results = phind.Search.create(prompt, actualSearch = True), # create search (set actualSearch to False to disable internet)
-    creative = False,
-    detailed = False,
-    codeContext = '') # up to 3000 chars of code
-
-print(result.completion.choices[0].text)
-```
@@ -1,289 +0,0 @@
-from datetime import datetime
-from queue import Queue, Empty
-from threading import Thread
-from time import time
-from urllib.parse import quote
-
-from curl_cffi.requests import post
-
-cf_clearance = ''
-user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-
-
-class PhindResponse:
-    class Completion:
-        class Choices:
-            def __init__(self, choice: dict) -> None:
-                self.text = choice['text']
-                self.content = self.text.encode()
-                self.index = choice['index']
-                self.logprobs = choice['logprobs']
-                self.finish_reason = choice['finish_reason']
-
-            def __repr__(self) -> str:
-                return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
-
-        def __init__(self, choices: dict) -> None:
-            self.choices = list(map(self.Choices, choices))
-
-    class Usage:
-        def __init__(self, usage_dict: dict) -> None:
-            self.prompt_tokens = usage_dict['prompt_tokens']
-            self.completion_tokens = usage_dict['completion_tokens']
-            self.total_tokens = usage_dict['total_tokens']
-
-        def __repr__(self):
-            return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
-
-    def __init__(self, response_dict: dict) -> None:
-        self.response_dict = response_dict
-        self.id = response_dict['id']
-        self.object = response_dict['object']
-        self.created = response_dict['created']
-        self.model = response_dict['model']
-        self.completion = self.Completion(response_dict['choices'])
-        self.usage = self.Usage(response_dict['usage'])
-
-    def json(self) -> dict:
-        return self.response_dict
-
-
-class Search:
-    def create(prompt: str, actualSearch: bool = True, language: str = 'en') -> dict:  # None = no search
-        if user_agent == '':
-            raise ValueError('user_agent must be set, refer to documentation')
-        if cf_clearance == '':
-            raise ValueError('cf_clearance must be set, refer to documentation')
-
-        if not actualSearch:
-            return {
-                '_type': 'SearchResponse',
-                'queryContext': {
-                    'originalQuery': prompt
-                },
-                'webPages': {
-                    'webSearchUrl': f'https://www.bing.com/search?q={quote(prompt)}',
-                    'totalEstimatedMatches': 0,
-                    'value': []
-                },
-                'rankingResponse': {
-                    'mainline': {
-                        'items': []
-                    }
-                }
-            }
-
-        headers = {
-            'authority': 'www.phind.com',
-            'accept': '*/*',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cookie': f'cf_clearance={cf_clearance}',
-            'origin': 'https://www.phind.com',
-            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
-            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': user_agent
-        }
-
-        return post('https://www.phind.com/api/bing/search', headers=headers, json={
-            'q': prompt,
-            'userRankList': {},
-            'browserLanguage': language}).json()['rawBingResults']
-
-
-class Completion:
-    def create(
-            model='gpt-4',
-            prompt: str = '',
-            results: dict = None,
-            creative: bool = False,
-            detailed: bool = False,
-            codeContext: str = '',
-            language: str = 'en') -> PhindResponse:
-
-        if user_agent == '':
-            raise ValueError('user_agent must be set, refer to documentation')
-
-        if cf_clearance == '':
-            raise ValueError('cf_clearance must be set, refer to documentation')
-
-        if results is None:
-            results = Search.create(prompt, actualSearch=True)
-
-        if len(codeContext) > 2999:
-            raise ValueError('codeContext must be less than 3000 characters')
-
-        models = {
-            'gpt-4': 'expert',
-            'gpt-3.5-turbo': 'intermediate',
-            'gpt-3.5': 'intermediate',
-        }
-
-        json_data = {
-            'question': prompt,
-            'bingResults': results,  # response.json()['rawBingResults'],
-            'codeContext': codeContext,
-            'options': {
-                'skill': models[model],
-                'date': datetime.now().strftime("%d/%m/%Y"),
-                'language': language,
-                'detailed': detailed,
-                'creative': creative
-            }
-        }
-
-        headers = {
-            'authority': 'www.phind.com',
-            'accept': '*/*',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'content-type': 'application/json',
-            'cookie': f'cf_clearance={cf_clearance}',
-            'origin': 'https://www.phind.com',
-            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
-            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': user_agent
-        }
-
-        completion = ''
-        response = post('https://www.phind.com/api/infer/answer', headers=headers, json=json_data, timeout=99999,
-                        impersonate='chrome110')
-        for line in response.text.split('\r\n\r\n'):
-            completion += (line.replace('data: ', ''))
-
-        return PhindResponse({
-            'id': f'cmpl-1337-{int(time())}',
-            'object': 'text_completion',
-            'created': int(time()),
-            'model': models[model],
-            'choices': [{
-                'text': completion,
-                'index': 0,
-                'logprobs': None,
-                'finish_reason': 'stop'
-            }],
-            'usage': {
-                'prompt_tokens': len(prompt),
-                'completion_tokens': len(completion),
-                'total_tokens': len(prompt) + len(completion)
-            }
-        })
-
-
-class StreamingCompletion:
-    message_queue = Queue()
-    stream_completed = False
-
-    def request(model, prompt, results, creative, detailed, codeContext, language) -> None:
-
-        models = {
-            'gpt-4': 'expert',
-            'gpt-3.5-turbo': 'intermediate',
-            'gpt-3.5': 'intermediate',
-        }
-
-        json_data = {
-            'question': prompt,
-            'bingResults': results,
-            'codeContext': codeContext,
-            'options': {
-                'skill': models[model],
-                'date': datetime.now().strftime("%d/%m/%Y"),
-                'language': language,
-                'detailed': detailed,
-                'creative': creative
-            }
-        }
-
-        headers = {
-            'authority': 'www.phind.com',
-            'accept': '*/*',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'content-type': 'application/json',
-            'cookie': f'cf_clearance={cf_clearance}',
-            'origin': 'https://www.phind.com',
-            'referer': 'https://www.phind.com/search?q=hi&c=&source=searchbox&init=true',
-            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': user_agent
-        }
-
-        response = post('https://www.phind.com/api/infer/answer',
-                        headers=headers, json=json_data, timeout=99999, impersonate='chrome110',
-                        content_callback=StreamingCompletion.handle_stream_response)
-
-        StreamingCompletion.stream_completed = True
-
-    @staticmethod
-    def create(
-            model: str = 'gpt-4',
-            prompt: str = '',
-            results: dict = None,
-            creative: bool = False,
-            detailed: bool = False,
-            codeContext: str = '',
-            language: str = 'en'):
-
-        if user_agent == '':
-            raise ValueError('user_agent must be set, refer to documentation')
-        if cf_clearance == '':
-            raise ValueError('cf_clearance must be set, refer to documentation')
-
-        if results is None:
-            results = Search.create(prompt, actualSearch=True)
-
-        if len(codeContext) > 2999:
-            raise ValueError('codeContext must be less than 3000 characters')
-
-        Thread(target=StreamingCompletion.request, args=[
-            model, prompt, results, creative, detailed, codeContext, language]).start()
-
-        while StreamingCompletion.stream_completed != True or not StreamingCompletion.message_queue.empty():
-            try:
-                chunk = StreamingCompletion.message_queue.get(timeout=0)
-
-                if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
-                    chunk = b'data: \n\n\r\n\r\n'
-
-                chunk = chunk.decode()
-
-                chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
-                chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\n\r\n\r\n')
-                chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
-
-                yield PhindResponse({
-                    'id': f'cmpl-1337-{int(time())}',
-                    'object': 'text_completion',
-                    'created': int(time()),
-                    'model': model,
-                    'choices': [{
-                        'text': chunk,
-                        'index': 0,
-                        'logprobs': None,
-                        'finish_reason': 'stop'
-                    }],
-                    'usage': {
-                        'prompt_tokens': len(prompt),
-                        'completion_tokens': len(chunk),
-                        'total_tokens': len(prompt) + len(chunk)
-                    }
-                })
-
-            except Empty:
-                pass
-
-    @staticmethod
-    def handle_stream_response(response):
-        StreamingCompletion.message_queue.put(response)
Binary file not shown.
@@ -1,34 +0,0 @@
-import phind
-
-# set cf_clearance cookie ( not needed at the moment)
-phind.cf_clearance = 'MDzwnr3ZWk_ap8u.iwwMR5F3WccfOkhUy_zGNDpcF3s-1682497341-0-160'
-phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
-
-prompt = 'hello world'
-
-# normal completion
-result = phind.Completion.create(
-    model='gpt-4',
-    prompt=prompt,
-    results=phind.Search.create(prompt, actualSearch=False),
-    # create search (set actualSearch to False to disable internet)
-    creative=False,
-    detailed=False,
-    codeContext='')  # up to 3000 chars of code
-
-print(result.completion.choices[0].text)
-
-prompt = 'who won the quatar world cup'
-
-# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
-# stream completion
-for result in phind.StreamingCompletion.create(
-    model='gpt-4',
-    prompt=prompt,
-    results=phind.Search.create(prompt, actualSearch=True),
-    # create search (set actualSearch to False to disable internet)
-    creative=False,
-    detailed=False,
-    codeContext=''):  # up to 3000 chars of code
-
-    print(result.completion.choices[0].text, end='', flush=True)
@@ -1,42 +0,0 @@
-# Import necessary libraries
-from json import loads
-from os import urandom
-
-from requests import get
-
-# Generate a random session ID
-sessionId = urandom(10).hex()
-
-# Set up headers for the API request
-headers = {
-    'Accept': 'text/event-stream',
-    'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-    'Cache-Control': 'no-cache',
-    'Connection': 'keep-alive',
-    'Pragma': 'no-cache',
-    'Referer': 'http://easy-ai.ink/chat',
-    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
-    'token': 'null',
-}
-
-# Main loop to interact with the AI
-while True:
-    # Get user input
-    prompt = input('you: ')
-
-    # Set up parameters for the API request
-    params = {
-        'message': prompt,
-        'sessionId': sessionId
-    }
-
-    # Send request to the API and process the response
-    for chunk in get('http://easy-ai.ink/easyapi/v1/chat/completions', params=params,
-                     headers=headers, verify=False, stream=True).iter_lines():
-
-        # Check if the chunk contains the 'content' field
-        if b'content' in chunk:
-            # Parse the JSON data and print the content
-            data = loads(chunk.decode('utf-8').split('data:')[1])
-
-            print(data['content'], end='')
@@ -57,8 +57,7 @@ class Completion:
             r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
         ).group()
         third_party_search_results = re.search(
-            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
-        ).group()
+            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group()

         # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]

         text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
@@ -69,7 +68,7 @@ class Completion:
         }

         return {
-            'response': text.replace('\\n', '\n').replace('\\\\', '\\'),
+            'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'),
             'links': loads(third_party_search_results)['search']['third_party_search_results']
             if include_links
             else None,
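The second `you/__init__.py` hunk adds `.replace('\\"', '"')` so that quotes coming back from the stream escaped as `\"` are unescaped along with newlines and backslashes. A quick illustration with a made-up raw string (hypothetical input, not captured from the API):

```python
# Hypothetical raw text as accumulated from youChatToken events (assumption:
# quotes and newlines arrive backslash-escaped, as the replace chain implies).
raw = 'She said \\"hi\\"\\nBye'

cleaned = raw.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"')
print(cleaned)
# She said "hi"
# Bye
```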