First implementation of streamlit chat app in gui folder
parent 25428d58d5
commit 952f7dbee9

8 changed files with 334 additions and 3 deletions
9  .gitignore (vendored)

```diff
@@ -7,6 +7,15 @@
 /dataSources/
 /dataSources.local.xml
+
+# Ignore local python virtual environment
+venv/
+
+# Ignore streamlit_chat_app.py conversations pickle
+conversations.pkl
+
+# Ignore accounts created by APIs
+accounts.txt
 
 .idea/
 
 */__pycache__/
```
gui/README.md

@@ -1,11 +1,72 @@

# gpt4free gui

This code provides a Graphical User Interface (GUI) for gpt4free. Users can ask questions and get answers from GPT-4 APIs, utilizing multiple API implementations. The project contains two different Streamlit applications: `streamlit_app.py` and `streamlit_chat_app.py`.

Installation
------------

1. Clone the repository.
2. Install the required dependencies with: `pip install -r requirements.txt`.
3. To use `streamlit_chat_app.py`, note that it depends on a pull request (PR #24) from the https://github.com/AI-Yash/st-chat/ repository, which may change in the future. The current dependency library can be found at https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip (see the sketch after this list).
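If that entry in `requirements.txt` ever needs to be installed on its own, pip accepts a direct URL to a source archive; a minimal sketch of the equivalent command, using the same URL as above:

```bash
pip install https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
```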
Usage
-----

Choose one of the Streamlit applications to run:

### streamlit_app.py

This application provides a simple interface for asking GPT-4 questions and receiving answers.

To run the application:

```bash
streamlit run gui/streamlit_app.py
```

<br>

<img width="724" alt="image" src="https://user-images.githubusercontent.com/98614666/234232449-0d5cd092-a29d-4759-8197-e00ba712cb1a.png">

<br>
<br>

preview:

<img width="1125" alt="image" src="https://user-images.githubusercontent.com/98614666/234232398-09e9d3c5-08e6-4b8a-b4f2-0666e9790c7d.png">

### streamlit_chat_app.py

This application provides a chat-like interface for asking GPT-4 questions and receiving answers. It supports multiple query methods, and users can select the desired API for their queries. The application also maintains a conversation history.

To run the application:

```bash
streamlit run gui/streamlit_chat_app.py
```

<br>

<img width="724" alt="image" src="image1.png">

<br>
<br>

preview:

<img width="1125" alt="image" src="image2.png">

Contributing
------------

Feel free to submit pull requests, report bugs, or request new features by opening issues on the GitHub repository.

Bug
----

There is a bug in `streamlit_chat_app.py` right now that I haven't pinpointed yet; it's probably really simple, but I haven't had the time to look for it. Whenever you open a new conversation or access an old conversation, it will only start prompt-answering after the second time you type into the text input. Other than that, everything else seems to work accordingly.
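One hypothesis about the cause, purely a guess and untested: the app re-keys its `text_input` on every conversation change (the key embeds the conversation length), so the first submission after a switch is read from a widget that no longer exists. A sketch of one possible restructuring that avoids re-keying, using Streamlit's form API (the `chat_input` key name is made up for the example):

```python
import streamlit as st

# Possible fix (sketch, not the current implementation): a form with a stable
# key submits atomically, and clear_on_submit empties the field afterwards,
# so no re-keyed text_input is needed to blank it between messages.
with st.form("chat_input", clear_on_submit=True):
    user_input = st.text_input("You:")
    submitted = st.form_submit_button("Submit")

if submitted and user_input:
    pass  # query the selected API here and append to the current conversation
```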
|
License
-------

This project is licensed under the MIT License.
0  gui/__init__.py (new file)

BIN  gui/image1.png (new file, binary file not shown; 168 KiB)

BIN  gui/image2.png (new file, binary file not shown; 336 KiB)
163  gui/query_methods.py (new file)

@@ -0,0 +1,163 @@

```python
import forefront, quora, theb, you
import random


def query_forefront(question: str) -> str:
    # create an account
    token = forefront.Account.create(logging=True)

    # get a response
    try:
        result = forefront.StreamingCompletion.create(token=token, prompt=question, model='gpt-4')

        return result['response']

    except Exception as e:
        # Return error message if an exception occurs
        return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'


def query_quora(question: str) -> str:
    token = quora.Account.create(logging=False, enable_bot_creation=True)
    response = quora.Completion.create(
        model='gpt-4',
        prompt=question,
        token=token
    )

    return response.completion.choices[0].text


def query_theb(question: str) -> str:
    # Set cloudflare clearance cookie and get answer from GPT-4 model
    try:
        result = theb.Completion.create(prompt=question)

        return result['response']

    except Exception as e:
        # Return error message if an exception occurs
        return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'


def query_you(question: str) -> str:
    # Set cloudflare clearance cookie and get answer from GPT-4 model
    try:
        result = you.Completion.create(prompt=question)

        return result['response']

    except Exception as e:
        # Return error message if an exception occurs
        return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'


# Define a dictionary containing all query methods
avail_query_methods = {
    "Forefront": query_forefront,
    "Quora": query_quora,
    "Theb": query_theb,
    "You": query_you,
    # "Writesonic": query_writesonic,
    # "T3nsor": query_t3nsor,
    # "Phind": query_phind,
    # "Ora": query_ora,
}


def query(user_input: str, selected_method: str = "Random") -> str:
    # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
    if selected_method != "Random" and selected_method in avail_query_methods:
        try:
            return avail_query_methods[selected_method](user_input)
        except Exception as e:
            print(f"Error with {selected_method}: {e}")
            return "😵 Sorry, some error occurred please try again."

    # Initialize variables for determining success and storing the result
    success = False
    result = "😵 Sorry, some error occurred please try again."
    # Create a list of available query methods
    query_methods_list = list(avail_query_methods.values())

    # Continue trying different methods until a successful result is obtained or all methods have been tried
    while not success and query_methods_list:
        # Choose a random method from the list
        chosen_query = random.choice(query_methods_list)
        # Find the name of the chosen method
        chosen_query_name = [k for k, v in avail_query_methods.items() if v == chosen_query][0]
        try:
            # Try to call the chosen method with the user input
            result = chosen_query(user_input)
            success = True
        except Exception as e:
            print(f"Error with {chosen_query_name}: {e}")
            # Remove the failed method from the list of available methods
            query_methods_list.remove(chosen_query)

    return result


__all__ = ['query', 'avail_query_methods']


# def query_ora(question: str) -> str:
#     result = ""
#     try:
#         gpt4_chatbot_ids = ['b8b12eaa-5d47-44d3-92a6-4d706f2bcacf', 'fbe53266-673c-4b70-9d2d-d247785ccd91', 'bd5781cf-727a-45e9-80fd-a3cfce1350c6', '993a0102-d397-47f6-98c3-2587f2c9ec3a', 'ae5c524e-d025-478b-ad46-8843a5745261', 'cc510743-e4ab-485e-9191-76960ecb6040', 'a5cd2481-8e24-4938-aa25-8e26d6233390', '6bca5930-2aa1-4bf4-96a7-bea4d32dcdac', '884a5f2b-47a2-47a5-9e0f-851bbe76b57c', 'd5f3c491-0e74-4ef7-bdca-b7d27c59e6b3', 'd72e83f6-ef4e-4702-844f-cf4bd432eef7', '6e80b170-11ed-4f1a-b992-fd04d7a9e78c', '8ef52d68-1b01-466f-bfbf-f25c13ff4a72', 'd0674e11-f22e-406b-98bc-c1ba8564f749', 'a051381d-6530-463f-be68-020afddf6a8f', '99c0afa1-9e32-4566-8909-f4ef9ac06226', '1be65282-9c59-4a96-99f8-d225059d9001', 'dba16bd8-5785-4248-a8e9-b5d1ecbfdd60', '1731450d-3226-42d0-b41c-4129fe009524', '8e74635d-000e-4819-ab2c-4e986b7a0f48', 'afe7ed01-c1ac-4129-9c71-2ca7f3800b30', 'e374c37a-8c44-4f0e-9e9f-1ad4609f24f5']
#         chatbot_id = random.choice(gpt4_chatbot_ids)
#         model = ora.CompletionModel.load(chatbot_id, 'gpt-4')
#         response = ora.Completion.create(model, question)
#         result = response.completion.choices[0].text
#     except Exception as e:
#         print(f"Error : {e}")
#         result = "😵 Sorry, some error occurred please try again."
#     return result


# def query_writesonic(question: str) -> str:
#     account = writesonic.Account.create(logging=False)
#     response = writesonic.Completion.create(
#         api_key=account.key,
#         prompt=question,
#     )
#
#     return response.completion.choices[0].text


# def query_t3nsor(question: str) -> str:
#     messages = []
#
#     user = question
#
#     t3nsor_cmpl = t3nsor.Completion.create(
#         prompt=user,
#         messages=messages
#     )
#
#     messages.extend([
#         {'role': 'user', 'content': user},
#         {'role': 'assistant', 'content': t3nsor_cmpl.completion.choices[0].text}
#     ])
#
#     return t3nsor_cmpl.completion.choices[0].text


# def query_phind(question: str) -> str:
#     phind.cf_clearance = 'KvXc1rh.TFQG1rNF0eMlcpJbsdmJkYgvmqS42OOfqUk-1682393898-0-160'
#     # phind.cf_clearance = 'heguhSRBB9d0sjLvGbQECS8b80m2BQ31xEmk9ChshKI-1682268995-0-160'
#     # phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
#     phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4.1 Safari/605.1.15'
#     result = phind.Completion.create(
#         model='gpt-4',
#         prompt=question,
#         results=phind.Search.create(question, actualSearch=False),
#         creative=False,
#         detailed=False,
#         codeContext='')
#     # print(result.completion.choices[0].text)
#     return result.completion.choices[0].text
```
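For reference, a minimal usage sketch of the exported helpers. This is an assumption-laden example: it needs the repository root and the `gui` folder on `sys.path` so that `query_methods` and the API modules it imports resolve.

```python
import sys

# Hypothetical paths: adjust to wherever gpt4free is checked out
sys.path.extend([".", "gui"])

from query_methods import query, avail_query_methods

# The enabled backends, as keyed in the dictionary
print(list(avail_query_methods))  # ['Forefront', 'Quora', 'Theb', 'You']

# Route the question through one specific API...
print(query("What is the capital of France?", selected_method="You"))

# ...or leave the default "Random", which retries methods until one succeeds
print(query("What is the capital of France?"))
```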
97  gui/streamlit_chat_app.py (new file)

@@ -0,0 +1,97 @@

```python
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

import streamlit as st
from streamlit_chat import message
from query_methods import query, avail_query_methods
import pickle

conversations_file = "conversations.pkl"


def load_conversations():
    try:
        with open(conversations_file, "rb") as f:
            return pickle.load(f)
    except FileNotFoundError:
        return []


def save_conversations(conversations, current_conversation):
    updated = False
    for i, conversation in enumerate(conversations):
        if conversation == current_conversation:
            conversations[i] = current_conversation
            updated = True
            break
    if not updated:
        conversations.append(current_conversation)
    with open(conversations_file, "wb") as f:
        pickle.dump(conversations, f)


st.header("Chat Placeholder")

if 'conversations' not in st.session_state:
    st.session_state['conversations'] = load_conversations()

if 'input_text' not in st.session_state:
    st.session_state['input_text'] = ''

if 'selected_conversation' not in st.session_state:
    st.session_state['selected_conversation'] = None

if 'input_field_key' not in st.session_state:
    st.session_state['input_field_key'] = 0

if 'query_method' not in st.session_state:
    # Default to the "Random" fallback; the sidebar selectbox below overwrites this with an API name
    st.session_state['query_method'] = "Random"

# Initialize new conversation
if 'current_conversation' not in st.session_state or st.session_state['current_conversation'] is None:
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}


input_placeholder = st.empty()
user_input = input_placeholder.text_input('You:', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}')
submit_button = st.button("Submit")

if user_input or submit_button:
    output = query(user_input, st.session_state['query_method'])

    st.session_state.current_conversation['user_inputs'].append(user_input)
    st.session_state.current_conversation['generated_responses'].append(output)
    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
    user_input = input_placeholder.text_input('You:', value='', key=f'input_text_{len(st.session_state["current_conversation"]["user_inputs"])}')  # Clear the input field


# Add a button to create a new conversation
if st.sidebar.button("New Conversation"):
    st.session_state['selected_conversation'] = None
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
    st.session_state['input_field_key'] += 1


st.session_state['query_method'] = st.sidebar.selectbox(
    "Select API:",
    options=avail_query_methods.keys(),
    index=0
)

# Sidebar
st.sidebar.header("Conversation History")

for i, conversation in enumerate(st.session_state.conversations):
    if st.sidebar.button(f"Conversation {i + 1}: {conversation['user_inputs'][0]}", key=f"sidebar_btn_{i}"):
        st.session_state['selected_conversation'] = i
        st.session_state['current_conversation'] = st.session_state.conversations[i]

if st.session_state['selected_conversation'] is not None:
    conversation_to_display = st.session_state.conversations[st.session_state['selected_conversation']]
else:
    conversation_to_display = st.session_state.current_conversation

if conversation_to_display['generated_responses']:
    for i in range(len(conversation_to_display['generated_responses']) - 1, -1, -1):
        message(conversation_to_display["generated_responses"][i], key=f"display_generated_{i}")
        message(conversation_to_display['user_inputs'][i], is_user=True, key=f"display_user_{i}")
```
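Since the conversation history is just a pickled list of dicts, it can be inspected outside the app. A small sketch, assuming `conversations.pkl` sits in whatever directory the app was launched from:

```python
import pickle

with open("conversations.pkl", "rb") as f:
    conversations = pickle.load(f)

# Each entry mirrors the app's session structure:
# {'user_inputs': [...], 'generated_responses': [...]}
for i, conv in enumerate(conversations):
    print(f"Conversation {i + 1}: {len(conv['user_inputs'])} turns")
```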
1  requirements.txt

```diff
@@ -9,3 +9,4 @@ streamlit==1.21.0
 selenium
 fake-useragent
 twocaptcha
+https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip
```