Merge branch 'xtekky:main' into main
Commit b595351296
5 changed files with 126 additions and 5 deletions
37  .github/workflows/ci.yml  vendored  Normal file
@@ -0,0 +1,37 @@
name: Build and push `gpt4free` docker image

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up qemu
        uses: docker/setup-qemu-action@v2

      - name: Set up docker buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to docker hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Build and push docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.ref == 'refs/heads/main' }}
          tags: |
            ${{ secrets.DOCKER_USERNAME }}/gpt4free:latest
@@ -2,6 +2,8 @@

This code provides a Graphical User Interface (GUI) for gpt4free. Users can ask questions and get answers from GPT-4 APIs, using multiple API implementations. The project contains two different Streamlit applications: `streamlit_app.py` and `streamlit_chat_app.py`.

In addition, a new GUI script implemented with PyWebIO has been added; it can be found in the pywebio-gui folder. If you run into errors with the Streamlit version, try the PyWebIO version instead.

Installation
------------
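As a quick way to try the new GUI from a repository checkout, here is a minimal sketch; the script path is taken from the folder name mentioned above, and the port comes from the script added later in this diff.

```python
# Minimal sketch: launch the PyWebIO GUI straight from a repo checkout.
# Assumes `gpt4free` and `pywebio` are installed (see the README below).
import runpy

# Executes gui/pywebio-gui/pywebio-usesless.py, which starts a PyWebIO
# server on port 8099 and opens the chat page in the browser.
runpy.run_path("gui/pywebio-gui/pywebio-usesless.py", run_name="__main__")
```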
24  gui/pywebio-gui/README.md  Normal file
@@ -0,0 +1,24 @@
# GUI with PyWebIO

Simple, fast, and with fewer errors

Only requires

```bash
pip install gpt4free
pip install pywebio
```

Double-clicking 'pywebio-usesless.py' will run it

PS: Currently, only 'usesless' is implemented, and the GUI is expected to be updated infrequently, with a focus on stability.

↓ Below is the same introduction, translated from zh-Hans-CN.

# Minimal GUI implemented with PyWebIO

Simple, fast, and with few errors

Only requires

```bash
pip install gpt4free
pip install pywebio
```

Double-click pywebio-usesless.py to run it

PS: Currently only usesless is implemented; this GUI will probably be updated infrequently, with stability as the goal.
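For context on what the script in the next file wraps, here is a minimal sketch of the `usesless` completion call. The `text` and `id` response keys and the `parentMessageId` parameter are taken from how the script below uses them and may differ between gpt4free versions.

```python
# Minimal sketch of the usesless completion API that the GUI wraps.
from gpt4free import usesless

# First turn: no parent message yet, so pass an empty id.
first = usesless.Completion.create(prompt="hello", parentMessageId="")
print(first["text"])

# Follow-up turn: pass the previous response id to keep conversation context.
reply = usesless.Completion.create(prompt="Can you shorten that?", parentMessageId=first["id"])
print(reply["text"])
```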
59  gui/pywebio-gui/pywebio-usesless.py  Normal file
@@ -0,0 +1,59 @@
from gpt4free import usesless
import time
from pywebio import start_server, config
from pywebio.input import *
from pywebio.output import *
from pywebio.session import local

message_id = ""


def status():
    # Quick health check: send a throwaway prompt so the user sees early
    # whether the usesless backend is reachable.
    try:
        req = usesless.Completion.create(prompt="hello", parentMessageId=message_id)
        print(f"Answer: {req['text']}")
        put_success(f"Answer: {req['text']}", scope="body")
    except Exception:
        put_error("Program Error", scope="body")


def ask(prompt):
    # Send the prompt, keep the returned message id so the next turn stays in
    # the same conversation, and record both sides of the exchange.
    req = usesless.Completion.create(prompt=prompt, parentMessageId=local.message_id)
    rp = req['text']
    local.message_id = req["id"]
    print("AI:\n" + rp)
    local.conversation.extend([
        {"role": "user", "content": prompt},
        {"role": "assistant", "content": rp},
    ])
    print(local.conversation)
    return rp


def msg():
    # Main chat loop: read input, echo it, show a spinner while waiting,
    # then render the model's reply.
    while True:
        text = input_group("You:", [textarea('You:', name='text', rows=3, placeholder='请输入问题')])  # placeholder: "Please enter your question"
        if not text:
            break
        if not text["text"]:
            continue
        time.sleep(0.5)
        put_code("You:" + text["text"], scope="body")
        print("Question:" + text["text"])
        with use_scope('foot'):
            put_loading(color="info")
            rp = ask(text["text"])
        clear(scope="foot")
        time.sleep(0.5)
        put_markdown("Bot:\n" + rp, scope="body")
        time.sleep(0.7)


@config(title="AIchat", theme="dark")
def main():
    put_scope("heads")
    with use_scope('heads'):
        put_html("<h1><center>AI Chat</center></h1>")
    put_scope("body")
    put_scope("foot")
    status()
    local.conversation = []
    local.message_id = ""
    msg()


print("Click link to chat page")
start_server(main, port=8099, allowed_origins="*", auto_open_webbrowser=True, debug=True)
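The script above leans on two PyWebIO ideas: per-session state via `pywebio.session.local`, and named output scopes ('heads', 'body', 'foot') that can be updated or cleared independently. Below is a stripped-down sketch of the scope pattern, independent of gpt4free and assuming only `pywebio` is installed.

```python
from pywebio import start_server
from pywebio.output import put_scope, use_scope, put_loading, put_text, clear


def demo():
    put_scope("body")   # persistent area for the conversation
    put_scope("foot")   # temporary area for status/spinners
    with use_scope("foot"):
        put_loading(color="info")       # show a spinner while "working"
    put_text("reply goes here", scope="body")
    clear(scope="foot")                 # remove the spinner once done


start_server(demo, port=8099)
```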
@@ -78,7 +78,6 @@ user_input = input_placeholder.text_input(
)
submit_button = st.button("Submit")

if (user_input and user_input != st.session_state['input_text']) or submit_button:
    output = query(user_input, st.session_state['query_method'])

@@ -88,6 +87,7 @@ if (user_input and user_input != st.session_state['input_text']) or submit_button:
    st.session_state.current_conversation['generated_responses'].append(escaped_output)
    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
    st.session_state['input_text'] = ''
    st.session_state['input_field_key'] += 1  # Increment key value for new widget
    user_input = input_placeholder.text_input(
        'You:', value=st.session_state['input_text'], key=f'input_text_{st.session_state["input_field_key"]}'
    )  # Clear the input field

@@ -96,8 +96,7 @@ if (user_input and user_input != st.session_state['input_text']) or submit_button:
if st.sidebar.button("New Conversation"):
    st.session_state['selected_conversation'] = None
    st.session_state['current_conversation'] = {'user_inputs': [], 'generated_responses': []}
    st.session_state['input_field_key'] += 1
    st.session_state['input_field_key'] += 1  # Increment key value for new widget

st.session_state['query_method'] = st.sidebar.selectbox("Select API:", options=avail_query_methods, index=0)

# Proxy
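The hunks above touch the `input_field_key` trick: Streamlit builds a brand-new widget whenever its `key` changes, so incrementing the counter is how the app clears the input box after a message is sent. Here is a standalone sketch of that pattern (not the app's full code), assuming only `streamlit` is installed; run it with `streamlit run sketch.py`.

```python
import streamlit as st

# A counter kept in session state; bumping it changes the widget key below,
# which makes Streamlit create a fresh (empty) text input.
if "input_field_key" not in st.session_state:
    st.session_state["input_field_key"] = 0

input_placeholder = st.empty()
user_input = input_placeholder.text_input(
    "You:", key=f"input_text_{st.session_state['input_field_key']}"
)

if st.button("Submit") and user_input:
    st.write(f"You said: {user_input}")
    st.session_state["input_field_key"] += 1  # new key -> new, empty widget
    input_placeholder.text_input(
        "You:", value="", key=f"input_text_{st.session_state['input_field_key']}"
    )  # re-render the cleared input in the same placeholder
```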