config_private.py (new file, 71 lines)
@@ -0,0 +1,71 @@
API_KEY = "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" # Multiple API keys may be supplied at once, separated by commas, e.g. API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"

USE_PROXY = True
if USE_PROXY:
    proxies = {
        "http":  "socks5h://192.168.8.9:1070",  # e.g. "http": "http://127.0.0.1:7890",
        "https": "socks5h://192.168.8.9:1070",  # e.g. "https": "http://127.0.0.1:7890",
    }
else:
    proxies = None

DEFAULT_WORKER_NUM = 256


# [step 4]>> The settings below can improve the experience, but in most cases they do not need to be changed
# Height of the chat window
CHATBOT_HEIGHT = 1115

# Code highlighting
CODE_HIGHLIGHT = True

# Window layout
LAYOUT = "LEFT-RIGHT"  # "LEFT-RIGHT" (side-by-side) or "TOP-DOWN" (stacked)
DARK_MODE = True       # enable the dark theme

# How long to wait for a reply after sending a request to OpenAI before treating it as a timeout
TIMEOUT_SECONDS = 20

# Port for the web page; -1 means a random port
WEB_PORT = 19998

# Retry limit when OpenAI does not respond (network congestion, proxy failure, invalid key)
MAX_RETRY = 3

# Model selection (note: LLM_MODEL is the model selected by default, and it must be included in the AVAIL_LLM_MODELS list)
LLM_MODEL = "gpt-4"  # options ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "newbing-free", "stack-claude"]
# P.S. other available models include ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

# Execution device for local LLMs such as ChatGLM: CPU/GPU
LOCAL_MODEL_DEVICE = "cpu"  # or "cuda"

# Number of concurrent gradio threads (no need to change)
CONCURRENT_COUNT = 100

# Add a live2d decoration

ADD_WAIFU = True

# Usernames and passwords (no need to change) (this feature is unstable; it depends on the gradio version and the network, and is not recommended for local use)
# [("username", "password"), ("username2", "password2"), ...]
AUTHENTICATION = [("van", "L807878712"),("林", "L807878712"),("源", "L807878712"),("欣", "L807878712"),("z", "czh123456789")]

# URL redirection, used to swap out the API_URL (normally, do not change this!!)
# (High-risk setting! By changing it you expose your API-KEY and your conversation privacy entirely to the middleman you point it at!)
# Format: {"https://api.openai.com/v1/chat/completions": "the redirect URL that replaces api.openai.com goes here"}
# Example: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://ai.open.com/api/conversation"}
API_URL_REDIRECT = {}

# If the app needs to run under a sub-path (normally, do not change this!!) (main.py must also be modified for this to take effect!)
CUSTOM_PATH = "/"

# To use newbing, paste newbing's long cookie string here
NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
# From now on, the "newbing-free" model can be used without filling in NEWBING_COOKIES
NEWBING_COOKIES = """
your bing cookies here
"""

# To use Slack Claude, see request_llm/README.md for a detailed tutorial
SLACK_CLAUDE_BOT_ID = ''
SLACK_CLAUDE_USER_TOKEN = ''
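For context, a minimal sketch of how settings like the proxies above are typically consumed. This is illustrative only: it assumes the usual convention that config_private.py takes precedence over config.py, and the helper name read_conf is not part of this commit.

import importlib

def read_conf(key, default=None):
    # Prefer config_private.py, fall back to config.py, then to the default.
    for module_name in ("config_private", "config"):
        try:
            module = importlib.import_module(module_name)
        except ImportError:
            continue
        if hasattr(module, key):
            return getattr(module, key)
    return default

# Example: pick up the proxy and timeout settings defined above for an HTTP client.
proxies = read_conf("proxies")
timeout = read_conf("TIMEOUT_SECONDS", default=30)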
start.sh (new file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/bash
GPT_COMMAND="/home/van/.env/python3.12-venv/bin/python main.py"
LOG_FILE="/home/van/project/gpt/gpt.log"
GPT_PROCESS=$(ps aux | grep "$GPT_COMMAND" | grep -v grep)
if [ -n "$GPT_PROCESS" ]; then
    echo "gpt is running..."
else
    cd /home/van/project/gpt/
    $GPT_COMMAND > "$LOG_FILE" 2>&1 &
    echo "gpt started successfully. Log file: $LOG_FILE"
fi
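For comparison, a Python sketch of the same check-then-start logic as start.sh, in case the watchdog ever needs to live inside a Python tool. The paths are copied from the script above; relying on pgrep being available on the host is an assumption.

import subprocess

PYTHON_BIN = "/home/van/.env/python3.12-venv/bin/python"
WORK_DIR = "/home/van/project/gpt/"
LOG_FILE = "/home/van/project/gpt/gpt.log"

# pgrep -f matches against the full command line, like the grep pipeline in start.sh.
already_running = subprocess.run(
    ["pgrep", "-f", f"{PYTHON_BIN} main.py"],
    capture_output=True,
).returncode == 0

if already_running:
    print("gpt is running...")
else:
    # Start main.py detached from this process, appending output to the log file.
    with open(LOG_FILE, "ab") as log:
        subprocess.Popen([PYTHON_BIN, "main.py"], cwd=WORK_DIR,
                         stdout=log, stderr=subprocess.STDOUT)
    print(f"gpt started successfully. Log file: {LOG_FILE}")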