Merge remote-tracking branch 'github/master'
Some checks failed
build-with-all-capacity / build-and-push-image (push) Has been cancelled
build-with-audio-assistant / build-and-push-image (push) Has been cancelled
build-with-chatglm / build-and-push-image (push) Has been cancelled
build-with-latex-arm / build-and-push-image (push) Has been cancelled
build-with-latex / build-and-push-image (push) Has been cancelled
build-without-local-llms / build-and-push-image (push) Has been cancelled
# Conflicts:
#	config.py
@@ -14,7 +14,7 @@ API_KEY = "在此处填写APIKEY" # multiple API-KEYs may be provided, separated by English commas
 DASHSCOPE_API_KEY = ""  # Alibaba DashScope API_KEY

 # [step 1-3]>> (access to deepseek-reasoner, i.e. deepseek-r1) DeepSeek API KEY; the default request URL is "https://api.deepseek.com/v1/chat/completions"
-DEEPSEEK_API_KEY = "sk-d99b8cc6b7414cc88a5d950a3ff7585e"
+DEEPSEEK_API_KEY = ""

 # [step 2]>> Set to True to apply a proxy. Leave this unchanged when using local or region-unrestricted LLMs, and also when deploying directly on an overseas server.
 USE_PROXY = False

@@ -81,7 +81,7 @@ API_URL_REDIRECT = {}

 # In multithreaded function plugins, how many threads may access OpenAI concurrently by default. Free trial users are limited to 3 requests per minute; pay-as-you-go users to 3500 per minute.
 # In short: free ($5 credit) users should use 3; users who have bound a credit card to OpenAI may use 16 or higher. To raise the limit, see: https://platform.openai.com/docs/guides/rate-limits/overview
-DEFAULT_WORKER_NUM = 3
+DEFAULT_WORKER_NUM = 8

 # Color theme; options: ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
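The two comments above carry the rate-limit reasoning behind DEFAULT_WORKER_NUM. As a minimal illustration (not the plugin framework's actual scheduler), a thread pool capped at that value keeps at most that many requests in flight:

from concurrent.futures import ThreadPoolExecutor

DEFAULT_WORKER_NUM = 3  # the free-tier value from the config above

def call_openai(prompt):
    # stand-in for a real chat-completion request
    return f"reply to {prompt!r}"

prompts = [f"question {i}" for i in range(10)]
with ThreadPoolExecutor(max_workers=DEFAULT_WORKER_NUM) as pool:
    # at most DEFAULT_WORKER_NUM calls run concurrently
    replies = list(pool.map(call_openai, prompts))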
@@ -103,6 +103,7 @@ AVAIL_FONTS = [
     "华文中宋(STZhongsong)",
     "华文新魏(STXinwei)",
     "华文隶书(STLiti)",
     # Note: the fonts below require network access. You may add any font you like; the required format is "font nickname(real English font name@font css download URL)", as shown:
     "思源宋体(Source Han Serif CN VF@https://chinese-fonts-cdn.deno.dev/packages/syst/dist/SourceHanSerifCN/result.css)",
     "月星楷(Moon Stars Kai HW@https://chinese-fonts-cdn.deno.dev/packages/moon-stars-kai/dist/MoonStarsKaiHW-Regular/result.css)",
+    "珠圆体(MaokenZhuyuanTi@https://chinese-fonts-cdn.deno.dev/packages/mkzyt/dist/猫啃珠圆体/result.css)",
main.py (4 changed lines)
@@ -54,7 +54,7 @@ def main():
     # If WEB_PORT is -1, pick a random web port
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     from check_proxy import get_current_version
-    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_show_or_hide, js_code_show_or_hide_group2
+    from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_show_or_hide
     from themes.theme import js_code_for_toggle_darkmode
     from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, assign_user_uuid
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
@@ -207,7 +207,7 @@ def main():
             ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
             return ret
         checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
-        checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)
+        checkboxes_2.select(None, [checkboxes_2], None, _js="""apply_checkbox_change_for_group2""")

         # Gather the widget-handle combinations that recur throughout
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
@@ -1072,7 +1072,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai is an alias of glm-4, kept for backward compatibility
         })
     except:
         logger.error(trimmed_format_exc())
-# -=-=-=-=-=-=- HighFlyer DeepSeek LLM -=-=-=-=-=-=-
+# -=-=-=-=-=-=- HighFlyer DeepSeek local LLM -=-=-=-=-=-=-
 if "deepseekcoder" in AVAIL_LLM_MODELS:   # deepseekcoder
     try:
         from .bridge_deepseekcoder import predict_no_ui_long_connection as deepseekcoder_noui
@@ -6,7 +6,6 @@ from toolbox import get_conf
 from request_llms.local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 from loguru import logger
-import torch
 import os

 def download_huggingface_model(model_name, max_retry, local_dir):
@@ -29,6 +28,7 @@ class GetCoderLMHandle(LocalLLMHandle):
         self.cmd_to_install = cmd_to_install

     def load_model_and_tokenizer(self):
+        import torch
         # 🏃‍♂️🏃‍♂️🏃‍♂️ executed in the subprocess
         with ProxyNetworkActivate('Download_LLM'):
             from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
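The hunk above references a download_huggingface_model(model_name, max_retry, local_dir) helper. A hedged sketch of the retry-until-downloaded idea, using huggingface_hub.snapshot_download (the project's actual implementation may differ):

from huggingface_hub import snapshot_download

def download_huggingface_model(model_name, max_retry, local_dir):
    # try up to max_retry times; completed files are cached, so retries
    # do not re-download what already succeeded
    for attempt in range(max_retry):
        try:
            return snapshot_download(repo_id=model_name, local_dir=local_dir)
        except Exception as e:
            print(f"download failed ({attempt + 1}/{max_retry}): {e}")
    raise RuntimeError(f"could not fetch {model_name} after {max_retry} attempts")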
@@ -512,7 +512,7 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:str
         model, _ = read_one_api_model_name(model)
     if llm_kwargs['llm_model'].startswith('openrouter-'):
         model = llm_kwargs['llm_model'][len('openrouter-'):]
-        model= read_one_api_model_name(model)
+        model, _= read_one_api_model_name(model)
     if model == "gpt-3.5-random": # pick at random to dodge the openai rate limit
         model = random.choice([
             "gpt-3.5-turbo",
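The one-character fix above matters because read_one_api_model_name evidently returns a tuple, as the `model, _ = ...` call a few lines earlier shows. A toy reproduction (the splitting logic is a stand-in, not the real parser):

def read_one_api_model_name(model):
    # stand-in parser: returns (model_name, extra), like the real helper appears to
    return model.split("(")[0], None

model = read_one_api_model_name("gpt-4o(max_token=8192)")
print(model)       # ('gpt-4o', None) -- the buggy line stored the whole tuple
model, _ = read_one_api_model_name("gpt-4o(max_token=8192)")
print(model)       # 'gpt-4o'         -- the fixed line unpacks just the name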
@@ -2,15 +2,9 @@ import json
 import time
-import traceback
 import requests
-from loguru import logger
-
-# config_private.py holds your own secrets, such as API keys and proxy URLs
-# at load time, the private config_private file (not under git control) is checked first; if it exists, it overrides the original config file
-from toolbox import (
-    get_conf,
-    update_ui,
-    is_the_upload_folder,
-)
+from loguru import logger
+from toolbox import get_conf, is_the_upload_folder, update_ui, update_ui_lastest_msg

 proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
     "proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
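The removed comments described the config_private override mechanism. A simplified sketch of that pattern; note gpt_academic's get_conf actually merges per key rather than swapping whole modules, so this module-level fallback only conveys the idea:

try:
    import config_private as config  # git-ignored; holds your real keys
except ImportError:
    import config                    # fall back to the shipped defaults

print(config.DEEPSEEK_API_KEY)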
@@ -39,27 +33,35 @@ def decode_chunk(chunk):
     Parses out "content" and "finish_reason" (for models that support chain-of-thought, "reasoning_content" is also returned)
     """
     chunk = chunk.decode()
-    respose = ""
+    response = ""
     reasoning_content = ""
     finish_reason = "False"

+    # the response type may be either text/json or text/event-stream
+    if chunk.startswith("data: "):
+        chunk = chunk[6:]
+    else:
+        chunk = chunk
     try:
-        chunk = json.loads(chunk[6:])
+        chunk = json.loads(chunk)
     except:
-        respose = ""
+        response = ""
         finish_reason = chunk

     # error handling
     if "error" in chunk:
-        respose = "API_ERROR"
+        response = "API_ERROR"
         try:
             chunk = json.loads(chunk)
             finish_reason = chunk["error"]["code"]
         except:
             finish_reason = "API_ERROR"
-        return respose, finish_reason
+        return response, reasoning_content, finish_reason

     try:
         if chunk["choices"][0]["delta"]["content"] is not None:
-            respose = chunk["choices"][0]["delta"]["content"]
+            response = chunk["choices"][0]["delta"]["content"]
     except:
         pass
     try:
@@ -71,7 +73,7 @@ def decode_chunk(chunk):
         finish_reason = chunk["choices"][0]["finish_reason"]
     except:
         pass
-    return respose, reasoning_content, finish_reason
+    return response, reasoning_content, finish_reason, str(chunk)


 def generate_message(input, model, key, history, max_output_token, system_prompt, temperature):
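decode_chunk's new contract is easier to see in isolation. A self-contained sketch under the same assumptions (OpenAI-style streaming deltas; an optional SSE "data: " prefix), returning the same four values:

import json

def decode_chunk_sketch(chunk: bytes):
    text = chunk.decode()
    if text.startswith("data: "):   # text/event-stream frames carry this prefix
        text = text[len("data: "):]
    response = reasoning = ""
    finish_reason = "False"
    try:
        data = json.loads(text)
        delta = data["choices"][0]["delta"]
        response = delta.get("content") or ""
        reasoning = delta.get("reasoning_content") or ""
        finish_reason = data["choices"][0].get("finish_reason") or finish_reason
    except Exception:
        finish_reason = text        # unparseable frame: pass it through, as above
    return response, reasoning, finish_reason, text

print(decode_chunk_sketch(b'data: {"choices":[{"delta":{"content":"hi"},"finish_reason":null}]}'))
# -> ('hi', '', 'False', '{"choices":[{"delta":{"content":"hi"},"finish_reason":null}]}')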
@@ -106,7 +108,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt, temperature):
     what_i_ask_now["role"] = "user"
     what_i_ask_now["content"] = input
     messages.append(what_i_ask_now)
-    playload = {
+    payload = {
         "model": model,
         "messages": messages,
         "temperature": temperature,
@@ -114,7 +116,7 @@ def generate_message(input, model, key, history, max_output_token, system_prompt, temperature):
         "max_tokens": max_output_token,
     }

-    return headers, playload
+    return headers, payload


 def get_predict_function(
@@ -141,7 +143,7 @@
         history=[],
         sys_prompt="",
         observe_window=None,
-        console_slience=False,
+        console_silence=False,
     ):
         """
         Send to chatGPT and wait for the reply in one go, without showing intermediate output. Internally it still uses streaming, to keep the connection from being cut off midway.
@@ -157,12 +159,12 @@
         Used to pass already-generated output across threads; most of the time it exists purely for fancy visual effects and may be left empty. observe_window[0]: observation window. observe_window[1]: watchdog
         """
         from .bridge_all import model_info
-        watch_dog_patience = 5 # the watchdog's patience: no biting for 5 seconds (not that it bites people
+        watch_dog_patience = 5 # the watchdog's patience: no biting for 5 seconds (not that it bites people)
         if len(APIKEY) == 0:
             raise RuntimeError(f"APIKEY为空,请检查配置文件的{APIKEY}")
         if inputs == "":
             inputs = "你好👋"
-        headers, playload = generate_message(
+        headers, payload = generate_message(
             input=inputs,
             model=llm_kwargs["llm_model"],
             key=APIKEY,
@@ -182,7 +184,7 @@
             endpoint,
             headers=headers,
             proxies=None if disable_proxy else proxies,
-            json=playload,
+            json=payload,
             stream=True,
             timeout=TIMEOUT_SECONDS,
         )
@@ -198,7 +200,7 @@
         result = ""
         finish_reason = ""
         if reasoning:
-            resoning_buffer = ""
+            reasoning_buffer = ""

         stream_response = response.iter_lines()
         while True:
@@ -210,7 +212,7 @@
                 break
             except requests.exceptions.ConnectionError:
                 chunk = next(stream_response)  # it failed; retry once? if it fails again there is nothing more to be done.
-            response_text, reasoning_content, finish_reason = decode_chunk(chunk)
+            response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk)
             # the first chunk of the returned data stream is empty; keep waiting
             if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False":
                 continue
@@ -226,12 +228,12 @@
             if chunk:
                 try:
                     if finish_reason == "stop":
-                        if not console_slience:
+                        if not console_silence:
                             print(f"[response] {result}")
                         break
                     result += response_text
                     if reasoning:
-                        resoning_buffer += reasoning_content
+                        reasoning_buffer += reasoning_content
                     if observe_window is not None:
                         # observation window: display the data fetched so far
                         if len(observe_window) >= 1:
@@ -247,9 +249,8 @@
                 logger.error(error_msg)
                 raise RuntimeError("Json解析不合常规")
         if reasoning:
-            # quote the reasoning part with (>)
-            return '\n'.join(map(lambda x: '> ' + x, resoning_buffer.split('\n'))) + \
-                '\n\n' + result
+            paragraphs = ''.join([f'<p style="margin: 1.25em 0;">{line}</p>' for line in reasoning_buffer.split('\n')])
+            return f'''<div class="reasoning_process" >{paragraphs}</div>\n\n''' + result
         return result

     def predict(
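To see what changed in the reasoning rendering: previously each reasoning line was quoted with markdown "> "; now the lines are wrapped in <p> tags inside a div whose reasoning_process class is styled by the CSS rule added later in this commit. In isolation:

# sample reasoning text; the f-strings below mirror the diff exactly
reasoning_buffer = "step 1: skim the paper\nstep 2: summarize each section"
paragraphs = ''.join(f'<p style="margin: 1.25em 0;">{line}</p>' for line in reasoning_buffer.split('\n'))
print(f'<div class="reasoning_process" >{paragraphs}</div>\n\n' + 'final answer')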
@@ -268,7 +269,7 @@
         inputs is the input for this query
         top_p and temperature are chatGPT's internal tuning parameters
         history is the list of prior conversation (note that if either inputs or history is too long, a token-overflow error will be triggered)
-        chatbot is the conversation list shown in the WebUI; modify it, then yeild it out, to directly update the chat interface
+        chatbot is the conversation list shown in the WebUI; modify it, then yield it out, to directly update the chat interface
         additional_fn indicates which button was clicked; see functional.py for the buttons
         """
         from .bridge_all import model_info
@@ -299,7 +300,7 @@
             )  # refresh the UI
             time.sleep(2)

-        headers, playload = generate_message(
+        headers, payload = generate_message(
             input=inputs,
             model=llm_kwargs["llm_model"],
             key=APIKEY,
@@ -321,7 +322,7 @@
             endpoint,
             headers=headers,
             proxies=None if disable_proxy else proxies,
-            json=playload,
+            json=payload,
             stream=True,
             timeout=TIMEOUT_SECONDS,
         )
@@ -343,14 +344,21 @@
             gpt_reasoning_buffer = ""

         stream_response = response.iter_lines()
+        wait_counter = 0
         while True:
             try:
                 chunk = next(stream_response)
+            except StopIteration:
+                if wait_counter != 0 and gpt_replying_buffer == "":
+                    yield from update_ui_lastest_msg(lastmsg="模型调用失败 ...", chatbot=chatbot, history=history, msg="failed")
+                break
             except requests.exceptions.ConnectionError:
                 chunk = next(stream_response)  # it failed; retry once? if it fails again there is nothing more to be done.
-            response_text, reasoning_content, finish_reason = decode_chunk(chunk)
+            response_text, reasoning_content, finish_reason, decoded_chunk = decode_chunk(chunk)
+            if decoded_chunk == ': keep-alive':
+                wait_counter += 1
+                yield from update_ui_lastest_msg(lastmsg="等待中 " + "".join(["."] * (wait_counter%10)), chatbot=chatbot, history=history, msg="waiting ...")
+                continue
             # the first chunk of the returned data stream is empty; keep waiting
             if response_text == "" and (reasoning == False or reasoning_content == "") and finish_reason != "False":
                 status_text = f"finish_reason: {finish_reason}"
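Per the server-sent-events convention, lines beginning with ":" are comments, and some servers emit ": keep-alive" heartbeats before the first data frame. A minimal sketch of the new handling (the diff compares against ': keep-alive' exactly; this generalized version treats any ':'-prefixed line as a heartbeat):

def is_sse_comment(decoded_chunk: str) -> bool:
    # SSE comment lines start with ':' and carry no payload
    return decoded_chunk.startswith(":")

wait_counter = 0
for frame in [": keep-alive", ": keep-alive", 'data: {"x": 1}']:
    if is_sse_comment(frame):
        wait_counter += 1
        print("waiting " + "." * (wait_counter % 10))  # show progress, not data
        continue
    print("got data frame:", frame)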
@@ -367,7 +375,7 @@
                 chunk_decoded = chunk.decode()
                 chatbot[-1] = (
                     chatbot[-1][0],
-                    "[Local Message] {finish_reason},获得以下报错信息:\n"
+                    f"[Local Message] {finish_reason}, 获得以下报错信息:\n"
                     + chunk_decoded,
                 )
                 yield from update_ui(
@@ -385,7 +393,8 @@
                 if reasoning:
                     gpt_replying_buffer += response_text
                     gpt_reasoning_buffer += reasoning_content
-                    history[-1] = '\n'.join(map(lambda x: '> ' + x, gpt_reasoning_buffer.split('\n'))) + '\n\n' + gpt_replying_buffer
+                    paragraphs = ''.join([f'<p style="margin: 1.25em 0;">{line}</p>' for line in gpt_reasoning_buffer.split('\n')])
+                    history[-1] = f'<div class="reasoning_process">{paragraphs}</div>\n\n---\n\n' + gpt_replying_buffer
                 else:
                     gpt_replying_buffer += response_text
                 # if an exception is thrown here, it is usually because the text is too long; see the output of get_full_error for details
@@ -111,6 +111,8 @@ def extract_archive(file_path, dest_dir):
             member_path = os.path.normpath(member.name)
             full_path = os.path.join(dest_dir, member_path)
             full_path = os.path.abspath(full_path)
+            if member.islnk() or member.issym():
+                raise Exception(f"Attempted Symlink in {member.name}")
             if not full_path.startswith(os.path.abspath(dest_dir) + os.sep):
                 raise Exception(f"Attempted Path Traversal in {member.name}")
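For context, the two added lines close a symlink hole in archive extraction; the path-traversal check was already present. A self-contained sketch of the whole validation (function name and structure are illustrative, mirroring the diff's logic):

import os
import tarfile

def safe_extract(tar: tarfile.TarFile, dest_dir: str):
    for member in tar.getmembers():
        # normalize and anchor each member path under dest_dir
        member_path = os.path.normpath(member.name)
        full_path = os.path.abspath(os.path.join(dest_dir, member_path))
        # reject hard links and symlinks outright
        if member.islnk() or member.issym():
            raise Exception(f"Attempted Symlink in {member.name}")
        # reject entries that would escape dest_dir (e.g. "../../etc/passwd")
        if not full_path.startswith(os.path.abspath(dest_dir) + os.sep):
            raise Exception(f"Attempted Path Traversal in {member.name}")
    tar.extractall(dest_dir)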
@@ -311,3 +311,15 @@
     backdrop-filter: blur(10px);
     background-color: rgba(var(--block-background-fill), 0.5);
 }
+
+.reasoning_process {
+    font-size: smaller;
+    font-style: italic;
+    margin: 0px;
+    padding: 1em;
+    line-height: 1.5;
+    text-wrap: wrap;
+    opacity: 0.8;
+}
@@ -26,8 +26,8 @@ def define_gui_toolbar(AVAIL_LLM_MODELS, LLM_MODEL, INIT_SYS_PROMPT, THEME, AVAI
             fontfamily_dropdown = gr.Dropdown(AVAIL_FONTS, value=get_conf("FONT"), elem_id="elem_fontfamily", label="更换字体类型").style(container=False)
             fontsize_slider = gr.Slider(minimum=5, maximum=25, value=15, step=1, interactive=True, label="字体大小(默认15)", elem_id="elem_fontsize")
             checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
-            opt = ["自定义菜单"]
-            value=[]
+            opt = ["自定义菜单", "主标题", "副标题", "显示logo"]
+            value=["主标题", "副标题", "显示logo"]
             if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"]
             checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
             dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
themes/init.js (145 changed lines)
@@ -128,6 +128,14 @@ function gpt_academic_change_chatbot_font(fontfamily, fontsize, fontcolor) {
     }
 }

+function footer_show_hide(show) {
+    if (show) {
+        document.querySelector('footer').style.display = '';
+    } else {
+        document.querySelector('footer').style.display = 'none';
+    }
+}
+
 async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
     // part one: layout initialization
     remove_legacy_cookie();
@@ -179,6 +187,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
             }
         }
     }
+
     // fonts
     gpt_academic_gradio_saveload("load", "elem_fontfamily", "js_fontfamily", null, "str");
     gpt_academic_change_chatbot_font(getCookie("js_fontfamily"), null, null);
@@ -205,7 +214,93 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
     }


+    if (getCookie("js_show_title")) {
+        // have cookie
+        bool_value = getCookie("js_show_title")
+        bool_value = bool_value == "True";
+        searchString = "主标题";
+        tool_bar_group = "cbsc";
+        const true_function = function () {
+            document.querySelector('.prose.svelte-1ybaih5 h1').style.display = '';
+        }
+        const false_function = function () {
+            document.querySelector('.prose.svelte-1ybaih5 h1').style.display = 'none';
+        }
+        if (bool_value) {
+            // make btns appear
+            true_function();
+            // deal with checkboxes
+            let arr_with_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "add"
+            )
+            push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion");
+        } else {
+            false_function();
+            // deal with checkboxes
+            let arr_without_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "remove"
+            )
+            push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion");
+        }
+    }
+    if (getCookie("js_show_subtitle")) {
+        // have cookie
+        bool_value = getCookie("js_show_subtitle")
+        bool_value = bool_value == "True";
+        searchString = "副标题";
+        tool_bar_group = "cbsc";
+        const true_function = function () {
+            element = document.querySelector('.prose.svelte-1ybaih5 h2');
+            if (element) element.style.display = '';
+            element = document.querySelector('.prose.svelte-1ybaih5 h6');
+            if (element) element.style.display = '';
+        }
+        const false_function = function () {
+            element = document.querySelector('.prose.svelte-1ybaih5 h2');
+            if (element) element.style.display = 'none';
+            element = document.querySelector('.prose.svelte-1ybaih5 h6');
+            if (element) element.style.display = 'none';
+        }
+        if (bool_value) {
+            // make btns appear
+            true_function();
+            // deal with checkboxes
+            let arr_with_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "add"
+            )
+            push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion");
+        } else {
+            false_function();
+            // deal with checkboxes
+            let arr_without_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "remove"
+            )
+            push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion");
+        }
+    }
+    if (getCookie("js_show_footer")) {
+        // have cookie
+        bool_value = getCookie("js_show_footer")
+        searchString = "显示logo";
+        tool_bar_group = "cbsc";
+        bool_value = bool_value == "True";
+        if (bool_value) {
+            // make btns appear
+            footer_show_hide(true);
+            // deal with checkboxes
+            let arr_with_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "add"
+            )
+            push_data_to_gradio_component(arr_with_clear_btn, tool_bar_group, "no_conversion");
+        } else {
+            footer_show_hide(false);
+            // deal with checkboxes
+            let arr_without_clear_btn = update_array(
+                await get_data_from_gradio_component(tool_bar_group), searchString, "remove"
+            )
+            push_data_to_gradio_component(arr_without_clear_btn, tool_bar_group, "no_conversion");
+        }
+    }
     // clearButton: auto-clear button
     if (getCookie("js_clearbtn_show_cookie")) {
         // have cookie
@@ -219,7 +314,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
         let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "block";
         // deal with checkboxes
         let arr_with_clear_btn = update_array(
-            await get_data_from_gradio_component('cbs'), "输入清除键", "add"
+            await get_data_from_gradio_component("cbs"), "输入清除键", "add"
         )
         push_data_to_gradio_component(arr_with_clear_btn, "cbs", "no_conversion");
     } else {
@@ -228,7 +323,7 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
         let clearButton2 = document.getElementById("elem_clear2"); clearButton2.style.display = "none";
         // deal with checkboxes
         let arr_without_clear_btn = update_array(
-            await get_data_from_gradio_component('cbs'), "输入清除键", "remove"
+            await get_data_from_gradio_component("cbs"), "输入清除键", "remove"
         )
         push_data_to_gradio_component(arr_without_clear_btn, "cbs", "no_conversion");
     }
@@ -268,3 +363,47 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
     change_theme("", "")

 }


+function apply_checkbox_change_for_group2(display_panel_arr) {
+    setTimeout(() => {
+        display_panel_arr = get_checkbox_selected_items("cbsc");
+
+        let searchString = "添加Live2D形象";
+        if (display_panel_arr.includes(searchString)) {
+            setCookie("js_live2d_show_cookie", "True", 365);
+            loadLive2D();
+        } else {
+            try {
+                setCookie("js_live2d_show_cookie", "False", 365);
+                $('.waifu').hide();
+            } catch (e) {
+            }
+        }
+
+        function handleDisplay(searchString, key, displayElement, showFn, hideFn) {
+            if (display_panel_arr.includes(searchString)) {
+                setCookie(key, "True", 365);
+                if (showFn) showFn();
+                if (displayElement) displayElement.style.display = '';
+            } else {
+                setCookie(key, "False", 365);
+                if (hideFn) hideFn();
+                if (displayElement) displayElement.style.display = 'none';
+            }
+        }
+
+        // main title
+        const mainTitle = document.querySelector('.prose.svelte-1ybaih5 h1');
+        handleDisplay("主标题", "js_show_title", mainTitle, null, null);
+
+        // subtitle
+        const subTitle = document.querySelector('.prose.svelte-1ybaih5 h2');
+        handleDisplay("副标题", "js_show_subtitle", subTitle, null, null);
+
+        // show logo
+        handleDisplay("显示logo", "js_show_footer", null, () => footer_show_hide(true), () => footer_show_hide(false));
+    }, 50);
+}
@@ -141,23 +141,3 @@ setTimeout(() => {
     }
 """


-js_code_show_or_hide_group2 = """
-(display_panel_arr)=>{
-    setTimeout(() => {
-        display_panel_arr = get_checkbox_selected_items("cbsc");
-
-        let searchString = "添加Live2D形象";
-        let ele = "none";
-        if (display_panel_arr.includes(searchString)) {
-            setCookie("js_live2d_show_cookie", "True", 365);
-            loadLive2D();
-        } else {
-            setCookie("js_live2d_show_cookie", "False", 365);
-            $('.waifu').hide();
-        }
-
-    }, 50);
-}
-"""