From 142b51674981ef781283fc0f87a75fb85ecf3f92 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 20 Jan 2024 18:00:06 +0800 Subject: [PATCH 01/33] gpt_academic text mask imp --- core_functional.py | 5 +- request_llms/bridge_all.py | 4 +- shared_utils/advanced_markdown_format.py | 204 ++++++++++++++--------- shared_utils/text_mask.py | 56 +++++++ toolbox.py | 6 +- 5 files changed, 191 insertions(+), 84 deletions(-) create mode 100644 shared_utils/text_mask.py diff --git a/core_functional.py b/core_functional.py index 0b283a89..5e67eee8 100644 --- a/core_functional.py +++ b/core_functional.py @@ -3,6 +3,7 @@ # 'stop' 颜色对应 theme.py 中的 color_er import importlib from toolbox import clear_line_break +from toolbox import build_gpt_academic_masked_string from textwrap import dedent def get_core_functions(): @@ -32,12 +33,12 @@ def get_core_functions(): "Prefix": r"", # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": - dedent("\n"+r''' + dedent("\n"+f''' ============================== 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如: 以下是对以上文本的总结,以mermaid flowchart的形式展示: - ```mermaid + ```{build_gpt_academic_masked_string(text_show_llm="mermaid", text_show_render="")} flowchart LR A["节点名1"] --> B("节点名2") B --> C{"节点名3"} diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index c19691e8..14352475 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -11,7 +11,7 @@ import tiktoken, copy from functools import lru_cache from concurrent.futures import ThreadPoolExecutor -from toolbox import get_conf, trimmed_format_exc +from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui from .bridge_chatgpt import predict as chatgpt_ui @@ -668,6 +668,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser """ import threading, time, copy + inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm") model = llm_kwargs['llm_model'] n_model = 1 if '&' not in model: @@ -741,6 +742,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs): additional_fn代表点击的哪个按钮,按钮见functional.py """ + inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm") method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项 yield from method(inputs, llm_kwargs, *args, **kwargs) diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py index 9eed4f09..ece374b0 100644 --- a/shared_utils/advanced_markdown_format.py +++ b/shared_utils/advanced_markdown_format.py @@ -4,52 +4,47 @@ import os import math from textwrap import dedent from functools import lru_cache -from pymdownx.superfences import fence_div_format, fence_code_format +from pymdownx.superfences import fence_code_format from latex2mathml.converter import convert as tex2mathml from shared_utils.config_loader import get_conf as get_conf - -pj = os.path.join -default_user_name = 'default_user' +from shared_utils.text_mask import apply_gpt_academic_string_mask markdown_extension_configs = { - 'mdx_math': { - 'enable_dollar_delimiter': True, - 'use_gitlab_delimiters': False, + "mdx_math": { + "enable_dollar_delimiter": True, + "use_gitlab_delimiters": False, }, } code_highlight_configs = { "pymdownx.superfences": { - 'css_class': 'codehilite', + "css_class": "codehilite", "custom_fences": [ - { - 'name': 'mermaid', - 'class': 'mermaid', - 'format': fence_code_format - } - ] + {"name": "mermaid", "class": "mermaid", "format": 
fence_code_format} + ], }, "pymdownx.highlight": { - 'css_class': 'codehilite', - 'guess_lang': True, + "css_class": "codehilite", + "guess_lang": True, # 'auto_title': True, # 'linenums': True - } + }, } + def text_divide_paragraph(text): """ 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。 """ pre = '
' - suf = '
' + suf = "" if text.startswith(pre) and text.endswith(suf): return text - if '```' in text: + if "```" in text: # careful input return text - elif '' in text: + elif "" in text: # careful input return text else: @@ -71,20 +66,20 @@ def tex2mathml_catch_exception(content, *args, **kwargs): def replace_math_no_render(match): content = match.group(1) - if 'mode=display' in match.group(0): - content = content.replace('\n', '
') - return f"$${content}$$" + if "mode=display" in match.group(0): + content = content.replace("\n", "
") + return f'$${content}$$' else: - return f"${content}$" + return f'${content}$' def replace_math_render(match): content = match.group(1) - if 'mode=display' in match.group(0): - if '\\begin{aligned}' in content: - content = content.replace('\\begin{aligned}', '\\begin{array}') - content = content.replace('\\end{aligned}', '\\end{array}') - content = content.replace('&', ' ') + if "mode=display" in match.group(0): + if "\\begin{aligned}" in content: + content = content.replace("\\begin{aligned}", "\\begin{array}") + content = content.replace("\\end{aligned}", "\\end{array}") + content = content.replace("&", " ") content = tex2mathml_catch_exception(content, display="block") return content else: @@ -95,9 +90,11 @@ def markdown_bug_hunt(content): """ 解决一个mdx_math的bug(单$包裹begin命令时多余\n', '') + content = content.replace( + '\n", "") return content @@ -105,25 +102,29 @@ def is_equation(txt): """ 判定是否为公式 | 测试1 写出洛伦兹定律,使用tex格式公式 测试2 给出柯西不等式,使用latex格式 测试3 写出麦克斯韦方程组 """ - if '```' in txt and '```reference' not in txt: return False - if '$' not in txt and '\\[' not in txt: return False + if "```" in txt and "```reference" not in txt: + return False + if "$" not in txt and "\\[" not in txt: + return False mathpatterns = { - r'(?^[ \t]*(?:~{3,}|`{3,}))[ ]* # opening fence ((\{(?P[^\}\n]*)\})| # (optional {attrs} or (\.?(?P[\w#.+-]*)[ ]*)? # optional (.)lang @@ -162,16 +164,17 @@ FENCED_BLOCK_RE = re.compile( \n # newline (end of opening fence) (?P.*?)(?<=\n) # the code block (?P=fence)[ ]*$ # closing fence - '''), - re.MULTILINE | re.DOTALL | re.VERBOSE + """ + ), + re.MULTILINE | re.DOTALL | re.VERBOSE, ) def get_line_range(re_match_obj, txt): start_pos, end_pos = re_match_obj.regs[0] - num_newlines_before = txt[:start_pos+1].count('\n') + num_newlines_before = txt[: start_pos + 1].count("\n") line_start = num_newlines_before - line_end = num_newlines_before + txt[start_pos:end_pos].count('\n')+1 + line_end = num_newlines_before + txt[start_pos:end_pos].count("\n") + 1 return line_start, line_end @@ -181,14 +184,16 @@ def fix_code_segment_indent(txt): txt_tmp = txt while True: re_match_obj = FENCED_BLOCK_RE.search(txt_tmp) - if not re_match_obj: break - if len(lines) == 0: lines = txt.split("\n") - + if not re_match_obj: + break + if len(lines) == 0: + lines = txt.split("\n") + # 清空 txt_tmp 对应的位置方便下次搜索 start_pos, end_pos = re_match_obj.regs[0] - txt_tmp = txt_tmp[:start_pos] + ' '*(end_pos-start_pos) + txt_tmp[end_pos:] + txt_tmp = txt_tmp[:start_pos] + " " * (end_pos - start_pos) + txt_tmp[end_pos:] line_start, line_end = get_line_range(re_match_obj, txt) - + # 获取公共缩进 shared_indent_cnt = 1e5 for i in range(line_start, line_end): @@ -202,26 +207,26 @@ def fix_code_segment_indent(txt): num_spaces_should_be = math.ceil(shared_indent_cnt / 4) * 4 for i in range(line_start, line_end): add_n = num_spaces_should_be - shared_indent_cnt - lines[i] = ' ' * add_n + lines[i] - if not change_any: # 遇到第一个 + lines[i] = " " * add_n + lines[i] + if not change_any: # 遇到第一个 change_any = True if change_any: - return '\n'.join(lines) + return "\n".join(lines) else: return txt - - -@lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度 + + +@lru_cache(maxsize=128) # 使用 lru缓存 加快转换速度 def markdown_convertion(txt): """ 将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。 """ pre = '
' - suf = '
' + suf = "" if txt.startswith(pre) and txt.endswith(suf): # print('警告,输入了已经经过转化的字符串,二次转化可能出问题') - return txt # 已经被转化过,不需要再次转化 + return txt # 已经被转化过,不需要再次转化 find_equation_pattern = r'\n""" + + # 添加Live2D + if ADD_WAIFU: + for jsf in [ + "file=docs/waifu_plugin/jquery.min.js", + "file=docs/waifu_plugin/jquery-ui.min.js", + "file=docs/waifu_plugin/autoload.js", + ]: + js += f"""\n""" + return js \ No newline at end of file diff --git a/themes/contrast.py b/themes/contrast.py index 9a4b56fd..1e988377 100644 --- a/themes/contrast.py +++ b/themes/contrast.py @@ -67,22 +67,9 @@ def adjust_theme(): button_cancel_text_color_dark="white", ) - js = "" - for jsf in [ - os.path.join(theme_dir, "common.js"), - os.path.join(theme_dir, "mermaid.min.js"), - os.path.join(theme_dir, "mermaid_loader.js"), - ]: - with open(jsf, "r", encoding="utf8") as f: - js += f"" - - # 添加一个萌萌的看板娘 - if ADD_WAIFU: - js += """ - - - - """ + from themes.common import get_common_html_javascript_code + js = get_common_html_javascript_code() + if not hasattr(gr, "RawTemplateResponse"): gr.RawTemplateResponse = gr.routes.templates.TemplateResponse gradio_original_template_fn = gr.RawTemplateResponse diff --git a/themes/default.py b/themes/default.py index b8e94319..a65b0119 100644 --- a/themes/default.py +++ b/themes/default.py @@ -67,22 +67,8 @@ def adjust_theme(): button_cancel_text_color_dark="white", ) - js = "" - for jsf in [ - os.path.join(theme_dir, "common.js"), - os.path.join(theme_dir, "mermaid.min.js"), - os.path.join(theme_dir, "mermaid_loader.js"), - ]: - with open(jsf, "r", encoding="utf8") as f: - js += f"" - - # 添加一个萌萌的看板娘 - if ADD_WAIFU: - js += """ - - - - """ + from themes.common import get_common_html_javascript_code + js = get_common_html_javascript_code() if not hasattr(gr, "RawTemplateResponse"): gr.RawTemplateResponse = gr.routes.templates.TemplateResponse gradio_original_template_fn = gr.RawTemplateResponse diff --git a/themes/gradios.py b/themes/gradios.py index 68f15df8..14d88a29 100644 --- a/themes/gradios.py +++ b/themes/gradios.py @@ -31,23 +31,9 @@ def adjust_theme(): THEME = THEME.lstrip("huggingface-") set_theme = set_theme.from_hub(THEME.lower()) - js = "" - for jsf in [ - os.path.join(theme_dir, "common.js"), - os.path.join(theme_dir, "mermaid.min.js"), - os.path.join(theme_dir, "mermaid_loader.js"), - ]: - with open(jsf, "r", encoding="utf8") as f: - js += f"" - - - # 添加一个萌萌的看板娘 - if ADD_WAIFU: - js += """ - - - - """ + from themes.common import get_common_html_javascript_code + js = get_common_html_javascript_code() + if not hasattr(gr, "RawTemplateResponse"): gr.RawTemplateResponse = gr.routes.templates.TemplateResponse gradio_original_template_fn = gr.RawTemplateResponse diff --git a/themes/green.py b/themes/green.py index 84287417..b16249a8 100644 --- a/themes/green.py +++ b/themes/green.py @@ -76,22 +76,8 @@ def adjust_theme(): chatbot_code_background_color_dark="*neutral_950", ) - js = "" - for jsf in [ - os.path.join(theme_dir, "common.js"), - os.path.join(theme_dir, "mermaid.min.js"), - os.path.join(theme_dir, "mermaid_loader.js"), - ]: - with open(jsf, "r", encoding="utf8") as f: - js += f"" - - # 添加一个萌萌的看板娘 - if ADD_WAIFU: - js += """ - - - - """ + from themes.common import get_common_html_javascript_code + js = get_common_html_javascript_code() with open(os.path.join(theme_dir, "green.js"), "r", encoding="utf8") as f: js += f"" diff --git a/themes/mermaid_loader.js b/themes/mermaid_loader.js index 443d636a..36304f24 100644 --- a/themes/mermaid_loader.js +++ b/themes/mermaid_loader.js 
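Patch 01 above wires `build_gpt_academic_masked_string` and `apply_gpt_academic_string_mask` into `core_functional.py` and `request_llms/bridge_all.py`, but the new `shared_utils/text_mask.py` that defines them is not reproduced in this excerpt. The sketch below is only an illustration of the idea, with hypothetical delimiter markers and function bodies (not the project's actual implementation): one string carries both an LLM-facing and a render-facing variant, and the mask is resolved differently before the text is sent to the model (`mode="show_llm"`) versus rendered in the chat UI (`mode="show_render"`).

```python
import re

# Hypothetical delimiters -- the real shared_utils/text_mask.py may use different markers.
_LLM_OPEN, _LLM_CLOSE = "<gpt_academic_show_llm>", "</gpt_academic_show_llm>"
_RENDER_OPEN, _RENDER_CLOSE = "<gpt_academic_show_render>", "</gpt_academic_show_render>"


def build_gpt_academic_masked_string(text_show_llm="", text_show_render=""):
    """Pack the LLM-facing and render-facing variants of a fragment into one string."""
    return (f"{_RENDER_OPEN}{text_show_render}{_RENDER_CLOSE}"
            f"{_LLM_OPEN}{text_show_llm}{_LLM_CLOSE}")


def apply_gpt_academic_string_mask(string, mode="show_all"):
    """Keep only the variant requested by `mode` and strip the marker tags."""
    if mode == "show_llm":       # what gets sent to the language model
        string = re.sub(re.escape(_RENDER_OPEN) + r".*?" + re.escape(_RENDER_CLOSE),
                        "", string, flags=re.S)
    elif mode == "show_render":  # what the chat UI renders
        string = re.sub(re.escape(_LLM_OPEN) + r".*?" + re.escape(_LLM_CLOSE),
                        "", string, flags=re.S)
    # drop any leftover markers so "show_all" returns both variants as plain text
    for tag in (_LLM_OPEN, _LLM_CLOSE, _RENDER_OPEN, _RENDER_CLOSE):
        string = string.replace(tag, "")
    return string


# The Suffix in core_functional.py builds "```" + mask, so the model is asked for a
# "```mermaid" fence while the rendered prompt shows only a bare "```".
fence = "```" + build_gpt_academic_masked_string(text_show_llm="mermaid", text_show_render="")
print(apply_gpt_academic_string_mask(fence, mode="show_llm"))     # ```mermaid
print(apply_gpt_academic_string_mask(fence, mode="show_render"))  # ```
```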
@@ -106,7 +106,7 @@ const uml = async className => { defaultConfig.theme = "dark" } - const Module = await import('./file=themes/mermaid_editor.js'); + const Module = await import('/file=themes/mermaid_editor.js'); function do_render(block, code, codeContent, cnt) { var rendered_content = mermaid.render(`_diagram_${cnt}`, code); From 7b6828ab07df0c3b60355e033cdfa4200610f25c Mon Sep 17 00:00:00 2001 From: Menghuan1918 Date: Sun, 21 Jan 2024 23:41:39 +0800 Subject: [PATCH 08/33] =?UTF-8?q?=E4=BB=8E=E5=BD=93=E5=89=8D=E5=AF=B9?= =?UTF-8?q?=E8=AF=9D=E5=8E=86=E5=8F=B2=E4=B8=AD=E7=94=9F=E4=BA=A7Mermaid?= =?UTF-8?q?=E5=9B=BE=E8=A1=A8=E7=9A=84=E6=8F=92=E4=BB=B6=20(#1497)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add functionality to generate multiple types of Mermaid charts * Update conditional statement in 解析历史输入 function --- crazy_functional.py | 8 + crazy_functions/生成多种Mermaid图表.py | 224 +++++++++++++++++++++++++ 2 files changed, 232 insertions(+) create mode 100644 crazy_functions/生成多种Mermaid图表.py diff --git a/crazy_functional.py b/crazy_functional.py index ee5e8b24..166676d9 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -34,6 +34,7 @@ def get_crazy_functions(): from crazy_functions.Latex全文润色 import Latex英文纠错 from crazy_functions.批量Markdown翻译 import Markdown中译英 from crazy_functions.虚空终端 import 虚空终端 + from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表 function_plugins = { "虚空终端": { @@ -69,6 +70,13 @@ def get_crazy_functions(): "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", "Function": HotReload(清除缓存), }, + "生成多种Mermaid图表(从当前对话内容中生产多种图表)": { + "Group": "对话", + "Color": "stop", + "AsButton": False, + "Info" : "基于当前对话生成多种Mermaid图表,图表类型由对话模型自行判断", + "Function": HotReload(生成多种Mermaid图表), + }, "批量总结Word文档": { "Group": "学术", "Color": "stop", diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py new file mode 100644 index 00000000..48eadd98 --- /dev/null +++ b/crazy_functions/生成多种Mermaid图表.py @@ -0,0 +1,224 @@ +from toolbox import CatchException, update_ui +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +import datetime + +#暂时只写了这几种的PROMPT +SELECT_PROMPT = """ +“{subject}” +============= +以上是从文章中提取的摘要,将会使用这些摘要绘制图表。请你选择一个合适的图表类型: +1 流程图 +2 序列图 +3 类图 +4 饼图 +5 甘特图 +6 状态图 +7 实体关系图 +8 象限提示图 +不需要解释原因,仅需要输出单个不带任何标点符号的数字。 +""" +#流程图 +PROMPT_1 = """ +请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例: +```mermaid +graph TD + P(编程) --> L1(Python) + P(编程) --> L2(C) + P(编程) --> L3(C++) + P(编程) --> L4(Javascipt) + P(编程) --> L5(PHP) +``` +""" +#序列图 +PROMPT_2 = """ +请你给出围绕“{subject}”的序列图,使用mermaid语法,mermaid语法举例: +```mermaid +sequenceDiagram + participant A as 用户 + participant B as 系统 + A->>B: 登录请求 + B->>A: 登录成功 + A->>B: 获取数据 + B->>A: 返回数据 +``` +""" +#类图 +PROMPT_3 = """ +请你给出围绕“{subject}”的类图,使用mermaid语法,mermaid语法举例: +```mermaid +classDiagram + Class01 <|-- AveryLongClass : Cool + Class03 *-- Class04 + Class05 o-- Class06 + Class07 .. Class08 + Class09 --> C2 : Where am i? 
+ Class09 --* C3 + Class09 --|> Class07 + Class07 : equals() + Class07 : Object[] elementData + Class01 : size() + Class01 : int chimp + Class01 : int gorilla + Class08 <--> C2: Cool label +``` +""" +#饼图 +PROMPT_4 = """ +请你给出围绕“{subject}”的饼图,使用mermaid语法,mermaid语法举例: +```mermaid +pie title Pets adopted by volunteers + "狗" : 386 + "猫" : 85 + "兔子" : 15 +``` +""" +#甘特图 +PROMPT_5 = """ +请你给出围绕“{subject}”的甘特图,使用mermaid语法,mermaid语法举例: +```mermaid +gantt + title 项目开发流程 + dateFormat YYYY-MM-DD + section 设计 + 需求分析 :done, des1, 2024-01-06,2024-01-08 + 原型设计 :active, des2, 2024-01-09, 3d + UI设计 : des3, after des2, 5d + section 开发 + 前端开发 :2024-01-20, 10d + 后端开发 :2024-01-20, 10d +``` +""" +#状态图 +PROMPT_6 = """ +请你给出围绕“{subject}”的状态图,使用mermaid语法,mermaid语法举例: +```mermaid +stateDiagram-v2 + [*] --> Still + Still --> [*] + Still --> Moving + Moving --> Still + Moving --> Crash + Crash --> [*] +``` +""" +#实体关系图 +PROMPT_7 = """ +请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例: +```mermaid +erDiagram + CUSTOMER ||--o{ ORDER : places + ORDER ||--|{ LINE-ITEM : contains + CUSTOMER { + string name + string id + } + ORDER { + string orderNumber + date orderDate + string customerID + } + LINE-ITEM { + number quantity + string productID + } +``` +""" +#象限提示图 +PROMPT_8 = """ +请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例: +```mermaid +graph LR + A[Hard skill] --> B(Programming) + A[Hard skill] --> C(Design) + D[Soft skill] --> E(Coordination) + D[Soft skill] --> F(Communication) +``` +""" + +def 解析历史输入(history,llm_kwargs,chatbot): + ############################## <第 0 步,切割输入> ################################## + # 借用PDF切割中的函数对文本进行切割 + TOKEN_LIMIT_PER_FRAGMENT = 2500 + txt = str(history).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars + from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit + txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) + ############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ################################## + i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 + chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI + results = [] + MAX_WORD_TOTAL = 4096 + n_txt = len(txt) + last_iteration_result = "从以下文本中提取摘要。" + if n_txt >= 20: print('文章极长,不能达到预期效果') + for i in range(n_txt): + NUM_OF_WORD = MAX_WORD_TOTAL // n_txt + i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i]}" + i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...." + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 + llm_kwargs, chatbot, + history=["The main content of the previous section is?", last_iteration_result], # 迭代上一次的结果 + sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese." 
# 提示 + ) + results.append(gpt_say) + last_iteration_result = gpt_say + ############################## <第 2 步,根据整理的摘要选择图表类型> ################################## + i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 + chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI + results_txt = '\n'.join(results) + i_say = SELECT_PROMPT.format(subject=results_txt) + i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图' + for i in range(3): + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, + inputs_show_user=i_say_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + sys_prompt="" + ) + if gpt_say in ['1','2','3','4','5','6','7','8']: #判断返回是否正确 + break + if gpt_say not in ['1','2','3','4','5','6','7','8']: + gpt_say = '1' + ############################## <第 3 步,根据选择的图表类型绘制图表> ################################## + if gpt_say == '1': + i_say = PROMPT_1.format(subject=results_txt) + elif gpt_say == '2': + i_say = PROMPT_2.format(subject=results_txt) + elif gpt_say == '3': + i_say = PROMPT_3.format(subject=results_txt) + elif gpt_say == '4': + i_say = PROMPT_4.format(subject=results_txt) + elif gpt_say == '5': + i_say = PROMPT_5.format(subject=results_txt) + elif gpt_say == '6': + i_say = PROMPT_6.format(subject=results_txt) + elif gpt_say == '7': + i_say = PROMPT_7.format(subject=results_txt) + elif gpt_say == '8': + i_say = PROMPT_8.format(subject=results_txt) + i_say_show_user = f'请根据判断结果绘制相应的图表。' + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, + inputs_show_user=i_say_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + sys_prompt="" + ) + history.append(gpt_say) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 + +@CatchException +def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + """ + txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 + llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 + plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数 + chatbot 聊天显示框的句柄,用于显示给用户 + history 聊天历史,前情提要 + system_prompt 给gpt的静默提醒 + web_port 当前软件运行的端口号 + """ + chatbot.append([ + "函数插件功能?", + "根据当前聊天历史绘制多种mermaid图表的功能,将会首先判断适合的图表类型,随后绘制图表。函数插件贡献者: Menghuan1918"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + if txt == "": txt = "空白的输入栏" # :)虽然暂时没用到输入栏内容 + yield from 解析历史输入(history,llm_kwargs,chatbot) \ No newline at end of file From 1506c198343bb581ced5160900ca39e521179184 Mon Sep 17 00:00:00 2001 From: Menghuan1918 Date: Mon, 22 Jan 2024 14:55:39 +0800 Subject: [PATCH 09/33] Update crazy_functional.py with new functionality deal with PDF (#1500) --- crazy_functional.py | 2 +- crazy_functions/生成多种Mermaid图表.py | 25 ++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index 166676d9..c6cb01a5 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -74,7 +74,7 @@ def get_crazy_functions(): "Group": "对话", "Color": "stop", "AsButton": False, - "Info" : "基于当前对话生成多种Mermaid图表,图表类型由对话模型自行判断", + "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断", "Function": HotReload(生成多种Mermaid图表), }, "批量总结Word文档": { diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py index 48eadd98..a9a2f9b4 100644 --- a/crazy_functions/生成多种Mermaid图表.py +++ b/crazy_functions/生成多种Mermaid图表.py @@ -1,5 +1,6 @@ from toolbox import CatchException, update_ui from .crazy_utils 
import request_gpt_model_in_new_thread_with_ui_alive +from .crazy_utils import read_and_clean_pdf_text import datetime #暂时只写了这几种的PROMPT @@ -143,7 +144,7 @@ def 解析历史输入(history,llm_kwargs,chatbot): from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) ############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ################################## - i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 + i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI results = [] MAX_WORD_TOTAL = 4096 @@ -205,6 +206,21 @@ def 解析历史输入(history,llm_kwargs,chatbot): history.append(gpt_say) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 +def 输入区文件处理(txt): + if txt == "": return False, txt + success = True + import glob + from .crazy_utils import get_files_from_everything + file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') + if not file_pdf or len(pdf_manifest) == 0: + return False, txt #如不是pdf文件则返回输入区内容 + final_result = "" + for index, fp in enumerate(pdf_manifest): + file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF + file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars + final_result += file_content + return True, final_result + @CatchException def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): """ @@ -216,9 +232,12 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt 给gpt的静默提醒 web_port 当前软件运行的端口号 """ + import os chatbot.append([ "函数插件功能?", - "根据当前聊天历史绘制多种mermaid图表的功能,将会首先判断适合的图表类型,随后绘制图表。函数插件贡献者: Menghuan1918"]) + "根据当前聊天历史或PDF中绘制多种mermaid图表的功能,将会首先判断适合的图表类型,随后绘制图表。函数插件贡献者: Menghuan1918"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - if txt == "": txt = "空白的输入栏" # :)虽然暂时没用到输入栏内容 + if os.path.exists(txt): #如输入区无内容则直接解析历史记录 + file_exist, txt = 输入区文件处理(txt) + history.append(txt) #将解析后的txt传递加入到历史中 yield from 解析历史输入(history,llm_kwargs,chatbot) \ No newline at end of file From 2291be2b28097971b1908e0a51d12d75945da80d Mon Sep 17 00:00:00 2001 From: Menghuan1918 Date: Tue, 23 Jan 2024 15:45:34 +0800 Subject: [PATCH 10/33] Update "Generate multiple Mermaid charts" plugin (#1503) * Update crazy_functional.py with new functionality deal with PDF * Update crazy_functional.py and Mermaid.py for plugin_kwargs --- crazy_functional.py | 4 +- crazy_functions/生成多种Mermaid图表.py | 62 +++++++++++++++++--------- 2 files changed, 44 insertions(+), 22 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index c6cb01a5..f957b74e 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -70,12 +70,14 @@ def get_crazy_functions(): "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", "Function": HotReload(清除缓存), }, - "生成多种Mermaid图表(从当前对话内容中生产多种图表)": { + "生成多种Mermaid图表(从当前对话或文件(.pdf)中生产图表)": { "Group": "对话", "Color": "stop", "AsButton": False, "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断", "Function": HotReload(生成多种Mermaid图表), + "AdvancedArgs": True, + "ArgsReminder": "请输入图类型对应的数字:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图", }, "批量总结Word文档": { "Group": "学术", diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py index a9a2f9b4..720c64e6 100644 --- a/crazy_functions/生成多种Mermaid图表.py +++ 
b/crazy_functions/生成多种Mermaid图表.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, update_ui +from toolbox import CatchException, update_ui, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text import datetime @@ -136,7 +136,7 @@ graph LR ``` """ -def 解析历史输入(history,llm_kwargs,chatbot): +def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): ############################## <第 0 步,切割输入> ################################## # 借用PDF切割中的函数对文本进行切割 TOKEN_LIMIT_PER_FRAGMENT = 2500 @@ -163,22 +163,25 @@ def 解析历史输入(history,llm_kwargs,chatbot): results.append(gpt_say) last_iteration_result = gpt_say ############################## <第 2 步,根据整理的摘要选择图表类型> ################################## - i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI - results_txt = '\n'.join(results) - i_say = SELECT_PROMPT.format(subject=results_txt) - i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图' - for i in range(3): - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="" - ) - if gpt_say in ['1','2','3','4','5','6','7','8']: #判断返回是否正确 - break - if gpt_say not in ['1','2','3','4','5','6','7','8']: - gpt_say = '1' + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数 + results_txt = '\n'.join(results) #合并摘要 + if gpt_say not in ['1','2','3','4','5','6','7','8']: #如插件参数不正确则使用对话模型判断 + i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 + chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI + i_say = SELECT_PROMPT.format(subject=results_txt) + i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图' + for i in range(3): + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, + inputs_show_user=i_say_show_user, + llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], + sys_prompt="" + ) + if gpt_say in ['1','2','3','4','5','6','7','8']: #判断返回是否正确 + break + if gpt_say not in ['1','2','3','4','5','6','7','8']: + gpt_say = '1' ############################## <第 3 步,根据选择的图表类型绘制图表> ################################## if gpt_say == '1': i_say = PROMPT_1.format(subject=results_txt) @@ -193,7 +196,7 @@ def 解析历史输入(history,llm_kwargs,chatbot): elif gpt_say == '6': i_say = PROMPT_6.format(subject=results_txt) elif gpt_say == '7': - i_say = PROMPT_7.format(subject=results_txt) + i_say = PROMPT_7.replace("{subject}", results_txt) #由于实体关系图用到了{}符号 elif gpt_say == '8': i_say = PROMPT_8.format(subject=results_txt) i_say_show_user = f'请根据判断结果绘制相应的图表。' @@ -233,11 +236,28 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, web_port 当前软件运行的端口号 """ import os + + # 基本信息:功能、贡献者 chatbot.append([ "函数插件功能?", - "根据当前聊天历史或PDF中绘制多种mermaid图表的功能,将会首先判断适合的图表类型,随后绘制图表。函数插件贡献者: Menghuan1918"]) + "根据当前聊天历史或PDF中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ + \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import fitz + except: + report_exception(chatbot, 
history, + a = f"解析项目: {txt}", + b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + if os.path.exists(txt): #如输入区无内容则直接解析历史记录 file_exist, txt = 输入区文件处理(txt) + + if file_exist : history = [] #如输入区内容为文件则清空历史记录 history.append(txt) #将解析后的txt传递加入到历史中 - yield from 解析历史输入(history,llm_kwargs,chatbot) \ No newline at end of file + + yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs) \ No newline at end of file From 82795d38172d8eeecf41945d3340ee72c47997e7 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 24 Jan 2024 00:44:27 +0800 Subject: [PATCH 11/33] remove mask string feature for now (still buggy) --- core_functional.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core_functional.py b/core_functional.py index d5359965..f9b1abcd 100644 --- a/core_functional.py +++ b/core_functional.py @@ -43,13 +43,14 @@ def get_core_functions(): # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": # dedent() 函数用于去除多行字符串的缩进 + # ```{build_gpt_academic_masked_string(text_show_llm="mermaid", text_show_render="")} dedent("\n"+f''' ============================== 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如: 以下是对以上文本的总结,以mermaid flowchart的形式展示: - ```{build_gpt_academic_masked_string(text_show_llm="mermaid", text_show_render="")} + ```mermaid flowchart LR A["节点名1"] --> B("节点名2") B --> C{"节点名3"} From ef311c4859e3581af65f4b4fc28f3381c2d26b86 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 24 Jan 2024 01:06:58 +0800 Subject: [PATCH 12/33] localize mjs scripts --- shared_utils/advanced_markdown_format.py | 14 + themes/base64.mjs | 296 + themes/mermaid_editor.js | 4 +- themes/pako.esm.mjs | 6877 ++++++++++++++++++++++ 4 files changed, 7189 insertions(+), 2 deletions(-) create mode 100644 themes/base64.mjs create mode 100644 themes/pako.esm.mjs diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py index 103319b6..653cf07b 100644 --- a/shared_utils/advanced_markdown_format.py +++ b/shared_utils/advanced_markdown_format.py @@ -31,6 +31,20 @@ code_highlight_configs = { }, } +code_highlight_configs_block_mermaid = { + "pymdownx.superfences": { + "css_class": "codehilite", + # "custom_fences": [ + # {"name": "mermaid", "class": "mermaid", "format": fence_code_format} + # ], + }, + "pymdownx.highlight": { + "css_class": "codehilite", + "guess_lang": True, + # 'auto_title': True, + # 'linenums': True + }, +} def tex2mathml_catch_exception(content, *args, **kwargs): try: diff --git a/themes/base64.mjs b/themes/base64.mjs new file mode 100644 index 00000000..5e64328d --- /dev/null +++ b/themes/base64.mjs @@ -0,0 +1,296 @@ +/** + * base64.ts + * + * Licensed under the BSD 3-Clause License. + * http://opensource.org/licenses/BSD-3-Clause + * + * References: + * http://en.wikipedia.org/wiki/Base64 + * + * @author Dan Kogai (https://github.com/dankogai) + */ +const version = '3.7.2'; +/** + * @deprecated use lowercase `version`. + */ +const VERSION = version; +const _hasatob = typeof atob === 'function'; +const _hasbtoa = typeof btoa === 'function'; +const _hasBuffer = typeof Buffer === 'function'; +const _TD = typeof TextDecoder === 'function' ? new TextDecoder() : undefined; +const _TE = typeof TextEncoder === 'function' ? 
new TextEncoder() : undefined; +const b64ch = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='; +const b64chs = Array.prototype.slice.call(b64ch); +const b64tab = ((a) => { + let tab = {}; + a.forEach((c, i) => tab[c] = i); + return tab; +})(b64chs); +const b64re = /^(?:[A-Za-z\d+\/]{4})*?(?:[A-Za-z\d+\/]{2}(?:==)?|[A-Za-z\d+\/]{3}=?)?$/; +const _fromCC = String.fromCharCode.bind(String); +const _U8Afrom = typeof Uint8Array.from === 'function' + ? Uint8Array.from.bind(Uint8Array) + : (it, fn = (x) => x) => new Uint8Array(Array.prototype.slice.call(it, 0).map(fn)); +const _mkUriSafe = (src) => src + .replace(/=/g, '').replace(/[+\/]/g, (m0) => m0 == '+' ? '-' : '_'); +const _tidyB64 = (s) => s.replace(/[^A-Za-z0-9\+\/]/g, ''); +/** + * polyfill version of `btoa` + */ +const btoaPolyfill = (bin) => { + // console.log('polyfilled'); + let u32, c0, c1, c2, asc = ''; + const pad = bin.length % 3; + for (let i = 0; i < bin.length;) { + if ((c0 = bin.charCodeAt(i++)) > 255 || + (c1 = bin.charCodeAt(i++)) > 255 || + (c2 = bin.charCodeAt(i++)) > 255) + throw new TypeError('invalid character found'); + u32 = (c0 << 16) | (c1 << 8) | c2; + asc += b64chs[u32 >> 18 & 63] + + b64chs[u32 >> 12 & 63] + + b64chs[u32 >> 6 & 63] + + b64chs[u32 & 63]; + } + return pad ? asc.slice(0, pad - 3) + "===".substring(pad) : asc; +}; +/** + * does what `window.btoa` of web browsers do. + * @param {String} bin binary string + * @returns {string} Base64-encoded string + */ +const _btoa = _hasbtoa ? (bin) => btoa(bin) + : _hasBuffer ? (bin) => Buffer.from(bin, 'binary').toString('base64') + : btoaPolyfill; +const _fromUint8Array = _hasBuffer + ? (u8a) => Buffer.from(u8a).toString('base64') + : (u8a) => { + // cf. https://stackoverflow.com/questions/12710001/how-to-convert-uint8-array-to-base64-encoded-string/12713326#12713326 + const maxargs = 0x1000; + let strs = []; + for (let i = 0, l = u8a.length; i < l; i += maxargs) { + strs.push(_fromCC.apply(null, u8a.subarray(i, i + maxargs))); + } + return _btoa(strs.join('')); + }; +/** + * converts a Uint8Array to a Base64 string. + * @param {boolean} [urlsafe] URL-and-filename-safe a la RFC4648 §5 + * @returns {string} Base64 string + */ +const fromUint8Array = (u8a, urlsafe = false) => urlsafe ? _mkUriSafe(_fromUint8Array(u8a)) : _fromUint8Array(u8a); +// This trick is found broken https://github.com/dankogai/js-base64/issues/130 +// const utob = (src: string) => unescape(encodeURIComponent(src)); +// reverting good old fationed regexp +const cb_utob = (c) => { + if (c.length < 2) { + var cc = c.charCodeAt(0); + return cc < 0x80 ? c + : cc < 0x800 ? (_fromCC(0xc0 | (cc >>> 6)) + + _fromCC(0x80 | (cc & 0x3f))) + : (_fromCC(0xe0 | ((cc >>> 12) & 0x0f)) + + _fromCC(0x80 | ((cc >>> 6) & 0x3f)) + + _fromCC(0x80 | (cc & 0x3f))); + } + else { + var cc = 0x10000 + + (c.charCodeAt(0) - 0xD800) * 0x400 + + (c.charCodeAt(1) - 0xDC00); + return (_fromCC(0xf0 | ((cc >>> 18) & 0x07)) + + _fromCC(0x80 | ((cc >>> 12) & 0x3f)) + + _fromCC(0x80 | ((cc >>> 6) & 0x3f)) + + _fromCC(0x80 | (cc & 0x3f))); + } +}; +const re_utob = /[\uD800-\uDBFF][\uDC00-\uDFFFF]|[^\x00-\x7F]/g; +/** + * @deprecated should have been internal use only. + * @param {string} src UTF-8 string + * @returns {string} UTF-16 string + */ +const utob = (u) => u.replace(re_utob, cb_utob); +// +const _encode = _hasBuffer + ? (s) => Buffer.from(s, 'utf8').toString('base64') + : _TE + ? (s) => _fromUint8Array(_TE.encode(s)) + : (s) => _btoa(utob(s)); +/** + * converts a UTF-8-encoded string to a Base64 string. 
+ * @param {boolean} [urlsafe] if `true` make the result URL-safe + * @returns {string} Base64 string + */ +const encode = (src, urlsafe = false) => urlsafe + ? _mkUriSafe(_encode(src)) + : _encode(src); +/** + * converts a UTF-8-encoded string to URL-safe Base64 RFC4648 §5. + * @returns {string} Base64 string + */ +const encodeURI = (src) => encode(src, true); +// This trick is found broken https://github.com/dankogai/js-base64/issues/130 +// const btou = (src: string) => decodeURIComponent(escape(src)); +// reverting good old fationed regexp +const re_btou = /[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF]{2}|[\xF0-\xF7][\x80-\xBF]{3}/g; +const cb_btou = (cccc) => { + switch (cccc.length) { + case 4: + var cp = ((0x07 & cccc.charCodeAt(0)) << 18) + | ((0x3f & cccc.charCodeAt(1)) << 12) + | ((0x3f & cccc.charCodeAt(2)) << 6) + | (0x3f & cccc.charCodeAt(3)), offset = cp - 0x10000; + return (_fromCC((offset >>> 10) + 0xD800) + + _fromCC((offset & 0x3FF) + 0xDC00)); + case 3: + return _fromCC(((0x0f & cccc.charCodeAt(0)) << 12) + | ((0x3f & cccc.charCodeAt(1)) << 6) + | (0x3f & cccc.charCodeAt(2))); + default: + return _fromCC(((0x1f & cccc.charCodeAt(0)) << 6) + | (0x3f & cccc.charCodeAt(1))); + } +}; +/** + * @deprecated should have been internal use only. + * @param {string} src UTF-16 string + * @returns {string} UTF-8 string + */ +const btou = (b) => b.replace(re_btou, cb_btou); +/** + * polyfill version of `atob` + */ +const atobPolyfill = (asc) => { + // console.log('polyfilled'); + asc = asc.replace(/\s+/g, ''); + if (!b64re.test(asc)) + throw new TypeError('malformed base64.'); + asc += '=='.slice(2 - (asc.length & 3)); + let u24, bin = '', r1, r2; + for (let i = 0; i < asc.length;) { + u24 = b64tab[asc.charAt(i++)] << 18 + | b64tab[asc.charAt(i++)] << 12 + | (r1 = b64tab[asc.charAt(i++)]) << 6 + | (r2 = b64tab[asc.charAt(i++)]); + bin += r1 === 64 ? _fromCC(u24 >> 16 & 255) + : r2 === 64 ? _fromCC(u24 >> 16 & 255, u24 >> 8 & 255) + : _fromCC(u24 >> 16 & 255, u24 >> 8 & 255, u24 & 255); + } + return bin; +}; +/** + * does what `window.atob` of web browsers do. + * @param {String} asc Base64-encoded string + * @returns {string} binary string + */ +const _atob = _hasatob ? (asc) => atob(_tidyB64(asc)) + : _hasBuffer ? (asc) => Buffer.from(asc, 'base64').toString('binary') + : atobPolyfill; +// +const _toUint8Array = _hasBuffer + ? (a) => _U8Afrom(Buffer.from(a, 'base64')) + : (a) => _U8Afrom(_atob(a), c => c.charCodeAt(0)); +/** + * converts a Base64 string to a Uint8Array. + */ +const toUint8Array = (a) => _toUint8Array(_unURI(a)); +// +const _decode = _hasBuffer + ? (a) => Buffer.from(a, 'base64').toString('utf8') + : _TD + ? (a) => _TD.decode(_toUint8Array(a)) + : (a) => btou(_atob(a)); +const _unURI = (a) => _tidyB64(a.replace(/[-_]/g, (m0) => m0 == '-' ? '+' : '/')); +/** + * converts a Base64 string to a UTF-8 string. + * @param {String} src Base64 string. 
Both normal and URL-safe are supported + * @returns {string} UTF-8 string + */ +const decode = (src) => _decode(_unURI(src)); +/** + * check if a value is a valid Base64 string + * @param {String} src a value to check + */ +const isValid = (src) => { + if (typeof src !== 'string') + return false; + const s = src.replace(/\s+/g, '').replace(/={0,2}$/, ''); + return !/[^\s0-9a-zA-Z\+/]/.test(s) || !/[^\s0-9a-zA-Z\-_]/.test(s); +}; +// +const _noEnum = (v) => { + return { + value: v, enumerable: false, writable: true, configurable: true + }; +}; +/** + * extend String.prototype with relevant methods + */ +const extendString = function () { + const _add = (name, body) => Object.defineProperty(String.prototype, name, _noEnum(body)); + _add('fromBase64', function () { return decode(this); }); + _add('toBase64', function (urlsafe) { return encode(this, urlsafe); }); + _add('toBase64URI', function () { return encode(this, true); }); + _add('toBase64URL', function () { return encode(this, true); }); + _add('toUint8Array', function () { return toUint8Array(this); }); +}; +/** + * extend Uint8Array.prototype with relevant methods + */ +const extendUint8Array = function () { + const _add = (name, body) => Object.defineProperty(Uint8Array.prototype, name, _noEnum(body)); + _add('toBase64', function (urlsafe) { return fromUint8Array(this, urlsafe); }); + _add('toBase64URI', function () { return fromUint8Array(this, true); }); + _add('toBase64URL', function () { return fromUint8Array(this, true); }); +}; +/** + * extend Builtin prototypes with relevant methods + */ +const extendBuiltins = () => { + extendString(); + extendUint8Array(); +}; +const gBase64 = { + version: version, + VERSION: VERSION, + atob: _atob, + atobPolyfill: atobPolyfill, + btoa: _btoa, + btoaPolyfill: btoaPolyfill, + fromBase64: decode, + toBase64: encode, + encode: encode, + encodeURI: encodeURI, + encodeURL: encodeURI, + utob: utob, + btou: btou, + decode: decode, + isValid: isValid, + fromUint8Array: fromUint8Array, + toUint8Array: toUint8Array, + extendString: extendString, + extendUint8Array: extendUint8Array, + extendBuiltins: extendBuiltins, +}; +// makecjs:CUT // +export { version }; +export { VERSION }; +export { _atob as atob }; +export { atobPolyfill }; +export { _btoa as btoa }; +export { btoaPolyfill }; +export { decode as fromBase64 }; +export { encode as toBase64 }; +export { utob }; +export { encode }; +export { encodeURI }; +export { encodeURI as encodeURL }; +export { btou }; +export { decode }; +export { isValid }; +export { fromUint8Array }; +export { toUint8Array }; +export { extendString }; +export { extendUint8Array }; +export { extendBuiltins }; +// and finally, +export { gBase64 as Base64 }; \ No newline at end of file diff --git a/themes/mermaid_editor.js b/themes/mermaid_editor.js index 18e02a67..cd1d1a61 100644 --- a/themes/mermaid_editor.js +++ b/themes/mermaid_editor.js @@ -1,5 +1,5 @@ -import { deflate, inflate } from 'https://fastly.jsdelivr.net/gh/nodeca/pako@master/dist/pako.esm.mjs'; -import { toUint8Array, fromUint8Array, toBase64, fromBase64 } from 'https://cdn.jsdelivr.net/npm/js-base64@3.7.2/base64.mjs'; +import { deflate, inflate } from '/file=themes/pako.esm.mjs'; +import { toUint8Array, fromUint8Array, toBase64, fromBase64 } from '/file=themes/base64.mjs'; const base64Serde = { serialize: (state) => { diff --git a/themes/pako.esm.mjs b/themes/pako.esm.mjs new file mode 100644 index 00000000..59d34b99 --- /dev/null +++ b/themes/pako.esm.mjs @@ -0,0 +1,6877 @@ + +/*! 
pako 2.1.0 https://github.com/nodeca/pako @license (MIT AND Zlib) */ +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +/* eslint-disable space-unary-ops */ + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + + +//const Z_FILTERED = 1; +//const Z_HUFFMAN_ONLY = 2; +//const Z_RLE = 3; +const Z_FIXED$1 = 4; +//const Z_DEFAULT_STRATEGY = 0; + +/* Possible values of the data_type field (though see inflate()) */ +const Z_BINARY = 0; +const Z_TEXT = 1; +//const Z_ASCII = 1; // = Z_TEXT +const Z_UNKNOWN$1 = 2; + +/*============================================================================*/ + + +function zero$1(buf) { let len = buf.length; while (--len >= 0) { buf[len] = 0; } } + +// From zutil.h + +const STORED_BLOCK = 0; +const STATIC_TREES = 1; +const DYN_TREES = 2; +/* The three kinds of block type */ + +const MIN_MATCH$1 = 3; +const MAX_MATCH$1 = 258; +/* The minimum and maximum match lengths */ + +// From deflate.h +/* =========================================================================== + * Internal compression state. 
+ */ + +const LENGTH_CODES$1 = 29; +/* number of length codes, not counting the special END_BLOCK code */ + +const LITERALS$1 = 256; +/* number of literal bytes 0..255 */ + +const L_CODES$1 = LITERALS$1 + 1 + LENGTH_CODES$1; +/* number of Literal or Length codes, including the END_BLOCK code */ + +const D_CODES$1 = 30; +/* number of distance codes */ + +const BL_CODES$1 = 19; +/* number of codes used to transfer the bit lengths */ + +const HEAP_SIZE$1 = 2 * L_CODES$1 + 1; +/* maximum heap size */ + +const MAX_BITS$1 = 15; +/* All codes must not exceed MAX_BITS bits */ + +const Buf_size = 16; +/* size of bit buffer in bi_buf */ + + +/* =========================================================================== + * Constants + */ + +const MAX_BL_BITS = 7; +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +const END_BLOCK = 256; +/* end of block literal code */ + +const REP_3_6 = 16; +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +const REPZ_3_10 = 17; +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +const REPZ_11_138 = 18; +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +/* eslint-disable comma-spacing,array-bracket-spacing */ +const extra_lbits = /* extra bits for each length code */ + new Uint8Array([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]); + +const extra_dbits = /* extra bits for each distance code */ + new Uint8Array([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]); + +const extra_blbits = /* extra bits for each bit length code */ + new Uint8Array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]); + +const bl_order = + new Uint8Array([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]); +/* eslint-enable comma-spacing,array-bracket-spacing */ + +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +/* =========================================================================== + * Local data. These are initialized only once. + */ + +// We pre-fill arrays with 0 to avoid uninitialized gaps + +const DIST_CODE_LEN = 512; /* see definition of array dist_code below */ + +// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1 +const static_ltree = new Array((L_CODES$1 + 2) * 2); +zero$1(static_ltree); +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ + +const static_dtree = new Array(D_CODES$1 * 2); +zero$1(static_dtree); +/* The static distance tree. (Actually a trivial tree since all codes use + * 5 bits.) + */ + +const _dist_code = new Array(DIST_CODE_LEN); +zero$1(_dist_code); +/* Distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. 
+ */ + +const _length_code = new Array(MAX_MATCH$1 - MIN_MATCH$1 + 1); +zero$1(_length_code); +/* length code for each normalized match length (0 == MIN_MATCH) */ + +const base_length = new Array(LENGTH_CODES$1); +zero$1(base_length); +/* First normalized length for each code (0 = MIN_MATCH) */ + +const base_dist = new Array(D_CODES$1); +zero$1(base_dist); +/* First normalized distance for each code (0 = distance of 1) */ + + +function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) { + + this.static_tree = static_tree; /* static tree or NULL */ + this.extra_bits = extra_bits; /* extra bits for each code or NULL */ + this.extra_base = extra_base; /* base index for extra_bits */ + this.elems = elems; /* max number of elements in the tree */ + this.max_length = max_length; /* max bit length for the codes */ + + // show if `static_tree` has data or dummy - needed for monomorphic objects + this.has_stree = static_tree && static_tree.length; +} + + +let static_l_desc; +let static_d_desc; +let static_bl_desc; + + +function TreeDesc(dyn_tree, stat_desc) { + this.dyn_tree = dyn_tree; /* the dynamic tree */ + this.max_code = 0; /* largest code with non zero frequency */ + this.stat_desc = stat_desc; /* the corresponding static tree */ +} + + + +const d_code = (dist) => { + + return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)]; +}; + + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. + */ +const put_short = (s, w) => { +// put_byte(s, (uch)((w) & 0xff)); +// put_byte(s, (uch)((ush)(w) >> 8)); + s.pending_buf[s.pending++] = (w) & 0xff; + s.pending_buf[s.pending++] = (w >>> 8) & 0xff; +}; + + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. + */ +const send_bits = (s, value, length) => { + + if (s.bi_valid > (Buf_size - length)) { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + put_short(s, s.bi_buf); + s.bi_buf = value >> (Buf_size - s.bi_valid); + s.bi_valid += length - Buf_size; + } else { + s.bi_buf |= (value << s.bi_valid) & 0xffff; + s.bi_valid += length; + } +}; + + +const send_code = (s, c, tree) => { + + send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/); +}; + + +/* =========================================================================== + * Reverse the first len bits of a code, using straightforward code (a faster + * method would use a table) + * IN assertion: 1 <= len <= 15 + */ +const bi_reverse = (code, len) => { + + let res = 0; + do { + res |= code & 1; + code >>>= 1; + res <<= 1; + } while (--len > 0); + return res >>> 1; +}; + + +/* =========================================================================== + * Flush the bit buffer, keeping at most 7 bits in it. + */ +const bi_flush = (s) => { + + if (s.bi_valid === 16) { + put_short(s, s.bi_buf); + s.bi_buf = 0; + s.bi_valid = 0; + + } else if (s.bi_valid >= 8) { + s.pending_buf[s.pending++] = s.bi_buf & 0xff; + s.bi_buf >>= 8; + s.bi_valid -= 8; + } +}; + + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. 
+ * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. + */ +const gen_bitlen = (s, desc) => { +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ + + const tree = desc.dyn_tree; + const max_code = desc.max_code; + const stree = desc.stat_desc.static_tree; + const has_stree = desc.stat_desc.has_stree; + const extra = desc.stat_desc.extra_bits; + const base = desc.stat_desc.extra_base; + const max_length = desc.stat_desc.max_length; + let h; /* heap index */ + let n, m; /* iterate over the tree elements */ + let bits; /* bit length */ + let xbits; /* extra bits */ + let f; /* frequency */ + let overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS$1; bits++) { + s.bl_count[bits] = 0; + } + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */ + + for (h = s.heap_max + 1; h < HEAP_SIZE$1; h++) { + n = s.heap[h]; + bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1; + if (bits > max_length) { + bits = max_length; + overflow++; + } + tree[n * 2 + 1]/*.Len*/ = bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) { continue; } /* not a leaf node */ + + s.bl_count[bits]++; + xbits = 0; + if (n >= base) { + xbits = extra[n - base]; + } + f = tree[n * 2]/*.Freq*/; + s.opt_len += f * (bits + xbits); + if (has_stree) { + s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits); + } + } + if (overflow === 0) { return; } + + // Tracev((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length - 1; + while (s.bl_count[bits] === 0) { bits--; } + s.bl_count[bits]--; /* move one leaf down the tree */ + s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */ + s.bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits !== 0; bits--) { + n = s.bl_count[bits]; + while (n !== 0) { + m = s.heap[--h]; + if (m > max_code) { continue; } + if (tree[m * 2 + 1]/*.Len*/ !== bits) { + // Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/; + tree[m * 2 + 1]/*.Len*/ = bits; + } + n--; + } + } +}; + + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. 
+ */ +const gen_codes = (tree, max_code, bl_count) => { +// ct_data *tree; /* the tree to decorate */ +// int max_code; /* largest code with non zero frequency */ +// ushf *bl_count; /* number of codes at each bit length */ + + const next_code = new Array(MAX_BITS$1 + 1); /* next code value for each bit length */ + let code = 0; /* running code value */ + let bits; /* bit index */ + let n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS$1; bits++) { + code = (code + bl_count[bits - 1]) << 1; + next_code[bits] = code; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. + */ + //Assert (code + bl_count[MAX_BITS]-1 == (1< { + + let n; /* iterates over tree elements */ + let bits; /* bit counter */ + let length; /* length value */ + let code; /* code value */ + let dist; /* distance index */ + const bl_count = new Array(MAX_BITS$1 + 1); + /* number of codes at each bit length for an optimal tree */ + + // do check in _tr_init() + //if (static_init_done) return; + + /* For some embedded targets, global variables are not initialized: */ +/*#ifdef NO_INIT_GLOBAL_POINTERS + static_l_desc.static_tree = static_ltree; + static_l_desc.extra_bits = extra_lbits; + static_d_desc.static_tree = static_dtree; + static_d_desc.extra_bits = extra_dbits; + static_bl_desc.extra_bits = extra_blbits; +#endif*/ + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES$1 - 1; code++) { + base_length[code] = length; + for (n = 0; n < (1 << extra_lbits[code]); n++) { + _length_code[length++] = code; + } + } + //Assert (length == 256, "tr_static_init: length != 256"); + /* Note that the length 255 (match length 258) can be represented + * in two different ways: code 284 + 5 bits or code 285, so we + * overwrite length_code[255] to use the best encoding: + */ + _length_code[length - 1] = code; + + /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ + dist = 0; + for (code = 0; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1 << extra_dbits[code]); n++) { + _dist_code[dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: dist != 256"); + dist >>= 7; /* from now on, all distances are divided by 128 */ + for (; code < D_CODES$1; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { + _dist_code[256 + dist++] = code; + } + } + //Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS$1; bits++) { + bl_count[bits] = 0; + } + + n = 0; + while (n <= 143) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + while (n <= 255) { + static_ltree[n * 2 + 1]/*.Len*/ = 9; + n++; + bl_count[9]++; + } + while (n <= 279) { + static_ltree[n * 2 + 1]/*.Len*/ = 7; + n++; + bl_count[7]++; + } + while (n <= 287) { + static_ltree[n * 2 + 1]/*.Len*/ = 8; + n++; + bl_count[8]++; + } + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes(static_ltree, L_CODES$1 + 1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES$1; n++) { + static_dtree[n * 2 + 1]/*.Len*/ = 5; + static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5); + } + + // Now data ready and we can init static trees + static_l_desc = new 
StaticTreeDesc(static_ltree, extra_lbits, LITERALS$1 + 1, L_CODES$1, MAX_BITS$1); + static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES$1, MAX_BITS$1); + static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES$1, MAX_BL_BITS); + + //static_init_done = true; +}; + + +/* =========================================================================== + * Initialize a new block. + */ +const init_block = (s) => { + + let n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES$1; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < D_CODES$1; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; } + for (n = 0; n < BL_CODES$1; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; } + + s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1; + s.opt_len = s.static_len = 0; + s.sym_next = s.matches = 0; +}; + + +/* =========================================================================== + * Flush the bit buffer and align the output on a byte boundary + */ +const bi_windup = (s) => +{ + if (s.bi_valid > 8) { + put_short(s, s.bi_buf); + } else if (s.bi_valid > 0) { + //put_byte(s, (Byte)s->bi_buf); + s.pending_buf[s.pending++] = s.bi_buf; + } + s.bi_buf = 0; + s.bi_valid = 0; +}; + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. + */ +const smaller = (tree, n, m, depth) => { + + const _n2 = n * 2; + const _m2 = m * 2; + return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ || + (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m])); +}; + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). 
+ */ +const pqdownheap = (s, tree, k) => { +// deflate_state *s; +// ct_data *tree; /* the tree to restore */ +// int k; /* node to move down */ + + const v = s.heap[k]; + let j = k << 1; /* left son of k */ + while (j <= s.heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s.heap_len && + smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s.heap[j], s.depth)) { break; } + + /* Exchange v with the smallest son */ + s.heap[k] = s.heap[j]; + k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s.heap[k] = v; +}; + + +// inlined manually +// const SMALLEST = 1; + +/* =========================================================================== + * Send the block data compressed using the given Huffman trees + */ +const compress_block = (s, ltree, dtree) => { +// deflate_state *s; +// const ct_data *ltree; /* literal tree */ +// const ct_data *dtree; /* distance tree */ + + let dist; /* distance of matched string */ + let lc; /* match length or unmatched char (if dist == 0) */ + let sx = 0; /* running index in sym_buf */ + let code; /* the code to send */ + let extra; /* number of extra bits to send */ + + if (s.sym_next !== 0) { + do { + dist = s.pending_buf[s.sym_buf + sx++] & 0xff; + dist += (s.pending_buf[s.sym_buf + sx++] & 0xff) << 8; + lc = s.pending_buf[s.sym_buf + sx++]; + if (dist === 0) { + send_code(s, lc, ltree); /* send a literal byte */ + //Tracecv(isgraph(lc), (stderr," '%c' ", lc)); + } else { + /* Here, lc is the match length - MIN_MATCH */ + code = _length_code[lc]; + send_code(s, code + LITERALS$1 + 1, ltree); /* send the length code */ + extra = extra_lbits[code]; + if (extra !== 0) { + lc -= base_length[code]; + send_bits(s, lc, extra); /* send the extra length bits */ + } + dist--; /* dist is now the match distance - 1 */ + code = d_code(dist); + //Assert (code < D_CODES, "bad d_code"); + + send_code(s, code, dtree); /* send the distance code */ + extra = extra_dbits[code]; + if (extra !== 0) { + dist -= base_dist[code]; + send_bits(s, dist, extra); /* send the extra distance bits */ + } + } /* literal or match pair ? */ + + /* Check that the overlay between pending_buf and sym_buf is ok: */ + //Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow"); + + } while (sx < s.sym_next); + } + + send_code(s, END_BLOCK, ltree); +}; + + +/* =========================================================================== + * Construct one Huffman tree and assigns the code bit strings and lengths. + * Update the total bit length for the current block. + * IN assertion: the field freq is set for all tree elements. + * OUT assertions: the fields len and code are set to the optimal bit length + * and corresponding code. The length opt_len is updated; static_len is + * also updated if stree is not null. The field max_code is set. + */ +const build_tree = (s, desc) => { +// deflate_state *s; +// tree_desc *desc; /* the tree descriptor */ + + const tree = desc.dyn_tree; + const stree = desc.stat_desc.static_tree; + const has_stree = desc.stat_desc.has_stree; + const elems = desc.stat_desc.elems; + let n, m; /* iterate over heap elements */ + let max_code = -1; /* largest code with non zero frequency */ + let node; /* new node being created */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. 
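+ * Leaves whose frequency is zero get Len 0 and are never pushed on the heap.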
+ */ + s.heap_len = 0; + s.heap_max = HEAP_SIZE$1; + + for (n = 0; n < elems; n++) { + if (tree[n * 2]/*.Freq*/ !== 0) { + s.heap[++s.heap_len] = max_code = n; + s.depth[n] = 0; + + } else { + tree[n * 2 + 1]/*.Len*/ = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s.heap_len < 2) { + node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0); + tree[node * 2]/*.Freq*/ = 1; + s.depth[node] = 0; + s.opt_len--; + + if (has_stree) { + s.static_len -= stree[node * 2 + 1]/*.Len*/; + } + /* node is 0 or 1 so it does not have extra bits */ + } + desc.max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); } + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. + */ + node = elems; /* next internal node of the tree */ + do { + //pqremove(s, tree, n); /* n = node of least frequency */ + /*** pqremove ***/ + n = s.heap[1/*SMALLEST*/]; + s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--]; + pqdownheap(s, tree, 1/*SMALLEST*/); + /***/ + + m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */ + + s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */ + s.heap[--s.heap_max] = m; + + /* Create a new node father of n and m */ + tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/; + s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1; + tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node; + + /* and insert the new node in the heap */ + s.heap[1/*SMALLEST*/] = node++; + pqdownheap(s, tree, 1/*SMALLEST*/); + + } while (s.heap_len >= 2); + + s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen(s, desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes(tree, max_code, s.bl_count); +}; + + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. 
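+ * Runs of equal code lengths are folded into the RFC 1951 repeat codes:
+ * REP_3_6 repeats the previous length 3..6 times, while REPZ_3_10 and
+ * REPZ_11_138 emit runs of 3..10 and 11..138 zero lengths respectively.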
+ */ +const scan_tree = (s, tree, max_code) => { +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ + + let n; /* iterates over all tree elements */ + let prevlen = -1; /* last emitted length */ + let curlen; /* length of current code */ + + let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + let count = 0; /* repeat count of the current code */ + let max_count = 7; /* max repeat count */ + let min_count = 4; /* min repeat count */ + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + s.bl_tree[curlen * 2]/*.Freq*/ += count; + + } else if (curlen !== 0) { + + if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; } + s.bl_tree[REP_3_6 * 2]/*.Freq*/++; + + } else if (count <= 10) { + s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++; + + } else { + s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++; + } + + count = 0; + prevlen = curlen; + + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +}; + + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. + */ +const send_tree = (s, tree, max_code) => { +// deflate_state *s; +// ct_data *tree; /* the tree to be scanned */ +// int max_code; /* and its largest code of non zero frequency */ + + let n; /* iterates over all tree elements */ + let prevlen = -1; /* last emitted length */ + let curlen; /* length of current code */ + + let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */ + + let count = 0; /* repeat count of the current code */ + let max_count = 7; /* max repeat count */ + let min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen === 0) { + max_count = 138; + min_count = 3; + } + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; + nextlen = tree[(n + 1) * 2 + 1]/*.Len*/; + + if (++count < max_count && curlen === nextlen) { + continue; + + } else if (count < min_count) { + do { send_code(s, curlen, s.bl_tree); } while (--count !== 0); + + } else if (curlen !== 0) { + if (curlen !== prevlen) { + send_code(s, curlen, s.bl_tree); + count--; + } + //Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s.bl_tree); + send_bits(s, count - 3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s.bl_tree); + send_bits(s, count - 3, 3); + + } else { + send_code(s, REPZ_11_138, s.bl_tree); + send_bits(s, count - 11, 7); + } + + count = 0; + prevlen = curlen; + if (nextlen === 0) { + max_count = 138; + min_count = 3; + + } else if (curlen === nextlen) { + max_count = 6; + min_count = 3; + + } else { + max_count = 7; + min_count = 4; + } + } +}; + + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. 
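+ * bl_order is the fixed transmission order defined by RFC 1951, which is why
+ * trailing zero-length entries can simply be omitted (but never fewer than 4).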
+ */ +const build_bl_tree = (s) => { + + let max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, s.dyn_ltree, s.l_desc.max_code); + scan_tree(s, s.dyn_dtree, s.d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, s.bl_desc); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES$1 - 1; max_blindex >= 3; max_blindex--) { + if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) { + break; + } + } + /* Update opt_len to include the bit length tree and counts */ + s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4; + //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + // s->opt_len, s->static_len)); + + return max_blindex; +}; + + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. + */ +const send_all_trees = (s, lcodes, dcodes, blcodes) => { +// deflate_state *s; +// int lcodes, dcodes, blcodes; /* number of codes for each tree */ + + let rank; /* index in bl_order */ + + //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + // "too many codes"); + //Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes - 1, 5); + send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + //Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3); + } + //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */ + //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */ + //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +}; + + +/* =========================================================================== + * Check if the data type is TEXT or BINARY, using the following algorithm: + * - TEXT if the two conditions below are satisfied: + * a) There are no non-portable control characters belonging to the + * "block list" (0..6, 14..25, 28..31). + * b) There is at least one printable character belonging to the + * "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * - BINARY otherwise. + * - The following partially-portable control characters form a + * "gray list" that is ignored in this detection algorithm: + * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). + * IN assertion: the fields Freq of dyn_ltree are set. + */ +const detect_data_type = (s) => { + /* block_mask is the bit mask of block-listed bytes + * set bits 0..6, 14..25, and 28..31 + * 0xf3ffc07f = binary 11110011111111111100000001111111 + */ + let block_mask = 0xf3ffc07f; + let n; + + /* Check for non-textual ("block-listed") bytes. 
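+ * At this point the Freq slots of dyn_ltree double as a histogram of the
+ * literal byte values seen in the current block.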
*/ + for (n = 0; n <= 31; n++, block_mask >>>= 1) { + if ((block_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + return Z_BINARY; + } + } + + /* Check for textual ("allow-listed") bytes. */ + if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || + s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + for (n = 32; n < LITERALS$1; n++) { + if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) { + return Z_TEXT; + } + } + + /* There are no "block-listed" or "allow-listed" bytes: + * this stream either is empty or has tolerated ("gray-listed") bytes only. + */ + return Z_BINARY; +}; + + +let static_init_done = false; + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. + */ +const _tr_init$1 = (s) => +{ + + if (!static_init_done) { + tr_static_init(); + static_init_done = true; + } + + s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc); + s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc); + s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc); + + s.bi_buf = 0; + s.bi_valid = 0; + + /* Initialize the first block of the first file: */ + init_block(s); +}; + + +/* =========================================================================== + * Send a stored block + */ +const _tr_stored_block$1 = (s, buf, stored_len, last) => { +//DeflateState *s; +//charf *buf; /* input block */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ + + send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */ + bi_windup(s); /* align on byte boundary */ + put_short(s, stored_len); + put_short(s, ~stored_len); + if (stored_len) { + s.pending_buf.set(s.window.subarray(buf, buf + stored_len), s.pending); + } + s.pending += stored_len; +}; + + +/* =========================================================================== + * Send one empty static block to give enough lookahead for inflate. + * This takes 10 bits, of which 7 may remain in the bit buffer. + */ +const _tr_align$1 = (s) => { + send_bits(s, STATIC_TREES << 1, 3); + send_code(s, END_BLOCK, static_ltree); + bi_flush(s); +}; + + +/* =========================================================================== + * Determine the best encoding for the current block: dynamic trees, static + * trees or store, and write out the encoded block. + */ +const _tr_flush_block$1 = (s, buf, stored_len, last) => { +//DeflateState *s; +//charf *buf; /* input block, or NULL if too old */ +//ulg stored_len; /* length of input block */ +//int last; /* one if this is the last block for a file */ + + let opt_lenb, static_lenb; /* opt_len and static_len in bytes */ + let max_blindex = 0; /* index of last bit length code of non zero freq */ + + /* Build the Huffman trees unless a stored block is forced */ + if (s.level > 0) { + + /* Check if the file is binary or text */ + if (s.strm.data_type === Z_UNKNOWN$1) { + s.strm.data_type = detect_data_type(s); + } + + /* Construct the literal and distance trees */ + build_tree(s, s.l_desc); + // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + + build_tree(s, s.d_desc); + // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, + // s->static_len)); + /* At this point, opt_len and static_len are the total bit lengths of + * the compressed block data, excluding the tree representations. 
+ */ + + /* Build the bit length tree for the above two trees, and get the index + * in bl_order of the last bit length code to send. + */ + max_blindex = build_bl_tree(s); + + /* Determine the best encoding. Compute the block lengths in bytes. */ + opt_lenb = (s.opt_len + 3 + 7) >>> 3; + static_lenb = (s.static_len + 3 + 7) >>> 3; + + // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", + // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, + // s->sym_next / 3)); + + if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; } + + } else { + // Assert(buf != (char*)0, "lost buf"); + opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ + } + + if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) { + /* 4: two words for the lengths */ + + /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. + * Otherwise we can't have processed more than WSIZE input bytes since + * the last block flush, because compression would have been + * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to + * transform a block into a stored block. + */ + _tr_stored_block$1(s, buf, stored_len, last); + + } else if (s.strategy === Z_FIXED$1 || static_lenb === opt_lenb) { + + send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3); + compress_block(s, static_ltree, static_dtree); + + } else { + send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3); + send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1); + compress_block(s, s.dyn_ltree, s.dyn_dtree); + } + // Assert (s->compressed_len == s->bits_sent, "bad compressed size"); + /* The above check is made mod 2^32, for files larger than 512 MB + * and uLong implemented on 32 bits. + */ + init_block(s); + + if (last) { + bi_windup(s); + } + // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, + // s->compressed_len-7*last)); +}; + +/* =========================================================================== + * Save the match info and tally the frequency counts. Return true if + * the current block must be flushed. + */ +const _tr_tally$1 = (s, dist, lc) => { +// deflate_state *s; +// unsigned dist; /* distance of matched string */ +// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + + s.pending_buf[s.sym_buf + s.sym_next++] = dist; + s.pending_buf[s.sym_buf + s.sym_next++] = dist >> 8; + s.pending_buf[s.sym_buf + s.sym_next++] = lc; + if (dist === 0) { + /* lc is the unmatched char */ + s.dyn_ltree[lc * 2]/*.Freq*/++; + } else { + s.matches++; + /* Here, lc is the match length - MIN_MATCH */ + dist--; /* dist = match distance - 1 */ + //Assert((ush)dist < (ush)MAX_DIST(s) && + // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && + // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); + + s.dyn_ltree[(_length_code[lc] + LITERALS$1 + 1) * 2]/*.Freq*/++; + s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++; + } + + return (s.sym_next === s.sym_end); +}; + +var _tr_init_1 = _tr_init$1; +var _tr_stored_block_1 = _tr_stored_block$1; +var _tr_flush_block_1 = _tr_flush_block$1; +var _tr_tally_1 = _tr_tally$1; +var _tr_align_1 = _tr_align$1; + +var trees = { + _tr_init: _tr_init_1, + _tr_stored_block: _tr_stored_block_1, + _tr_flush_block: _tr_flush_block_1, + _tr_tally: _tr_tally_1, + _tr_align: _tr_align_1 +}; + +// Note: adler32 takes 12% for level 0 and 2% for level 6. +// It isn't worth it to make additional optimizations as in original. +// Small size is preferable. 
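+//
+// A minimal reference sketch (not part of pako) of what the optimized loop
+// below computes: s1 sums the bytes and s2 sums the running s1, both modulo
+// 65521, with s2 packed into the high 16 bits of the result. Deferring the
+// `%=` in the real loop is only a speed trick; the value is identical.
+//
+//   const adler32Reference = (buf) => {
+//     let s1 = 1, s2 = 0;
+//     for (let i = 0; i < buf.length; i++) {
+//       s1 = (s1 + buf[i]) % 65521;
+//       s2 = (s2 + s1) % 65521;
+//     }
+//     return ((s2 << 16) | s1) >>> 0;
+//   };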
+ +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +const adler32 = (adler, buf, len, pos) => { + let s1 = (adler & 0xffff) |0, + s2 = ((adler >>> 16) & 0xffff) |0, + n = 0; + + while (len !== 0) { + // Set limit ~ twice less than 5552, to keep + // s2 in 31-bits, because we force signed ints. + // in other case %= will fail. + n = len > 2000 ? 2000 : len; + len -= n; + + do { + s1 = (s1 + buf[pos++]) |0; + s2 = (s2 + s1) |0; + } while (--n); + + s1 %= 65521; + s2 %= 65521; + } + + return (s1 | (s2 << 16)) |0; +}; + + +var adler32_1 = adler32; + +// Note: we can't get significant speed boost here. +// So write code to minimize size - no pregenerated tables +// and array tools dependencies. + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +// Use ordinary array, since untyped makes no boost here +const makeTable = () => { + let c, table = []; + + for (var n = 0; n < 256; n++) { + c = n; + for (var k = 0; k < 8; k++) { + c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1)); + } + table[n] = c; + } + + return table; +}; + +// Create table on load. Just 255 signed longs. Not a problem. +const crcTable = new Uint32Array(makeTable()); + + +const crc32 = (crc, buf, len, pos) => { + const t = crcTable; + const end = pos + len; + + crc ^= -1; + + for (let i = pos; i < end; i++) { + crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF]; + } + + return (crc ^ (-1)); // >>> 0; +}; + + +var crc32_1 = crc32; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. 
+// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +var messages = { + 2: 'need dictionary', /* Z_NEED_DICT 2 */ + 1: 'stream end', /* Z_STREAM_END 1 */ + 0: '', /* Z_OK 0 */ + '-1': 'file error', /* Z_ERRNO (-1) */ + '-2': 'stream error', /* Z_STREAM_ERROR (-2) */ + '-3': 'data error', /* Z_DATA_ERROR (-3) */ + '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */ + '-5': 'buffer error', /* Z_BUF_ERROR (-5) */ + '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */ +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +var constants$2 = { + + /* Allowed flush values; see deflate() and inflate() below for details */ + Z_NO_FLUSH: 0, + Z_PARTIAL_FLUSH: 1, + Z_SYNC_FLUSH: 2, + Z_FULL_FLUSH: 3, + Z_FINISH: 4, + Z_BLOCK: 5, + Z_TREES: 6, + + /* Return codes for the compression/decompression functions. Negative values + * are errors, positive values are used for special but normal events. + */ + Z_OK: 0, + Z_STREAM_END: 1, + Z_NEED_DICT: 2, + Z_ERRNO: -1, + Z_STREAM_ERROR: -2, + Z_DATA_ERROR: -3, + Z_MEM_ERROR: -4, + Z_BUF_ERROR: -5, + //Z_VERSION_ERROR: -6, + + /* compression levels */ + Z_NO_COMPRESSION: 0, + Z_BEST_SPEED: 1, + Z_BEST_COMPRESSION: 9, + Z_DEFAULT_COMPRESSION: -1, + + + Z_FILTERED: 1, + Z_HUFFMAN_ONLY: 2, + Z_RLE: 3, + Z_FIXED: 4, + Z_DEFAULT_STRATEGY: 0, + + /* Possible values of the data_type field (though see inflate()) */ + Z_BINARY: 0, + Z_TEXT: 1, + //Z_ASCII: 1, // = Z_TEXT (deprecated) + Z_UNKNOWN: 2, + + /* The deflate compression method */ + Z_DEFLATED: 8 + //Z_NULL: null // Use -1 or null inline, depending on var type +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. 
+// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +const { _tr_init, _tr_stored_block, _tr_flush_block, _tr_tally, _tr_align } = trees; + + + + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +const { + Z_NO_FLUSH: Z_NO_FLUSH$2, Z_PARTIAL_FLUSH, Z_FULL_FLUSH: Z_FULL_FLUSH$1, Z_FINISH: Z_FINISH$3, Z_BLOCK: Z_BLOCK$1, + Z_OK: Z_OK$3, Z_STREAM_END: Z_STREAM_END$3, Z_STREAM_ERROR: Z_STREAM_ERROR$2, Z_DATA_ERROR: Z_DATA_ERROR$2, Z_BUF_ERROR: Z_BUF_ERROR$1, + Z_DEFAULT_COMPRESSION: Z_DEFAULT_COMPRESSION$1, + Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE, Z_FIXED, Z_DEFAULT_STRATEGY: Z_DEFAULT_STRATEGY$1, + Z_UNKNOWN, + Z_DEFLATED: Z_DEFLATED$2 +} = constants$2; + +/*============================================================================*/ + + +const MAX_MEM_LEVEL = 9; +/* Maximum value for memLevel in deflateInit2 */ +const MAX_WBITS$1 = 15; +/* 32K LZ77 window */ +const DEF_MEM_LEVEL = 8; + + +const LENGTH_CODES = 29; +/* number of length codes, not counting the special END_BLOCK code */ +const LITERALS = 256; +/* number of literal bytes 0..255 */ +const L_CODES = LITERALS + 1 + LENGTH_CODES; +/* number of Literal or Length codes, including the END_BLOCK code */ +const D_CODES = 30; +/* number of distance codes */ +const BL_CODES = 19; +/* number of codes used to transfer the bit lengths */ +const HEAP_SIZE = 2 * L_CODES + 1; +/* maximum heap size */ +const MAX_BITS = 15; +/* All codes must not exceed MAX_BITS bits */ + +const MIN_MATCH = 3; +const MAX_MATCH = 258; +const MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1); + +const PRESET_DICT = 0x20; + +const INIT_STATE = 42; /* zlib header -> BUSY_STATE */ +//#ifdef GZIP +const GZIP_STATE = 57; /* gzip header -> BUSY_STATE | EXTRA_STATE */ +//#endif +const EXTRA_STATE = 69; /* gzip extra block -> NAME_STATE */ +const NAME_STATE = 73; /* gzip file name -> COMMENT_STATE */ +const COMMENT_STATE = 91; /* gzip comment -> HCRC_STATE */ +const HCRC_STATE = 103; /* gzip header CRC -> BUSY_STATE */ +const BUSY_STATE = 113; /* deflate -> FINISH_STATE */ +const FINISH_STATE = 666; /* stream complete */ + +const BS_NEED_MORE = 1; /* block not completed, need more input or more output */ +const BS_BLOCK_DONE = 2; /* block flush performed */ +const BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */ +const BS_FINISH_DONE = 4; /* finish done, accept no more input or output */ + +const OS_CODE = 0x03; // Unix :) . Don't detect, use this default. + +const err = (strm, errorCode) => { + strm.msg = messages[errorCode]; + return errorCode; +}; + +const rank = (f) => { + return ((f) * 2) - ((f) > 4 ? 
9 : 0); +}; + +const zero = (buf) => { + let len = buf.length; while (--len >= 0) { buf[len] = 0; } +}; + +/* =========================================================================== + * Slide the hash table when sliding the window down (could be avoided with 32 + * bit values at the expense of memory usage). We slide even when level == 0 to + * keep the hash table consistent if we switch back to level > 0 later. + */ +const slide_hash = (s) => { + let n, m; + let p; + let wsize = s.w_size; + + n = s.hash_size; + p = n; + do { + m = s.head[--p]; + s.head[p] = (m >= wsize ? m - wsize : 0); + } while (--n); + n = wsize; +//#ifndef FASTEST + p = n; + do { + m = s.prev[--p]; + s.prev[p] = (m >= wsize ? m - wsize : 0); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. + */ + } while (--n); +//#endif +}; + +/* eslint-disable new-cap */ +let HASH_ZLIB = (s, prev, data) => ((prev << s.hash_shift) ^ data) & s.hash_mask; +// This hash causes less collisions, https://github.com/nodeca/pako/issues/135 +// But breaks binary compatibility +//let HASH_FAST = (s, prev, data) => ((prev << 8) + (prev >> 8) + (data << 4)) & s.hash_mask; +let HASH = HASH_ZLIB; + + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output, except for + * some deflate_stored() output, goes through this function so some + * applications may wish to modify it to avoid allocating a large + * strm->next_out buffer and copying into it. (See also read_buf()). + */ +const flush_pending = (strm) => { + const s = strm.state; + + //_tr_flush_bits(s); + let len = s.pending; + if (len > strm.avail_out) { + len = strm.avail_out; + } + if (len === 0) { return; } + + strm.output.set(s.pending_buf.subarray(s.pending_out, s.pending_out + len), strm.next_out); + strm.next_out += len; + s.pending_out += len; + strm.total_out += len; + strm.avail_out -= len; + s.pending -= len; + if (s.pending === 0) { + s.pending_out = 0; + } +}; + + +const flush_block_only = (s, last) => { + _tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last); + s.block_start = s.strstart; + flush_pending(s.strm); +}; + + +const put_byte = (s, b) => { + s.pending_buf[s.pending++] = b; +}; + + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +const putShortMSB = (s, b) => { + + // put_byte(s, (Byte)(b >> 8)); +// put_byte(s, (Byte)(b & 0xff)); + s.pending_buf[s.pending++] = (b >>> 8) & 0xff; + s.pending_buf[s.pending++] = b & 0xff; +}; + + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->input buffer and copying from it. + * (See also flush_pending()). 
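+ * For a zlib wrapper (state.wrap === 1) the running adler32 is updated here;
+ * for a gzip wrapper (state.wrap === 2) the crc32 is updated instead.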
+ */ +const read_buf = (strm, buf, start, size) => { + + let len = strm.avail_in; + + if (len > size) { len = size; } + if (len === 0) { return 0; } + + strm.avail_in -= len; + + // zmemcpy(buf, strm->next_in, len); + buf.set(strm.input.subarray(strm.next_in, strm.next_in + len), start); + if (strm.state.wrap === 1) { + strm.adler = adler32_1(strm.adler, buf, len, start); + } + + else if (strm.state.wrap === 2) { + strm.adler = crc32_1(strm.adler, buf, len, start); + } + + strm.next_in += len; + strm.total_in += len; + + return len; +}; + + +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +const longest_match = (s, cur_match) => { + + let chain_length = s.max_chain_length; /* max hash chain length */ + let scan = s.strstart; /* current string */ + let match; /* matched string */ + let len; /* length of current match */ + let best_len = s.prev_length; /* best match length so far */ + let nice_match = s.nice_match; /* stop if match long enough */ + const limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ? + s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/; + + const _win = s.window; // shortcut + + const wmask = s.w_mask; + const prev = s.prev; + + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + + const strend = s.strstart + MAX_MATCH; + let scan_end1 = _win[scan + best_len - 1]; + let scan_end = _win[scan + best_len]; + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. + */ + // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s.prev_length >= s.good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if (nice_match > s.lookahead) { nice_match = s.lookahead; } + + // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + // Assert(cur_match < s->strstart, "no future"); + match = cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2. Note that the checks below + * for insufficient lookahead only occur occasionally for performance + * reasons. Therefore uninitialized memory will be accessed, and + * conditional jumps will be made that depend on those values. + * However the length of the match is limited to the lookahead, so + * the output of deflate is not affected by the uninitialized values. + */ + + if (_win[match + best_len] !== scan_end || + _win[match + best_len - 1] !== scan_end1 || + _win[match] !== _win[scan] || + _win[++match] !== _win[scan + 1]) { + continue; + } + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) 
+ * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2; + match++; + // Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. + */ + do { + /*jshint noempty:false*/ + } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + _win[++scan] === _win[++match] && _win[++scan] === _win[++match] && + scan < strend); + + // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (strend - scan); + scan = strend - MAX_MATCH; + + if (len > best_len) { + s.match_start = cur_match; + best_len = len; + if (len >= nice_match) { + break; + } + scan_end1 = _win[scan + best_len - 1]; + scan_end = _win[scan + best_len]; + } + } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0); + + if (best_len <= s.lookahead) { + return best_len; + } + return s.lookahead; +}; + + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. + * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +const fill_window = (s) => { + + const _w_size = s.w_size; + let n, more, str; + + //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); + + do { + more = s.window_size - s.lookahead - s.strstart; + + // JS ints have 32 bit, block below not needed + /* Deal with !@#$% 64K limit: */ + //if (sizeof(int) <= 2) { + // if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + // more = wsize; + // + // } else if (more == (unsigned)(-1)) { + // /* Very unlikely, but possible on 16 bit machine if + // * strstart == 0 && lookahead == 1 (input done a byte at time) + // */ + // more--; + // } + //} + + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) { + + s.window.set(s.window.subarray(_w_size, _w_size + _w_size - more), 0); + s.match_start -= _w_size; + s.strstart -= _w_size; + /* we now have strstart >= MAX_DIST */ + s.block_start -= _w_size; + if (s.insert > s.strstart) { + s.insert = s.strstart; + } + slide_hash(s); + more += _w_size; + } + if (s.strm.avail_in === 0) { + break; + } + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. 
+ */ + //Assert(more >= 2, "more < 2"); + n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more); + s.lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s.lookahead + s.insert >= MIN_MATCH) { + str = s.strstart - s.insert; + s.ins_h = s.window[str]; + + /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */ + s.ins_h = HASH(s, s.ins_h, s.window[str + 1]); +//#if MIN_MATCH != 3 +// Call update_hash() MIN_MATCH-3 more times +//#endif + while (s.insert) { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]); + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = str; + str++; + s.insert--; + if (s.lookahead + s.insert < MIN_MATCH) { + break; + } + } + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0); + + /* If the WIN_INIT bytes after the end of the current data have never been + * written, then zero those bytes in order to avoid memory check reports of + * the use of uninitialized (or uninitialised as Julian writes) bytes by + * the longest match routines. Update the high water mark for the next + * time through here. WIN_INIT is set to MAX_MATCH since the longest match + * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. + */ +// if (s.high_water < s.window_size) { +// const curr = s.strstart + s.lookahead; +// let init = 0; +// +// if (s.high_water < curr) { +// /* Previous high water mark below current data -- zero WIN_INIT +// * bytes or up to end of window, whichever is less. +// */ +// init = s.window_size - curr; +// if (init > WIN_INIT) +// init = WIN_INIT; +// zmemzero(s->window + curr, (unsigned)init); +// s->high_water = curr + init; +// } +// else if (s->high_water < (ulg)curr + WIN_INIT) { +// /* High water mark at or above current data, but below current data +// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up +// * to end of window, whichever is less. +// */ +// init = (ulg)curr + WIN_INIT - s->high_water; +// if (init > s->window_size - s->high_water) +// init = s->window_size - s->high_water; +// zmemzero(s->window + s->high_water, (unsigned)init); +// s->high_water += init; +// } +// } +// +// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, +// "not enough room for search"); +}; + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * + * In case deflateParams() is used to later switch to a non-zero compression + * level, s->matches (otherwise unused when storing) keeps track of the number + * of hash table slides to perform. If s->matches is 1, then one hash table + * slide will be done when switching. If s->matches is 2, the maximum value + * allowed here, then the hash table will be cleared, since two or more slides + * is the same as a clear. + * + * deflate_stored() is written to minimize the number of times an input byte is + * copied. It is most efficient with large input and output buffers, which + * maximizes the opportunites to have a single copy from next_in to next_out. + */ +const deflate_stored = (s, flush) => { + + /* Smallest worthy block size when not flushing or finishing. By default + * this is 32K. This can be as small as 507 bytes for memLevel == 1. 
For + * large input and output buffers, the stored block size will be larger. + */ + let min_block = s.pending_buf_size - 5 > s.w_size ? s.w_size : s.pending_buf_size - 5; + + /* Copy as many min_block or larger stored blocks directly to next_out as + * possible. If flushing, copy the remaining available input to next_out as + * stored blocks, if there is enough space. + */ + let len, left, have, last = 0; + let used = s.strm.avail_in; + do { + /* Set len to the maximum size block that we can copy directly with the + * available input data and output space. Set left to how much of that + * would be copied from what's left in the window. + */ + len = 65535/* MAX_STORED */; /* maximum deflate stored block length */ + have = (s.bi_valid + 42) >> 3; /* number of header bytes */ + if (s.strm.avail_out < have) { /* need room for header */ + break; + } + /* maximum stored block length that will fit in avail_out: */ + have = s.strm.avail_out - have; + left = s.strstart - s.block_start; /* bytes left in window */ + if (len > left + s.strm.avail_in) { + len = left + s.strm.avail_in; /* limit len to the input */ + } + if (len > have) { + len = have; /* limit len to the output */ + } + + /* If the stored block would be less than min_block in length, or if + * unable to copy all of the available input when flushing, then try + * copying to the window and the pending buffer instead. Also don't + * write an empty block when flushing -- deflate() does that. + */ + if (len < min_block && ((len === 0 && flush !== Z_FINISH$3) || + flush === Z_NO_FLUSH$2 || + len !== left + s.strm.avail_in)) { + break; + } + + /* Make a dummy stored block in pending to get the header bytes, + * including any pending bits. This also updates the debugging counts. + */ + last = flush === Z_FINISH$3 && len === left + s.strm.avail_in ? 1 : 0; + _tr_stored_block(s, 0, 0, last); + + /* Replace the lengths in the dummy stored block with len. */ + s.pending_buf[s.pending - 4] = len; + s.pending_buf[s.pending - 3] = len >> 8; + s.pending_buf[s.pending - 2] = ~len; + s.pending_buf[s.pending - 1] = ~len >> 8; + + /* Write the stored block header bytes. */ + flush_pending(s.strm); + +//#ifdef ZLIB_DEBUG +// /* Update debugging counts for the data about to be copied. */ +// s->compressed_len += len << 3; +// s->bits_sent += len << 3; +//#endif + + /* Copy uncompressed bytes from the window to next_out. */ + if (left) { + if (left > len) { + left = len; + } + //zmemcpy(s->strm->next_out, s->window + s->block_start, left); + s.strm.output.set(s.window.subarray(s.block_start, s.block_start + left), s.strm.next_out); + s.strm.next_out += left; + s.strm.avail_out -= left; + s.strm.total_out += left; + s.block_start += left; + len -= left; + } + + /* Copy uncompressed bytes directly from next_in to next_out, updating + * the check value. + */ + if (len) { + read_buf(s.strm, s.strm.output, s.strm.next_out, len); + s.strm.next_out += len; + s.strm.avail_out -= len; + s.strm.total_out += len; + } + } while (last === 0); + + /* Update the sliding window with the last s->w_size bytes of the copied + * data, or append all of the copied data to the existing window if less + * than s->w_size bytes were copied. Also update the number of bytes to + * insert in the hash tables, in the event that deflateParams() switches to + * a non-zero compression level. 
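+ * (While storing, s.matches is reused as a pending slide_hash() counter:
+ * 1 means one slide is still owed, 2 means the hash table must be cleared.)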
+ */ + used -= s.strm.avail_in; /* number of input bytes directly copied */ + if (used) { + /* If any input was used, then no unused input remains in the window, + * therefore s->block_start == s->strstart. + */ + if (used >= s.w_size) { /* supplant the previous history */ + s.matches = 2; /* clear hash */ + //zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size); + s.window.set(s.strm.input.subarray(s.strm.next_in - s.w_size, s.strm.next_in), 0); + s.strstart = s.w_size; + s.insert = s.strstart; + } + else { + if (s.window_size - s.strstart <= used) { + /* Slide the window down. */ + s.strstart -= s.w_size; + //zmemcpy(s->window, s->window + s->w_size, s->strstart); + s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0); + if (s.matches < 2) { + s.matches++; /* add a pending slide_hash() */ + } + if (s.insert > s.strstart) { + s.insert = s.strstart; + } + } + //zmemcpy(s->window + s->strstart, s->strm->next_in - used, used); + s.window.set(s.strm.input.subarray(s.strm.next_in - used, s.strm.next_in), s.strstart); + s.strstart += used; + s.insert += used > s.w_size - s.insert ? s.w_size - s.insert : used; + } + s.block_start = s.strstart; + } + if (s.high_water < s.strstart) { + s.high_water = s.strstart; + } + + /* If the last block was written to next_out, then done. */ + if (last) { + return BS_FINISH_DONE; + } + + /* If flushing and all input has been consumed, then done. */ + if (flush !== Z_NO_FLUSH$2 && flush !== Z_FINISH$3 && + s.strm.avail_in === 0 && s.strstart === s.block_start) { + return BS_BLOCK_DONE; + } + + /* Fill the window with any remaining input. */ + have = s.window_size - s.strstart; + if (s.strm.avail_in > have && s.block_start >= s.w_size) { + /* Slide the window down. */ + s.block_start -= s.w_size; + s.strstart -= s.w_size; + //zmemcpy(s->window, s->window + s->w_size, s->strstart); + s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0); + if (s.matches < 2) { + s.matches++; /* add a pending slide_hash() */ + } + have += s.w_size; /* more space now */ + if (s.insert > s.strstart) { + s.insert = s.strstart; + } + } + if (have > s.strm.avail_in) { + have = s.strm.avail_in; + } + if (have) { + read_buf(s.strm, s.window, s.strstart, have); + s.strstart += have; + s.insert += have > s.w_size - s.insert ? s.w_size - s.insert : have; + } + if (s.high_water < s.strstart) { + s.high_water = s.strstart; + } + + /* There was not enough avail_out to write a complete worthy or flushed + * stored block to next_out. Write a stored block to pending instead, if we + * have enough input for a worthy block, or if flushing and there is enough + * room for the remaining input as a stored block in the pending buffer. + */ + have = (s.bi_valid + 42) >> 3; /* number of header bytes */ + /* maximum stored block length that will fit in pending: */ + have = s.pending_buf_size - have > 65535/* MAX_STORED */ ? 65535/* MAX_STORED */ : s.pending_buf_size - have; + min_block = have > s.w_size ? s.w_size : have; + left = s.strstart - s.block_start; + if (left >= min_block || + ((left || flush === Z_FINISH$3) && flush !== Z_NO_FLUSH$2 && + s.strm.avail_in === 0 && left <= have)) { + len = left > have ? have : left; + last = flush === Z_FINISH$3 && s.strm.avail_in === 0 && + len === left ? 1 : 0; + _tr_stored_block(s, s.block_start, len, last); + s.block_start += len; + flush_pending(s.strm); + } + + /* We've done all we can with the available input and output. */ + return last ? 
BS_FINISH_STARTED : BS_NEED_MORE; +}; + + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +const deflate_fast = (s, flush) => { + + let hash_head; /* head of the hash chain */ + let bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$2) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { + break; /* flush the current block */ + } + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]); + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + } + if (s.match_length >= MIN_MATCH) { + // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only + + /*** _tr_tally_dist(s, s.strstart - s.match_start, + s.match_length - MIN_MATCH, bflush); ***/ + bflush = _tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) { + s.match_length--; /* string at strstart already in table */ + do { + s.strstart++; + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]); + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s.match_length !== 0); + s.strstart++; + } else + { + s.strstart += s.match_length; + s.match_length = 0; + s.ins_h = s.window[s.strstart]; + /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */ + s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + 1]); + +//#if MIN_MATCH != 3 +// Call UPDATE_HASH() MIN_MATCH-3 more times +//#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. 
+ */ + } + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s.window[s.strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = _tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1); + if (flush === Z_FINISH$3) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.sym_next) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +}; + +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. + */ +const deflate_slow = (s, flush) => { + + let hash_head; /* head of hash chain */ + let bflush; /* set if current block must be flushed */ + + let max_insert; + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s.lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$2) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + hash_head = 0/*NIL*/; + if (s.lookahead >= MIN_MATCH) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]); + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + + /* Find the longest match, discarding those <= prev_length. + */ + s.prev_length = s.match_length; + s.prev_match = s.match_start; + s.match_length = MIN_MATCH - 1; + + if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match && + s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + s.match_length = longest_match(s, hash_head); + /* longest_match() sets match_start */ + + if (s.match_length <= 5 && + (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. + */ + s.match_length = MIN_MATCH - 1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) { + max_insert = s.strstart + s.lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. 
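+ * (Hashing position p reads window[p + MIN_MATCH - 1], so positions past
+ * max_insert do not yet have MIN_MATCH bytes of lookahead available.)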
*/ + + //check_match(s, s.strstart-1, s.prev_match, s.prev_length); + + /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match, + s.prev_length - MIN_MATCH, bflush);***/ + bflush = _tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH); + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s.lookahead -= s.prev_length - 1; + s.prev_length -= 2; + do { + if (++s.strstart <= max_insert) { + /*** INSERT_STRING(s, s.strstart, hash_head); ***/ + s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]); + hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h]; + s.head[s.ins_h] = s.strstart; + /***/ + } + } while (--s.prev_length !== 0); + s.match_available = 0; + s.match_length = MIN_MATCH - 1; + s.strstart++; + + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + } else if (s.match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = _tr_tally(s, 0, s.window[s.strstart - 1]); + + if (bflush) { + /*** FLUSH_BLOCK_ONLY(s, 0) ***/ + flush_block_only(s, false); + /***/ + } + s.strstart++; + s.lookahead--; + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s.match_available = 1; + s.strstart++; + s.lookahead--; + } + } + //Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s.match_available) { + //Tracevv((stderr,"%c", s->window[s->strstart-1])); + /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/ + bflush = _tr_tally(s, 0, s.window[s.strstart - 1]); + + s.match_available = 0; + } + s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1; + if (flush === Z_FINISH$3) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.sym_next) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + + return BS_BLOCK_DONE; +}; + + +/* =========================================================================== + * For Z_RLE, simply look for runs of bytes, generate matches only of distance + * one. Do not maintain a hash table. (It will be regenerated if this run of + * deflate switches away from Z_RLE.) + */ +const deflate_rle = (s, flush) => { + + let bflush; /* set if current block must be flushed */ + let prev; /* byte at distance one to match */ + let scan, strend; /* scan goes up to strend for length of run */ + + const _win = s.window; + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the longest run, plus one for the unrolled loop. 
+ */ + if (s.lookahead <= MAX_MATCH) { + fill_window(s); + if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH$2) { + return BS_NEED_MORE; + } + if (s.lookahead === 0) { break; } /* flush the current block */ + } + + /* See how many times the previous byte repeats */ + s.match_length = 0; + if (s.lookahead >= MIN_MATCH && s.strstart > 0) { + scan = s.strstart - 1; + prev = _win[scan]; + if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) { + strend = s.strstart + MAX_MATCH; + do { + /*jshint noempty:false*/ + } while (prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + prev === _win[++scan] && prev === _win[++scan] && + scan < strend); + s.match_length = MAX_MATCH - (strend - scan); + if (s.match_length > s.lookahead) { + s.match_length = s.lookahead; + } + } + //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + } + + /* Emit match if have run of MIN_MATCH or longer, else emit literal */ + if (s.match_length >= MIN_MATCH) { + //check_match(s, s.strstart, s.strstart - 1, s.match_length); + + /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/ + bflush = _tr_tally(s, 1, s.match_length - MIN_MATCH); + + s.lookahead -= s.match_length; + s.strstart += s.match_length; + s.match_length = 0; + } else { + /* No match, output a literal byte */ + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = _tr_tally(s, 0, s.window[s.strstart]); + + s.lookahead--; + s.strstart++; + } + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH$3) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.sym_next) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +}; + +/* =========================================================================== + * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. + * (It will be regenerated if this run of deflate switches away from Huffman.) + */ +const deflate_huff = (s, flush) => { + + let bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we have a literal to write. 
*/ + if (s.lookahead === 0) { + fill_window(s); + if (s.lookahead === 0) { + if (flush === Z_NO_FLUSH$2) { + return BS_NEED_MORE; + } + break; /* flush the current block */ + } + } + + /* Output a literal byte */ + s.match_length = 0; + //Tracevv((stderr,"%c", s->window[s->strstart])); + /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/ + bflush = _tr_tally(s, 0, s.window[s.strstart]); + s.lookahead--; + s.strstart++; + if (bflush) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + } + s.insert = 0; + if (flush === Z_FINISH$3) { + /*** FLUSH_BLOCK(s, 1); ***/ + flush_block_only(s, true); + if (s.strm.avail_out === 0) { + return BS_FINISH_STARTED; + } + /***/ + return BS_FINISH_DONE; + } + if (s.sym_next) { + /*** FLUSH_BLOCK(s, 0); ***/ + flush_block_only(s, false); + if (s.strm.avail_out === 0) { + return BS_NEED_MORE; + } + /***/ + } + return BS_BLOCK_DONE; +}; + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. + */ +function Config(good_length, max_lazy, nice_length, max_chain, func) { + + this.good_length = good_length; + this.max_lazy = max_lazy; + this.nice_length = nice_length; + this.max_chain = max_chain; + this.func = func; +} + +const configuration_table = [ + /* good lazy nice chain */ + new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */ + new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */ + new Config(4, 5, 16, 8, deflate_fast), /* 2 */ + new Config(4, 6, 32, 32, deflate_fast), /* 3 */ + + new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */ + new Config(8, 16, 32, 32, deflate_slow), /* 5 */ + new Config(8, 16, 128, 128, deflate_slow), /* 6 */ + new Config(8, 32, 128, 256, deflate_slow), /* 7 */ + new Config(32, 128, 258, 1024, deflate_slow), /* 8 */ + new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */ +]; + + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +const lm_init = (s) => { + + s.window_size = 2 * s.w_size; + + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + + /* Set the default configuration parameters: + */ + s.max_lazy_match = configuration_table[s.level].max_lazy; + s.good_match = configuration_table[s.level].good_length; + s.nice_match = configuration_table[s.level].nice_length; + s.max_chain_length = configuration_table[s.level].max_chain; + + s.strstart = 0; + s.block_start = 0; + s.lookahead = 0; + s.insert = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + s.ins_h = 0; +}; + + +function DeflateState() { + this.strm = null; /* pointer back to this zlib stream */ + this.status = 0; /* as the name implies */ + this.pending_buf = null; /* output still pending */ + this.pending_buf_size = 0; /* size of pending_buf */ + this.pending_out = 0; /* next pending byte to output to the stream */ + this.pending = 0; /* nb of bytes in the pending buffer */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */ + this.gzhead = null; /* gzip header information to write */ + this.gzindex = 0; /* where in extra, name, or comment */ + this.method = Z_DEFLATED$2; /* can only be DEFLATED */ + this.last_flush = -1; /* value of flush param for previous deflate call 
*/ + + this.w_size = 0; /* LZ77 window size (32K by default) */ + this.w_bits = 0; /* log2(w_size) (8..16) */ + this.w_mask = 0; /* w_size - 1 */ + + this.window = null; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. + */ + + this.window_size = 0; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + this.prev = null; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + this.head = null; /* Heads of the hash chains or NIL. */ + + this.ins_h = 0; /* hash index of string to be inserted */ + this.hash_size = 0; /* number of elements in hash table */ + this.hash_bits = 0; /* log2(hash_size) */ + this.hash_mask = 0; /* hash_size-1 */ + + this.hash_shift = 0; + /* Number of bits by which ins_h must be shifted at each input + * step. It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + this.block_start = 0; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + this.match_length = 0; /* length of best match */ + this.prev_match = 0; /* previous match */ + this.match_available = 0; /* set if previous match exists */ + this.strstart = 0; /* start of string to insert */ + this.match_start = 0; /* start of matching string */ + this.lookahead = 0; /* number of valid bytes ahead in window */ + + this.prev_length = 0; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + this.max_chain_length = 0; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + this.max_lazy_match = 0; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. + */ + // That's alias to max_lazy_match, don't use directly + //this.max_insert_length = 0; + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. 
+ */ + + this.level = 0; /* compression level (1..9) */ + this.strategy = 0; /* favor or force Huffman coding*/ + + this.good_match = 0; + /* Use a faster search when the previous match is longer than this */ + + this.nice_match = 0; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + + /* Didn't use ct_data typedef below to suppress compiler warning */ + + // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + // Use flat array of DOUBLE size, with interleaved fata, + // because JS does not support effective + this.dyn_ltree = new Uint16Array(HEAP_SIZE * 2); + this.dyn_dtree = new Uint16Array((2 * D_CODES + 1) * 2); + this.bl_tree = new Uint16Array((2 * BL_CODES + 1) * 2); + zero(this.dyn_ltree); + zero(this.dyn_dtree); + zero(this.bl_tree); + + this.l_desc = null; /* desc. for literal tree */ + this.d_desc = null; /* desc. for distance tree */ + this.bl_desc = null; /* desc. for bit length tree */ + + //ush bl_count[MAX_BITS+1]; + this.bl_count = new Uint16Array(MAX_BITS + 1); + /* number of codes at each bit length for an optimal tree */ + + //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + this.heap = new Uint16Array(2 * L_CODES + 1); /* heap used to build the Huffman trees */ + zero(this.heap); + + this.heap_len = 0; /* number of elements in the heap */ + this.heap_max = 0; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + this.depth = new Uint16Array(2 * L_CODES + 1); //uch depth[2*L_CODES+1]; + zero(this.depth); + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + this.sym_buf = 0; /* buffer for distances and literals/lengths */ + + this.lit_bufsize = 0; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). + * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + this.sym_next = 0; /* running index in sym_buf */ + this.sym_end = 0; /* symbol table full when sym_next reaches this */ + + this.opt_len = 0; /* bit length of current block with optimal trees */ + this.static_len = 0; /* bit length of current block with static trees */ + this.matches = 0; /* number of string matches in current block */ + this.insert = 0; /* bytes at end of window left to insert */ + + + this.bi_buf = 0; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). 
+ */ + this.bi_valid = 0; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ + + // Used for window memory init. We safely ignore it for JS. That makes + // sense only for pointers and memory check tools. + //this.high_water = 0; + /* High water mark offset in window for initialized bytes -- bytes above + * this are set to zero in order to avoid memory check warnings when + * longest match routines access bytes past the input. This is then + * updated to the new high water mark. + */ +} + + +/* ========================================================================= + * Check for a valid deflate stream state. Return 0 if ok, 1 if not. + */ +const deflateStateCheck = (strm) => { + + if (!strm) { + return 1; + } + const s = strm.state; + if (!s || s.strm !== strm || (s.status !== INIT_STATE && +//#ifdef GZIP + s.status !== GZIP_STATE && +//#endif + s.status !== EXTRA_STATE && + s.status !== NAME_STATE && + s.status !== COMMENT_STATE && + s.status !== HCRC_STATE && + s.status !== BUSY_STATE && + s.status !== FINISH_STATE)) { + return 1; + } + return 0; +}; + + +const deflateResetKeep = (strm) => { + + if (deflateStateCheck(strm)) { + return err(strm, Z_STREAM_ERROR$2); + } + + strm.total_in = strm.total_out = 0; + strm.data_type = Z_UNKNOWN; + + const s = strm.state; + s.pending = 0; + s.pending_out = 0; + + if (s.wrap < 0) { + s.wrap = -s.wrap; + /* was made negative by deflate(..., Z_FINISH); */ + } + s.status = +//#ifdef GZIP + s.wrap === 2 ? GZIP_STATE : +//#endif + s.wrap ? INIT_STATE : BUSY_STATE; + strm.adler = (s.wrap === 2) ? + 0 // crc32(0, Z_NULL, 0) + : + 1; // adler32(0, Z_NULL, 0) + s.last_flush = -2; + _tr_init(s); + return Z_OK$3; +}; + + +const deflateReset = (strm) => { + + const ret = deflateResetKeep(strm); + if (ret === Z_OK$3) { + lm_init(strm.state); + } + return ret; +}; + + +const deflateSetHeader = (strm, head) => { + + if (deflateStateCheck(strm) || strm.state.wrap !== 2) { + return Z_STREAM_ERROR$2; + } + strm.state.gzhead = head; + return Z_OK$3; +}; + + +const deflateInit2 = (strm, level, method, windowBits, memLevel, strategy) => { + + if (!strm) { // === Z_NULL + return Z_STREAM_ERROR$2; + } + let wrap = 1; + + if (level === Z_DEFAULT_COMPRESSION$1) { + level = 6; + } + + if (windowBits < 0) { /* suppress zlib wrapper */ + wrap = 0; + windowBits = -windowBits; + } + + else if (windowBits > 15) { + wrap = 2; /* write gzip wrapper instead */ + windowBits -= 16; + } + + + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED$2 || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_FIXED || (windowBits === 8 && wrap !== 1)) { + return err(strm, Z_STREAM_ERROR$2); + } + + + if (windowBits === 8) { + windowBits = 9; + } + /* until 256-byte window bug fixed */ + + const s = new DeflateState(); + + strm.state = s; + s.strm = strm; + s.status = INIT_STATE; /* to pass state test in deflateReset() */ + + s.wrap = wrap; + s.gzhead = null; + s.w_bits = windowBits; + s.w_size = 1 << s.w_bits; + s.w_mask = s.w_size - 1; + + s.hash_bits = memLevel + 7; + s.hash_size = 1 << s.hash_bits; + s.hash_mask = s.hash_size - 1; + s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH); + + s.window = new Uint8Array(s.w_size * 2); + s.head = new Uint16Array(s.hash_size); + s.prev = new Uint16Array(s.w_size); + + // Don't need mem init magic for JS. 
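+  // Illustrative sizes, assuming the default memLevel = 8 and windowBits = 15
+  // (a worked check of the assignments above; nothing extra is computed here):
+  //   hash_bits  = 8 + 7 = 15,  hash_size = 1 << 15 = 32768
+  //   hash_shift = ~~((15 + 3 - 1) / 3) = 5   (MIN_MATCH is 3)
+  //   w_size     = 1 << 15 = 32768
+  // so `window` is a 64 KiB Uint8Array and `head`/`prev` are 32768-entry
+  // Uint16Array hash-head/chain tables.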
+ //s.high_water = 0; /* nothing written to s->window yet */ + + s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + /* We overlay pending_buf and sym_buf. This works since the average size + * for length/distance pairs over any compressed block is assured to be 31 + * bits or less. + * + * Analysis: The longest fixed codes are a length code of 8 bits plus 5 + * extra bits, for lengths 131 to 257. The longest fixed distance codes are + * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest + * possible fixed-codes length/distance pair is then 31 bits total. + * + * sym_buf starts one-fourth of the way into pending_buf. So there are + * three bytes in sym_buf for every four bytes in pending_buf. Each symbol + * in sym_buf is three bytes -- two for the distance and one for the + * literal/length. As each symbol is consumed, the pointer to the next + * sym_buf value to read moves forward three bytes. From that symbol, up to + * 31 bits are written to pending_buf. The closest the written pending_buf + * bits gets to the next sym_buf symbol to read is just before the last + * code is written. At that time, 31*(n-2) bits have been written, just + * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at + * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1 + * symbols are written.) The closest the writing gets to what is unread is + * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and + * can range from 128 to 32768. + * + * Therefore, at a minimum, there are 142 bits of space between what is + * written and what is read in the overlain buffers, so the symbols cannot + * be overwritten by the compressed data. That space is actually 139 bits, + * due to the three-bit fixed-code block header. + * + * That covers the case where either Z_FIXED is specified, forcing fixed + * codes, or when the use of fixed codes is chosen, because that choice + * results in a smaller compressed block than dynamic codes. That latter + * condition then assures that the above analysis also covers all dynamic + * blocks. A dynamic-code block will only be chosen to be emitted if it has + * fewer bits than a fixed-code block would for the same set of symbols. + * Therefore its average symbol length is assured to be less than 31. So + * the compressed data for a dynamic block also cannot overwrite the + * symbols from which it is being constructed. + */ + + s.pending_buf_size = s.lit_bufsize * 4; + s.pending_buf = new Uint8Array(s.pending_buf_size); + + // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`) + //s->sym_buf = s->pending_buf + s->lit_bufsize; + s.sym_buf = s.lit_bufsize; + + //s->sym_end = (s->lit_bufsize - 1) * 3; + s.sym_end = (s.lit_bufsize - 1) * 3; + /* We avoid equality with lit_bufsize*3 because of wraparound at 64K + * on 16 bit machines and because stored blocks are restricted to + * 64K-1 bytes. + */ + + s.level = level; + s.strategy = strategy; + s.method = method; + + return deflateReset(strm); +}; + +const deflateInit = (strm, level) => { + + return deflateInit2(strm, level, Z_DEFLATED$2, MAX_WBITS$1, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1); +}; + + +/* ========================================================================= */ +const deflate$2 = (strm, flush) => { + + if (deflateStateCheck(strm) || flush > Z_BLOCK$1 || flush < 0) { + return strm ? 
err(strm, Z_STREAM_ERROR$2) : Z_STREAM_ERROR$2; + } + + const s = strm.state; + + if (!strm.output || + (strm.avail_in !== 0 && !strm.input) || + (s.status === FINISH_STATE && flush !== Z_FINISH$3)) { + return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR$1 : Z_STREAM_ERROR$2); + } + + const old_flush = s.last_flush; + s.last_flush = flush; + + /* Flush as much pending output as possible */ + if (s.pending !== 0) { + flush_pending(strm); + if (strm.avail_out === 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s.last_flush = -1; + return Z_OK$3; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUF_ERROR. + */ + } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) && + flush !== Z_FINISH$3) { + return err(strm, Z_BUF_ERROR$1); + } + + /* User must not provide more input after the first FINISH: */ + if (s.status === FINISH_STATE && strm.avail_in !== 0) { + return err(strm, Z_BUF_ERROR$1); + } + + /* Write the header */ + if (s.status === INIT_STATE && s.wrap === 0) { + s.status = BUSY_STATE; + } + if (s.status === INIT_STATE) { + /* zlib header */ + let header = (Z_DEFLATED$2 + ((s.w_bits - 8) << 4)) << 8; + let level_flags = -1; + + if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) { + level_flags = 0; + } else if (s.level < 6) { + level_flags = 1; + } else if (s.level === 6) { + level_flags = 2; + } else { + level_flags = 3; + } + header |= (level_flags << 6); + if (s.strstart !== 0) { header |= PRESET_DICT; } + header += 31 - (header % 31); + + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s.strstart !== 0) { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + strm.adler = 1; // adler32(0L, Z_NULL, 0); + s.status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + } +//#ifdef GZIP + if (s.status === GZIP_STATE) { + /* gzip header */ + strm.adler = 0; //crc32(0L, Z_NULL, 0); + put_byte(s, 31); + put_byte(s, 139); + put_byte(s, 8); + if (!s.gzhead) { // s->gzhead == Z_NULL + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, 0); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? + 4 : 0)); + put_byte(s, OS_CODE); + s.status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + } + else { + put_byte(s, (s.gzhead.text ? 1 : 0) + + (s.gzhead.hcrc ? 2 : 0) + + (!s.gzhead.extra ? 0 : 4) + + (!s.gzhead.name ? 0 : 8) + + (!s.gzhead.comment ? 0 : 16) + ); + put_byte(s, s.gzhead.time & 0xff); + put_byte(s, (s.gzhead.time >> 8) & 0xff); + put_byte(s, (s.gzhead.time >> 16) & 0xff); + put_byte(s, (s.gzhead.time >> 24) & 0xff); + put_byte(s, s.level === 9 ? 2 : + (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ? 
+ 4 : 0)); + put_byte(s, s.gzhead.os & 0xff); + if (s.gzhead.extra && s.gzhead.extra.length) { + put_byte(s, s.gzhead.extra.length & 0xff); + put_byte(s, (s.gzhead.extra.length >> 8) & 0xff); + } + if (s.gzhead.hcrc) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending, 0); + } + s.gzindex = 0; + s.status = EXTRA_STATE; + } + } + if (s.status === EXTRA_STATE) { + if (s.gzhead.extra/* != Z_NULL*/) { + let beg = s.pending; /* start of bytes to update crc */ + let left = (s.gzhead.extra.length & 0xffff) - s.gzindex; + while (s.pending + left > s.pending_buf_size) { + let copy = s.pending_buf_size - s.pending; + // zmemcpy(s.pending_buf + s.pending, + // s.gzhead.extra + s.gzindex, copy); + s.pending_buf.set(s.gzhead.extra.subarray(s.gzindex, s.gzindex + copy), s.pending); + s.pending = s.pending_buf_size; + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + s.gzindex += copy; + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + beg = 0; + left -= copy; + } + // JS specific: s.gzhead.extra may be TypedArray or Array for backward compatibility + // TypedArray.slice and TypedArray.from don't exist in IE10-IE11 + let gzhead_extra = new Uint8Array(s.gzhead.extra); + // zmemcpy(s->pending_buf + s->pending, + // s->gzhead->extra + s->gzindex, left); + s.pending_buf.set(gzhead_extra.subarray(s.gzindex, s.gzindex + left), s.pending); + s.pending += left; + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + s.gzindex = 0; + } + s.status = NAME_STATE; + } + if (s.status === NAME_STATE) { + if (s.gzhead.name/* != Z_NULL*/) { + let beg = s.pending; /* start of bytes to update crc */ + let val; + do { + if (s.pending === s.pending_buf_size) { + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + beg = 0; + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.name.length) { + val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + s.gzindex = 0; + } + s.status = COMMENT_STATE; + } + if (s.status === COMMENT_STATE) { + if (s.gzhead.comment/* != Z_NULL*/) { + let beg = s.pending; /* start of bytes to update crc */ + let val; + do { + if (s.pending === s.pending_buf_size) { + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + beg = 0; + } + // JS specific: little magic to add zero terminator to end of string + if (s.gzindex < s.gzhead.comment.length) { + val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff; + } else { + val = 0; + } + put_byte(s, val); + } while (val !== 0); + //--- HCRC_UPDATE(beg) ---// + if (s.gzhead.hcrc && s.pending > beg) { + strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg); + } + //---// + } + s.status = HCRC_STATE; + } + if (s.status === 
HCRC_STATE) { + if (s.gzhead.hcrc) { + if (s.pending + 2 > s.pending_buf_size) { + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + } + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + strm.adler = 0; //crc32(0L, Z_NULL, 0); + } + s.status = BUSY_STATE; + + /* Compression must start with an empty pending buffer */ + flush_pending(strm); + if (s.pending !== 0) { + s.last_flush = -1; + return Z_OK$3; + } + } +//#endif + + /* Start a new block or continue the current one. + */ + if (strm.avail_in !== 0 || s.lookahead !== 0 || + (flush !== Z_NO_FLUSH$2 && s.status !== FINISH_STATE)) { + let bstate = s.level === 0 ? deflate_stored(s, flush) : + s.strategy === Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : + s.strategy === Z_RLE ? deflate_rle(s, flush) : + configuration_table[s.level].func(s, flush); + + if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) { + s.status = FINISH_STATE; + } + if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) { + if (strm.avail_out === 0) { + s.last_flush = -1; + /* avoid BUF_ERROR next call, see above */ + } + return Z_OK$3; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. + */ + } + if (bstate === BS_BLOCK_DONE) { + if (flush === Z_PARTIAL_FLUSH) { + _tr_align(s); + } + else if (flush !== Z_BLOCK$1) { /* FULL_FLUSH or SYNC_FLUSH */ + + _tr_stored_block(s, 0, 0, false); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). + */ + if (flush === Z_FULL_FLUSH$1) { + /*** CLEAR_HASH(s); ***/ /* forget history */ + zero(s.head); // Fill with NIL (= 0); + + if (s.lookahead === 0) { + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + } + } + flush_pending(strm); + if (strm.avail_out === 0) { + s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK$3; + } + } + } + + if (flush !== Z_FINISH$3) { return Z_OK$3; } + if (s.wrap <= 0) { return Z_STREAM_END$3; } + + /* Write the trailer */ + if (s.wrap === 2) { + put_byte(s, strm.adler & 0xff); + put_byte(s, (strm.adler >> 8) & 0xff); + put_byte(s, (strm.adler >> 16) & 0xff); + put_byte(s, (strm.adler >> 24) & 0xff); + put_byte(s, strm.total_in & 0xff); + put_byte(s, (strm.total_in >> 8) & 0xff); + put_byte(s, (strm.total_in >> 16) & 0xff); + put_byte(s, (strm.total_in >> 24) & 0xff); + } + else + { + putShortMSB(s, strm.adler >>> 16); + putShortMSB(s, strm.adler & 0xffff); + } + + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + if (s.wrap > 0) { s.wrap = -s.wrap; } + /* write the trailer only once! */ + return s.pending !== 0 ? Z_OK$3 : Z_STREAM_END$3; +}; + + +const deflateEnd = (strm) => { + + if (deflateStateCheck(strm)) { + return Z_STREAM_ERROR$2; + } + + const status = strm.state.status; + + strm.state = null; + + return status === BUSY_STATE ? err(strm, Z_DATA_ERROR$2) : Z_OK$3; +}; + + +/* ========================================================================= + * Initializes the compression dictionary from the given byte + * sequence without producing any compressed output. 
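+ *
+ * (Illustrative sketch: the wrapper further below reaches this through the
+ * `dictionary` option, e.g. `new pako.Deflate({ dictionary: presetDict })`,
+ * where `presetDict` stands for any String, ArrayBuffer or Uint8Array.)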
+ */ +const deflateSetDictionary = (strm, dictionary) => { + + let dictLength = dictionary.length; + + if (deflateStateCheck(strm)) { + return Z_STREAM_ERROR$2; + } + + const s = strm.state; + const wrap = s.wrap; + + if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) { + return Z_STREAM_ERROR$2; + } + + /* when using zlib wrappers, compute Adler-32 for provided dictionary */ + if (wrap === 1) { + /* adler32(strm->adler, dictionary, dictLength); */ + strm.adler = adler32_1(strm.adler, dictionary, dictLength, 0); + } + + s.wrap = 0; /* avoid computing Adler-32 in read_buf */ + + /* if dictionary would fill window, just replace the history */ + if (dictLength >= s.w_size) { + if (wrap === 0) { /* already empty otherwise */ + /*** CLEAR_HASH(s); ***/ + zero(s.head); // Fill with NIL (= 0); + s.strstart = 0; + s.block_start = 0; + s.insert = 0; + } + /* use the tail */ + // dictionary = dictionary.slice(dictLength - s.w_size); + let tmpDict = new Uint8Array(s.w_size); + tmpDict.set(dictionary.subarray(dictLength - s.w_size, dictLength), 0); + dictionary = tmpDict; + dictLength = s.w_size; + } + /* insert dictionary into window and hash */ + const avail = strm.avail_in; + const next = strm.next_in; + const input = strm.input; + strm.avail_in = dictLength; + strm.next_in = 0; + strm.input = dictionary; + fill_window(s); + while (s.lookahead >= MIN_MATCH) { + let str = s.strstart; + let n = s.lookahead - (MIN_MATCH - 1); + do { + /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */ + s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]); + + s.prev[str & s.w_mask] = s.head[s.ins_h]; + + s.head[s.ins_h] = str; + str++; + } while (--n); + s.strstart = str; + s.lookahead = MIN_MATCH - 1; + fill_window(s); + } + s.strstart += s.lookahead; + s.block_start = s.strstart; + s.insert = s.lookahead; + s.lookahead = 0; + s.match_length = s.prev_length = MIN_MATCH - 1; + s.match_available = 0; + strm.next_in = next; + strm.input = input; + strm.avail_in = avail; + s.wrap = wrap; + return Z_OK$3; +}; + + +var deflateInit_1 = deflateInit; +var deflateInit2_1 = deflateInit2; +var deflateReset_1 = deflateReset; +var deflateResetKeep_1 = deflateResetKeep; +var deflateSetHeader_1 = deflateSetHeader; +var deflate_2$1 = deflate$2; +var deflateEnd_1 = deflateEnd; +var deflateSetDictionary_1 = deflateSetDictionary; +var deflateInfo = 'pako deflate (from Nodeca project)'; + +/* Not implemented +module.exports.deflateBound = deflateBound; +module.exports.deflateCopy = deflateCopy; +module.exports.deflateGetDictionary = deflateGetDictionary; +module.exports.deflateParams = deflateParams; +module.exports.deflatePending = deflatePending; +module.exports.deflatePrime = deflatePrime; +module.exports.deflateTune = deflateTune; +*/ + +var deflate_1$2 = { + deflateInit: deflateInit_1, + deflateInit2: deflateInit2_1, + deflateReset: deflateReset_1, + deflateResetKeep: deflateResetKeep_1, + deflateSetHeader: deflateSetHeader_1, + deflate: deflate_2$1, + deflateEnd: deflateEnd_1, + deflateSetDictionary: deflateSetDictionary_1, + deflateInfo: deflateInfo +}; + +const _has = (obj, key) => { + return Object.prototype.hasOwnProperty.call(obj, key); +}; + +var assign = function (obj /*from1, from2, from3, ...*/) { + const sources = Array.prototype.slice.call(arguments, 1); + while (sources.length) { + const source = sources.shift(); + if (!source) { continue; } + + if (typeof source !== 'object') { + throw new TypeError(source + 'must be non-object'); + } + + for (const p in source) { + if 
(_has(source, p)) { + obj[p] = source[p]; + } + } + } + + return obj; +}; + + +// Join array of chunks to single array. +var flattenChunks = (chunks) => { + // calculate data length + let len = 0; + + for (let i = 0, l = chunks.length; i < l; i++) { + len += chunks[i].length; + } + + // join chunks + const result = new Uint8Array(len); + + for (let i = 0, pos = 0, l = chunks.length; i < l; i++) { + let chunk = chunks[i]; + result.set(chunk, pos); + pos += chunk.length; + } + + return result; +}; + +var common = { + assign: assign, + flattenChunks: flattenChunks +}; + +// String encode/decode helpers + + +// Quick check if we can use fast array to bin string conversion +// +// - apply(Array) can fail on Android 2.2 +// - apply(Uint8Array) can fail on iOS 5.1 Safari +// +let STR_APPLY_UIA_OK = true; + +try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; } + + +// Table with utf8 lengths (calculated by first byte of sequence) +// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS, +// because max possible codepoint is 0x10ffff +const _utf8len = new Uint8Array(256); +for (let q = 0; q < 256; q++) { + _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1); +} +_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start + + +// convert string to array (typed, when possible) +var string2buf = (str) => { + if (typeof TextEncoder === 'function' && TextEncoder.prototype.encode) { + return new TextEncoder().encode(str); + } + + let buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0; + + // count binary size + for (m_pos = 0; m_pos < str_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4; + } + + // allocate buffer + buf = new Uint8Array(buf_len); + + // convert + for (i = 0, m_pos = 0; i < buf_len; m_pos++) { + c = str.charCodeAt(m_pos); + if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) { + c2 = str.charCodeAt(m_pos + 1); + if ((c2 & 0xfc00) === 0xdc00) { + c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00); + m_pos++; + } + } + if (c < 0x80) { + /* one byte */ + buf[i++] = c; + } else if (c < 0x800) { + /* two bytes */ + buf[i++] = 0xC0 | (c >>> 6); + buf[i++] = 0x80 | (c & 0x3f); + } else if (c < 0x10000) { + /* three bytes */ + buf[i++] = 0xE0 | (c >>> 12); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } else { + /* four bytes */ + buf[i++] = 0xf0 | (c >>> 18); + buf[i++] = 0x80 | (c >>> 12 & 0x3f); + buf[i++] = 0x80 | (c >>> 6 & 0x3f); + buf[i++] = 0x80 | (c & 0x3f); + } + } + + return buf; +}; + +// Helper +const buf2binstring = (buf, len) => { + // On Chrome, the arguments in a function call that are allowed is `65534`. + // If the length of the buffer is smaller than that, we can use this optimization, + // otherwise we will take a slower path. + if (len < 65534) { + if (buf.subarray && STR_APPLY_UIA_OK) { + return String.fromCharCode.apply(null, buf.length === len ? 
buf : buf.subarray(0, len)); + } + } + + let result = ''; + for (let i = 0; i < len; i++) { + result += String.fromCharCode(buf[i]); + } + return result; +}; + + +// convert array to string +var buf2string = (buf, max) => { + const len = max || buf.length; + + if (typeof TextDecoder === 'function' && TextDecoder.prototype.decode) { + return new TextDecoder().decode(buf.subarray(0, max)); + } + + let i, out; + + // Reserve max possible length (2 words per char) + // NB: by unknown reasons, Array is significantly faster for + // String.fromCharCode.apply than Uint16Array. + const utf16buf = new Array(len * 2); + + for (out = 0, i = 0; i < len;) { + let c = buf[i++]; + // quick process ascii + if (c < 0x80) { utf16buf[out++] = c; continue; } + + let c_len = _utf8len[c]; + // skip 5 & 6 byte codes + if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; } + + // apply mask on first byte + c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07; + // join the rest + while (c_len > 1 && i < len) { + c = (c << 6) | (buf[i++] & 0x3f); + c_len--; + } + + // terminated by end of string? + if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; } + + if (c < 0x10000) { + utf16buf[out++] = c; + } else { + c -= 0x10000; + utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff); + utf16buf[out++] = 0xdc00 | (c & 0x3ff); + } + } + + return buf2binstring(utf16buf, out); +}; + + +// Calculate max possible position in utf8 buffer, +// that will not break sequence. If that's not possible +// - (very small limits) return max size as is. +// +// buf[] - utf8 bytes array +// max - length limit (mandatory); +var utf8border = (buf, max) => { + + max = max || buf.length; + if (max > buf.length) { max = buf.length; } + + // go back from last position, until start of sequence found + let pos = max - 1; + while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; } + + // Very small and broken sequence, + // return max, because we should return something anyway. + if (pos < 0) { return max; } + + // If we came to start of buffer - that means buffer is too small, + // return max too. + if (pos === 0) { return max; } + + return (pos + _utf8len[buf[pos]] > max) ? pos : max; +}; + +var strings = { + string2buf: string2buf, + buf2string: buf2string, + utf8border: utf8border +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
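+
+// ZStream below mirrors zlib's z_stream struct. A minimal sketch of how the
+// wrapper classes later in this file feed it (illustrative only; `data` and
+// `chunkSize` are placeholders -- see Deflate#push):
+//
+//   strm.input     = data;                       // whole input buffer
+//   strm.next_in   = 0;                          // read offset into input
+//   strm.avail_in  = data.length;
+//   strm.output    = new Uint8Array(chunkSize);  // whole output buffer
+//   strm.next_out  = 0;                          // write offset into output
+//   strm.avail_out = chunkSize;
+//
+// Because JS has no pointers, `input`/`output` hold whole buffers and the
+// `next_*` fields are integer offsets into them.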
+ +function ZStream() { + /* next input byte */ + this.input = null; // JS specific, because we have no pointers + this.next_in = 0; + /* number of bytes available at input */ + this.avail_in = 0; + /* total number of input bytes read so far */ + this.total_in = 0; + /* next output byte should be put there */ + this.output = null; // JS specific, because we have no pointers + this.next_out = 0; + /* remaining free space at output */ + this.avail_out = 0; + /* total number of bytes output so far */ + this.total_out = 0; + /* last error message, NULL if no error */ + this.msg = ''/*Z_NULL*/; + /* not visible by applications */ + this.state = null; + /* best guess about the data type: binary or text */ + this.data_type = 2/*Z_UNKNOWN*/; + /* adler32 value of the uncompressed data */ + this.adler = 0; +} + +var zstream = ZStream; + +const toString$1 = Object.prototype.toString; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +const { + Z_NO_FLUSH: Z_NO_FLUSH$1, Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH: Z_FINISH$2, + Z_OK: Z_OK$2, Z_STREAM_END: Z_STREAM_END$2, + Z_DEFAULT_COMPRESSION, + Z_DEFAULT_STRATEGY, + Z_DEFLATED: Z_DEFLATED$1 +} = constants$2; + +/* ===========================================================================*/ + + +/** + * class Deflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[deflate]], + * [[deflateRaw]] and [[gzip]]. + **/ + +/* internal + * Deflate.chunks -> Array + * + * Chunks of output data, if [[Deflate#onData]] not overridden. + **/ + +/** + * Deflate.result -> Uint8Array + * + * Compressed result, generated by default [[Deflate#onData]] + * and [[Deflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Deflate#push]] with `Z_FINISH` / `true` param). + **/ + +/** + * Deflate.err -> Number + * + * Error code after deflate finished. 0 (Z_OK) on success. + * You will not need it in real life, because deflate errors + * are possible only on wrong options or bad `onData` / `onEnd` + * custom handlers. + **/ + +/** + * Deflate.msg -> String + * + * Error message, if [[Deflate.err]] != 0 + **/ + + +/** + * new Deflate(options) + * - options (Object): zlib deflate options. + * + * Creates new deflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `level` + * - `windowBits` + * - `memLevel` + * - `strategy` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. 
+ * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw deflate + * - `gzip` (Boolean) - create gzip wrapper + * - `header` (Object) - custom header for gzip + * - `text` (Boolean) - true if compressed data believed to be text + * - `time` (Number) - modification time, unix timestamp + * - `os` (Number) - operation system code + * - `extra` (Array) - array of bytes with extra data (max 65536) + * - `name` (String) - file name (binary string) + * - `comment` (String) - comment (binary string) + * - `hcrc` (Boolean) - true if header crc should be added + * + * ##### Example: + * + * ```javascript + * const pako = require('pako') + * , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9]) + * , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * const deflate = new pako.Deflate({ level: 3}); + * + * deflate.push(chunk1, false); + * deflate.push(chunk2, true); // true -> last chunk + * + * if (deflate.err) { throw new Error(deflate.err); } + * + * console.log(deflate.result); + * ``` + **/ +function Deflate$1(options) { + this.options = common.assign({ + level: Z_DEFAULT_COMPRESSION, + method: Z_DEFLATED$1, + chunkSize: 16384, + windowBits: 15, + memLevel: 8, + strategy: Z_DEFAULT_STRATEGY + }, options || {}); + + let opt = this.options; + + if (opt.raw && (opt.windowBits > 0)) { + opt.windowBits = -opt.windowBits; + } + + else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) { + opt.windowBits += 16; + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new zstream(); + this.strm.avail_out = 0; + + let status = deflate_1$2.deflateInit2( + this.strm, + opt.level, + opt.method, + opt.windowBits, + opt.memLevel, + opt.strategy + ); + + if (status !== Z_OK$2) { + throw new Error(messages[status]); + } + + if (opt.header) { + deflate_1$2.deflateSetHeader(this.strm, opt.header); + } + + if (opt.dictionary) { + let dict; + // Convert data if needed + if (typeof opt.dictionary === 'string') { + // If we need to compress text, change encoding to utf8. + dict = strings.string2buf(opt.dictionary); + } else if (toString$1.call(opt.dictionary) === '[object ArrayBuffer]') { + dict = new Uint8Array(opt.dictionary); + } else { + dict = opt.dictionary; + } + + status = deflate_1$2.deflateSetDictionary(this.strm, dict); + + if (status !== Z_OK$2) { + throw new Error(messages[status]); + } + + this._dict_set = true; + } +} + +/** + * Deflate#push(data[, flush_mode]) -> Boolean + * - data (Uint8Array|ArrayBuffer|String): input data. Strings will be + * converted to utf8 byte sequence. + * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes. + * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH. + * + * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with + * new compressed chunks. Returns `true` on success. The last data block must + * have `flush_mode` Z_FINISH (or `true`). That will flush internal pending + * buffers and call [[Deflate#onEnd]]. + * + * On fail call [[Deflate#onEnd]] with error code and return false. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... 
+ * push(chunk, true); // push last chunk + * ``` + **/ +Deflate$1.prototype.push = function (data, flush_mode) { + const strm = this.strm; + const chunkSize = this.options.chunkSize; + let status, _flush_mode; + + if (this.ended) { return false; } + + if (flush_mode === ~~flush_mode) _flush_mode = flush_mode; + else _flush_mode = flush_mode === true ? Z_FINISH$2 : Z_NO_FLUSH$1; + + // Convert data if needed + if (typeof data === 'string') { + // If we need to compress text, change encoding to utf8. + strm.input = strings.string2buf(data); + } else if (toString$1.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + for (;;) { + if (strm.avail_out === 0) { + strm.output = new Uint8Array(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + + // Make sure avail_out > 6 to avoid repeating markers + if ((_flush_mode === Z_SYNC_FLUSH || _flush_mode === Z_FULL_FLUSH) && strm.avail_out <= 6) { + this.onData(strm.output.subarray(0, strm.next_out)); + strm.avail_out = 0; + continue; + } + + status = deflate_1$2.deflate(strm, _flush_mode); + + // Ended => flush and finish + if (status === Z_STREAM_END$2) { + if (strm.next_out > 0) { + this.onData(strm.output.subarray(0, strm.next_out)); + } + status = deflate_1$2.deflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return status === Z_OK$2; + } + + // Flush if out buffer full + if (strm.avail_out === 0) { + this.onData(strm.output); + continue; + } + + // Flush if requested and has data + if (_flush_mode > 0 && strm.next_out > 0) { + this.onData(strm.output.subarray(0, strm.next_out)); + strm.avail_out = 0; + continue; + } + + if (strm.avail_in === 0) break; + } + + return true; +}; + + +/** + * Deflate#onData(chunk) -> Void + * - chunk (Uint8Array): output data. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Deflate$1.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Deflate#onEnd(status) -> Void + * - status (Number): deflate status. 0 (Z_OK) on success, + * other if not. + * + * Called once after you tell deflate that the input stream is + * complete (Z_FINISH). By default - join collected chunks, + * free memory and fill `results` / `err` properties. + **/ +Deflate$1.prototype.onEnd = function (status) { + // On success - join + if (status === Z_OK$2) { + this.result = common.flattenChunks(this.chunks); + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * deflate(data[, options]) -> Uint8Array + * - data (Uint8Array|ArrayBuffer|String): input data to compress. + * - options (Object): zlib deflate options. + * + * Compress `data` with deflate algorithm and `options`. + * + * Supported options are: + * + * - level + * - windowBits + * - memLevel + * - strategy + * - dictionary + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. 
+ * + * ##### Example: + * + * ```javascript + * const pako = require('pako') + * const data = new Uint8Array([1,2,3,4,5,6,7,8,9]); + * + * console.log(pako.deflate(data)); + * ``` + **/ +function deflate$1(input, options) { + const deflator = new Deflate$1(options); + + deflator.push(input, true); + + // That will never happens, if you don't cheat with options :) + if (deflator.err) { throw deflator.msg || messages[deflator.err]; } + + return deflator.result; +} + + +/** + * deflateRaw(data[, options]) -> Uint8Array + * - data (Uint8Array|ArrayBuffer|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function deflateRaw$1(input, options) { + options = options || {}; + options.raw = true; + return deflate$1(input, options); +} + + +/** + * gzip(data[, options]) -> Uint8Array + * - data (Uint8Array|ArrayBuffer|String): input data to compress. + * - options (Object): zlib deflate options. + * + * The same as [[deflate]], but create gzip wrapper instead of + * deflate one. + **/ +function gzip$1(input, options) { + options = options || {}; + options.gzip = true; + return deflate$1(input, options); +} + + +var Deflate_1$1 = Deflate$1; +var deflate_2 = deflate$1; +var deflateRaw_1$1 = deflateRaw$1; +var gzip_1$1 = gzip$1; +var constants$1 = constants$2; + +var deflate_1$1 = { + Deflate: Deflate_1$1, + deflate: deflate_2, + deflateRaw: deflateRaw_1$1, + gzip: gzip_1$1, + constants: constants$1 +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +// See state defs from inflate.js +const BAD$1 = 16209; /* got a data error -- remain here until reset */ +const TYPE$1 = 16191; /* i: waiting for type bits, including last-flag bit */ + +/* + Decode literal, length, and distance codes and write out the resulting + literal and match bytes until either not enough input or output is + available, an end-of-block is encountered, or a data error is encountered. + When large enough input and output buffers are supplied to inflate(), for + example, a 16K input buffer and a 64K output buffer, more than 95% of the + inflate execution time is spent in this routine. 
+ + Entry assumptions: + + state.mode === LEN + strm.avail_in >= 6 + strm.avail_out >= 258 + start >= strm.avail_out + state.bits < 8 + + On return, state.mode is one of: + + LEN -- ran out of enough output space or enough available input + TYPE -- reached end of block code, inflate() to interpret next block + BAD -- error in block data + + Notes: + + - The maximum input bits used by a length/distance pair is 15 bits for the + length code, 5 bits for the length extra, 15 bits for the distance code, + and 13 bits for the distance extra. This totals 48 bits, or six bytes. + Therefore if strm.avail_in >= 6, then there is enough input to avoid + checking for available input while decoding. + + - The maximum bytes that a single length/distance pair can output is 258 + bytes, which is the maximum length that can be coded. inflate_fast() + requires strm.avail_out >= 258 for each loop to avoid checking for + output space. + */ +var inffast = function inflate_fast(strm, start) { + let _in; /* local strm.input */ + let last; /* have enough input while in < last */ + let _out; /* local strm.output */ + let beg; /* inflate()'s initial strm.output */ + let end; /* while out < end, enough space available */ +//#ifdef INFLATE_STRICT + let dmax; /* maximum distance from zlib header */ +//#endif + let wsize; /* window size or zero if not using window */ + let whave; /* valid bytes in the window */ + let wnext; /* window write index */ + // Use `s_window` instead `window`, avoid conflict with instrumentation tools + let s_window; /* allocated sliding window, if wsize != 0 */ + let hold; /* local strm.hold */ + let bits; /* local strm.bits */ + let lcode; /* local strm.lencode */ + let dcode; /* local strm.distcode */ + let lmask; /* mask for first level of length codes */ + let dmask; /* mask for first level of distance codes */ + let here; /* retrieved table entry */ + let op; /* code bits, operation, extra bits, or */ + /* window position, window bytes to copy */ + let len; /* match length, unused bytes */ + let dist; /* match distance */ + let from; /* where to copy match from */ + let from_source; + + + let input, output; // JS specific, because we have no pointers + + /* copy state to local variables */ + const state = strm.state; + //here = state.here; + _in = strm.next_in; + input = strm.input; + last = _in + (strm.avail_in - 5); + _out = strm.next_out; + output = strm.output; + beg = _out - (start - strm.avail_out); + end = _out + (strm.avail_out - 257); +//#ifdef INFLATE_STRICT + dmax = state.dmax; +//#endif + wsize = state.wsize; + whave = state.whave; + wnext = state.wnext; + s_window = state.window; + hold = state.hold; + bits = state.bits; + lcode = state.lencode; + dcode = state.distcode; + lmask = (1 << state.lenbits) - 1; + dmask = (1 << state.distbits) - 1; + + + /* decode literals and length/distances until end-of-block or not enough + input data or output space */ + + top: + do { + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + + here = lcode[hold & lmask]; + + dolen: + for (;;) { // Goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + if (op === 0) { /* literal */ + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + output[_out++] = here & 0xffff/*here.val*/; + } + else if (op & 16) { /* length base */ + len = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (op) { + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + len += hold & ((1 << op) - 1); + hold >>>= op; + bits -= op; + } + //Tracevv((stderr, "inflate: length %u\n", len)); + if (bits < 15) { + hold += input[_in++] << bits; + bits += 8; + hold += input[_in++] << bits; + bits += 8; + } + here = dcode[hold & dmask]; + + dodist: + for (;;) { // goto emulation + op = here >>> 24/*here.bits*/; + hold >>>= op; + bits -= op; + op = (here >>> 16) & 0xff/*here.op*/; + + if (op & 16) { /* distance base */ + dist = here & 0xffff/*here.val*/; + op &= 15; /* number of extra bits */ + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + if (bits < op) { + hold += input[_in++] << bits; + bits += 8; + } + } + dist += hold & ((1 << op) - 1); +//#ifdef INFLATE_STRICT + if (dist > dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD$1; + break top; + } +//#endif + hold >>>= op; + bits -= op; + //Tracevv((stderr, "inflate: distance %u\n", dist)); + op = _out - beg; /* max distance in output */ + if (dist > op) { /* see if copy from window */ + op = dist - op; /* distance back in window */ + if (op > whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD$1; + break top; + } + +// (!) This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// if (len <= op - whave) { +// do { +// output[_out++] = 0; +// } while (--len); +// continue top; +// } +// len -= op - whave; +// do { +// output[_out++] = 0; +// } while (--op > whave); +// if (op === 0) { +// from = _out - dist; +// do { +// output[_out++] = output[from++]; +// } while (--len); +// continue top; +// } +//#endif + } + from = 0; // window index + from_source = s_window; + if (wnext === 0) { /* very common case */ + from += wsize - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + else if (wnext < op) { /* wrap around window */ + from += wsize + wnext - op; + op -= wnext; + if (op < len) { /* some from end of window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = 0; + if (wnext < len) { /* some from start of window */ + op = wnext; + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + } + else { /* contiguous in window */ + from += wnext - op; + if (op < len) { /* some from window */ + len -= op; + do { + output[_out++] = s_window[from++]; + } while (--op); + from = _out - dist; /* rest from output */ + from_source = output; + } + } + while (len > 2) { + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + output[_out++] = from_source[from++]; + len -= 3; + } + if (len) { + output[_out++] = from_source[from++]; + if (len > 1) { + output[_out++] = from_source[from++]; + } + } + } + else { + from = _out - dist; /* copy direct from output */ + do { /* minimum length is three */ + output[_out++] = output[from++]; + output[_out++] = output[from++]; + output[_out++] = output[from++]; + len -= 3; + } while (len > 2); + if (len) { + 
output[_out++] = output[from++]; + if (len > 1) { + output[_out++] = output[from++]; + } + } + } + } + else if ((op & 64) === 0) { /* 2nd level distance code */ + here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dodist; + } + else { + strm.msg = 'invalid distance code'; + state.mode = BAD$1; + break top; + } + + break; // need to emulate goto via "continue" + } + } + else if ((op & 64) === 0) { /* 2nd level length code */ + here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))]; + continue dolen; + } + else if (op & 32) { /* end-of-block */ + //Tracevv((stderr, "inflate: end of block\n")); + state.mode = TYPE$1; + break top; + } + else { + strm.msg = 'invalid literal/length code'; + state.mode = BAD$1; + break top; + } + + break; // need to emulate goto via "continue" + } + } while (_in < last && _out < end); + + /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ + len = bits >> 3; + _in -= len; + bits -= len << 3; + hold &= (1 << bits) - 1; + + /* update state and return */ + strm.next_in = _in; + strm.next_out = _out; + strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last)); + strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end)); + state.hold = hold; + state.bits = bits; + return; +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
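// --- Editorial aside, not part of the pako sources in this patch ---
// Both inflate_fast() above and the tables built by inflate_table() below pack
// each Huffman table entry into one 32-bit integer as
// (bits << 24) | (op << 16) | val, unpacked with the same shifts and masks
// seen in the hot loop (`here >>> 24`, `(here >>> 16) & 0xff`, `here & 0xffff`).
// A minimal illustrative helper; the name `unpackTableEntry` is hypothetical:
const unpackTableEntry = (here) => ({
  bits: here >>> 24,          // code bits consumed from the bit buffer
  op:   (here >>> 16) & 0xff, // 0 = literal; &16 = base value, low 4 bits = extra bits;
                              // &32 = end-of-block; &64 = invalid code; otherwise a
                              // link to a second-level table
  val:  here & 0xffff         // literal byte, base value, or sub-table offset
});
// Example: the invalid-code marker (1 << 24) | (64 << 16) | 0 written for an
// empty code set unpacks to { bits: 1, op: 64, val: 0 }.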
+ +const MAXBITS = 15; +const ENOUGH_LENS$1 = 852; +const ENOUGH_DISTS$1 = 592; +//const ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +const CODES$1 = 0; +const LENS$1 = 1; +const DISTS$1 = 2; + +const lbase = new Uint16Array([ /* Length codes 257..285 base */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 +]); + +const lext = new Uint8Array([ /* Length codes 257..285 extra */ + 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78 +]); + +const dbase = new Uint16Array([ /* Distance codes 0..29 base */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577, 0, 0 +]); + +const dext = new Uint8Array([ /* Distance codes 0..29 extra */ + 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, + 28, 28, 29, 29, 64, 64 +]); + +const inflate_table = (type, lens, lens_index, codes, table, table_index, work, opts) => +{ + const bits = opts.bits; + //here = opts.here; /* table entry for duplication */ + + let len = 0; /* a code's length in bits */ + let sym = 0; /* index of code symbols */ + let min = 0, max = 0; /* minimum and maximum code lengths */ + let root = 0; /* number of index bits for root table */ + let curr = 0; /* number of index bits for current table */ + let drop = 0; /* code bits to drop for sub-table */ + let left = 0; /* number of prefix codes available */ + let used = 0; /* code entries in table used */ + let huff = 0; /* Huffman code */ + let incr; /* for incrementing code, index */ + let fill; /* index for replicating entries */ + let low; /* low bits for current root entry */ + let mask; /* mask for low root bits */ + let next; /* next available space in table */ + let base = null; /* base value table to use */ +// let shoextra; /* extra bits table to use */ + let match; /* use base and extra for symbol >= match */ + const count = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */ + const offs = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */ + let extra = null; + + let here_bits, here_op, here_val; + + /* + Process a set of code lengths to create a canonical Huffman code. The + code lengths are lens[0..codes-1]. Each length corresponds to the + symbols 0..codes-1. The Huffman code is generated by first sorting the + symbols by length from short to long, and retaining the symbol order + for codes with equal lengths. Then the code starts with all zero bits + for the first code of the shortest length, and the codes are integer + increments for the same length, and zeros are appended as the length + increases. For the deflate format, these bits are stored backwards + from their more natural integer increment ordering, and so when the + decoding tables are built in the large loop below, the integer codes + are incremented backwards. + + This routine assumes, but does not check, that all of the entries in + lens[] are in the range 0..MAXBITS. The caller must assure this. + 1..MAXBITS is interpreted as that code length. zero means that that + symbol does not occur in this code. + + The codes are sorted by computing a count of codes for each length, + creating from that a table of starting indices for each length in the + sorted table, and then entering the symbols in order in the sorted + table. 
The sorted table is work[], with that space being provided by + the caller. + + The length counts are used for other purposes as well, i.e. finding + the minimum and maximum length codes, determining if there are any + codes at all, checking for a valid set of lengths, and looking ahead + at length counts to determine sub-table sizes when building the + decoding tables. + */ + + /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ + for (len = 0; len <= MAXBITS; len++) { + count[len] = 0; + } + for (sym = 0; sym < codes; sym++) { + count[lens[lens_index + sym]]++; + } + + /* bound code lengths, force root to be within code lengths */ + root = bits; + for (max = MAXBITS; max >= 1; max--) { + if (count[max] !== 0) { break; } + } + if (root > max) { + root = max; + } + if (max === 0) { /* no symbols to code at all */ + //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */ + //table.bits[opts.table_index] = 1; //here.bits = (var char)1; + //table.val[opts.table_index++] = 0; //here.val = (var short)0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + + //table.op[opts.table_index] = 64; + //table.bits[opts.table_index] = 1; + //table.val[opts.table_index++] = 0; + table[table_index++] = (1 << 24) | (64 << 16) | 0; + + opts.bits = 1; + return 0; /* no symbols, but wait for decoding to report error */ + } + for (min = 1; min < max; min++) { + if (count[min] !== 0) { break; } + } + if (root < min) { + root = min; + } + + /* check for an over-subscribed or incomplete set of lengths */ + left = 1; + for (len = 1; len <= MAXBITS; len++) { + left <<= 1; + left -= count[len]; + if (left < 0) { + return -1; + } /* over-subscribed */ + } + if (left > 0 && (type === CODES$1 || max !== 1)) { + return -1; /* incomplete set */ + } + + /* generate offsets into symbol table for each length for sorting */ + offs[1] = 0; + for (len = 1; len < MAXBITS; len++) { + offs[len + 1] = offs[len] + count[len]; + } + + /* sort symbols by length, by symbol order within each length */ + for (sym = 0; sym < codes; sym++) { + if (lens[lens_index + sym] !== 0) { + work[offs[lens[lens_index + sym]]++] = sym; + } + } + + /* + Create and fill in decoding tables. In this loop, the table being + filled is at next and has curr index bits. The code being used is huff + with length len. That code is converted to an index by dropping drop + bits off of the bottom. For codes where len is less than drop + curr, + those top drop + curr - len bits are incremented through all values to + fill the table with replicated entries. + + root is the number of index bits for the root table. When len exceeds + root, sub-tables are created pointed to by the root entry with an index + of the low root bits of huff. This is saved in low to check for when a + new sub-table should be started. drop is zero when the root table is + being filled, and drop is root when sub-tables are being filled. + + When a new sub-table is needed, it is necessary to look ahead in the + code lengths to determine what size sub-table is needed. The length + counts are used for this, and so count[] is decremented as codes are + entered in the tables. + + used keeps track of how many table entries have been allocated from the + provided *table space. It is checked for LENS and DIST tables against + the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in + the initial root table size constants. See the comments in inftrees.h + for more information. 
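// --- Editorial aside, not part of the pako sources in this patch ---
// The canonical Huffman construction described earlier in this comment (count
// the codes of each length, then hand out consecutive codes within each
// length) can be sketched in a few lines. Per RFC 1951 the first code of each
// bit length is code[len] = (code[len - 1] + count[len - 1]) << 1. The helper
// name below is hypothetical and only illustrates the idea; inflate_table()
// itself stores the bits in reverse order and fills lookup tables instead of
// producing code strings.
const firstCanonicalCodes = (count, maxBits) => {
  const next = new Uint16Array(maxBits + 1); // first canonical code of each length
  let code = 0;
  for (let len = 1; len <= maxBits; len++) {
    code = (code + count[len - 1]) << 1;
    next[len] = code;
  }
  return next;
};
// Example: lengths {B:1, A:2, C:3, D:3} give count = [0, 1, 1, 2], so the
// assigned codes are B=0, A=10, C=110, D=111.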
+ + sym increments through all symbols, and the loop terminates when + all codes of length max, i.e. all codes, have been processed. This + routine permits incomplete codes, so another loop after this one fills + in the rest of the decoding tables with invalid code markers. + */ + + /* set up for code type */ + // poor man optimization - use if-else instead of switch, + // to avoid deopts in old v8 + if (type === CODES$1) { + base = extra = work; /* dummy value--not used */ + match = 20; + + } else if (type === LENS$1) { + base = lbase; + extra = lext; + match = 257; + + } else { /* DISTS */ + base = dbase; + extra = dext; + match = 0; + } + + /* initialize opts for loop */ + huff = 0; /* starting code */ + sym = 0; /* starting code symbol */ + len = min; /* starting code length */ + next = table_index; /* current table to fill in */ + curr = root; /* current table index bits */ + drop = 0; /* current bits to drop from code for index */ + low = -1; /* trigger new sub-table when len > root */ + used = 1 << root; /* use root table entries */ + mask = used - 1; /* mask for comparing low */ + + /* check available table space */ + if ((type === LENS$1 && used > ENOUGH_LENS$1) || + (type === DISTS$1 && used > ENOUGH_DISTS$1)) { + return 1; + } + + /* process all codes and make table entries */ + for (;;) { + /* create table entry */ + here_bits = len - drop; + if (work[sym] + 1 < match) { + here_op = 0; + here_val = work[sym]; + } + else if (work[sym] >= match) { + here_op = extra[work[sym] - match]; + here_val = base[work[sym] - match]; + } + else { + here_op = 32 + 64; /* end of block */ + here_val = 0; + } + + /* replicate for those indices with low len bits equal to huff */ + incr = 1 << (len - drop); + fill = 1 << curr; + min = fill; /* save offset to next table */ + do { + fill -= incr; + table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0; + } while (fill !== 0); + + /* backwards increment the len-bit code huff */ + incr = 1 << (len - 1); + while (huff & incr) { + incr >>= 1; + } + if (incr !== 0) { + huff &= incr - 1; + huff += incr; + } else { + huff = 0; + } + + /* go to next symbol, update count, len */ + sym++; + if (--count[len] === 0) { + if (len === max) { break; } + len = lens[lens_index + work[sym]]; + } + + /* create new sub-table if needed */ + if (len > root && (huff & mask) !== low) { + /* if first time, transition to sub-tables */ + if (drop === 0) { + drop = root; + } + + /* increment past last table */ + next += min; /* here min is 1 << curr */ + + /* determine length of next table */ + curr = len - drop; + left = 1 << curr; + while (curr + drop < max) { + left -= count[curr + drop]; + if (left <= 0) { break; } + curr++; + left <<= 1; + } + + /* check for enough space */ + used += 1 << curr; + if ((type === LENS$1 && used > ENOUGH_LENS$1) || + (type === DISTS$1 && used > ENOUGH_DISTS$1)) { + return 1; + } + + /* point entry in root table to sub-table */ + low = huff & mask; + /*table.op[low] = curr; + table.bits[low] = root; + table.val[low] = next - opts.table_index;*/ + table[low] = (root << 24) | (curr << 16) | (next - table_index) |0; + } + } + + /* fill in remaining table entry if code is incomplete (guaranteed to have + at most one remaining entry, since if the code is incomplete, the + maximum code length that was allowed to get this far is one bit) */ + if (huff !== 0) { + //table.op[next + huff] = 64; /* invalid code marker */ + //table.bits[next + huff] = len - drop; + //table.val[next + huff] = 0; + table[next + huff] = ((len 
- drop) << 24) | (64 << 16) |0; + } + + /* set return parameters */ + //opts.table_index += used; + opts.bits = root; + return 0; +}; + + +var inftrees = inflate_table; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + + + + + + +const CODES = 0; +const LENS = 1; +const DISTS = 2; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +const { + Z_FINISH: Z_FINISH$1, Z_BLOCK, Z_TREES, + Z_OK: Z_OK$1, Z_STREAM_END: Z_STREAM_END$1, Z_NEED_DICT: Z_NEED_DICT$1, Z_STREAM_ERROR: Z_STREAM_ERROR$1, Z_DATA_ERROR: Z_DATA_ERROR$1, Z_MEM_ERROR: Z_MEM_ERROR$1, Z_BUF_ERROR, + Z_DEFLATED +} = constants$2; + + +/* STATES ====================================================================*/ +/* ===========================================================================*/ + + +const HEAD = 16180; /* i: waiting for magic header */ +const FLAGS = 16181; /* i: waiting for method and flags (gzip) */ +const TIME = 16182; /* i: waiting for modification time (gzip) */ +const OS = 16183; /* i: waiting for extra flags and operating system (gzip) */ +const EXLEN = 16184; /* i: waiting for extra length (gzip) */ +const EXTRA = 16185; /* i: waiting for extra bytes (gzip) */ +const NAME = 16186; /* i: waiting for end of file name (gzip) */ +const COMMENT = 16187; /* i: waiting for end of comment (gzip) */ +const HCRC = 16188; /* i: waiting for header crc (gzip) */ +const DICTID = 16189; /* i: waiting for dictionary check value */ +const DICT = 16190; /* waiting for inflateSetDictionary() call */ +const TYPE = 16191; /* i: waiting for type bits, including last-flag bit */ +const TYPEDO = 16192; /* i: same, but skip check to exit inflate on new block */ +const STORED = 16193; /* i: waiting for stored size (length and complement) */ +const COPY_ = 16194; /* i/o: same as COPY below, but only first time in */ +const COPY = 16195; /* i/o: waiting for input or output to copy stored block */ +const TABLE = 16196; /* i: waiting for dynamic block table lengths */ +const LENLENS = 16197; /* i: waiting for code length code lengths */ +const CODELENS = 16198; /* i: waiting for length/lit and distance code lengths */ +const LEN_ = 16199; /* i: same as LEN below, but only first time in */ +const LEN = 16200; /* i: waiting for length/lit/eob code */ +const LENEXT = 16201; /* i: waiting for length extra bits */ +const DIST = 16202; /* i: waiting for distance code */ +const DISTEXT = 16203; /* i: waiting for distance extra bits */ +const MATCH = 16204; /* o: waiting for output space to copy string */ +const LIT = 16205; 
/* o: waiting for output space to write literal */ +const CHECK = 16206; /* i: waiting for 32-bit check value */ +const LENGTH = 16207; /* i: waiting for 32-bit length (gzip) */ +const DONE = 16208; /* finished check, done -- remain here until reset */ +const BAD = 16209; /* got a data error -- remain here until reset */ +const MEM = 16210; /* got an inflate() memory error -- remain here until reset */ +const SYNC = 16211; /* looking for synchronization bytes to restart inflate() */ + +/* ===========================================================================*/ + + + +const ENOUGH_LENS = 852; +const ENOUGH_DISTS = 592; +//const ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS); + +const MAX_WBITS = 15; +/* 32K LZ77 window */ +const DEF_WBITS = MAX_WBITS; + + +const zswap32 = (q) => { + + return (((q >>> 24) & 0xff) + + ((q >>> 8) & 0xff00) + + ((q & 0xff00) << 8) + + ((q & 0xff) << 24)); +}; + + +function InflateState() { + this.strm = null; /* pointer back to this zlib stream */ + this.mode = 0; /* current inflate mode */ + this.last = false; /* true if processing last block */ + this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip, + bit 2 true to validate check value */ + this.havedict = false; /* true if dictionary provided */ + this.flags = 0; /* gzip header method and flags (0 if zlib), or + -1 if raw or no header yet */ + this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */ + this.check = 0; /* protected copy of check value */ + this.total = 0; /* protected copy of output count */ + // TODO: may be {} + this.head = null; /* where to save gzip header information */ + + /* sliding window */ + this.wbits = 0; /* log base 2 of requested window size */ + this.wsize = 0; /* window size or zero if not using window */ + this.whave = 0; /* valid bytes in the window */ + this.wnext = 0; /* window write index */ + this.window = null; /* allocated sliding window, if needed */ + + /* bit accumulator */ + this.hold = 0; /* input bit accumulator */ + this.bits = 0; /* number of bits in "in" */ + + /* for string and stored block copying */ + this.length = 0; /* literal or length of data to copy */ + this.offset = 0; /* distance back to copy string from */ + + /* for table and code decoding */ + this.extra = 0; /* extra bits needed */ + + /* fixed and dynamic code tables */ + this.lencode = null; /* starting table for length/literal codes */ + this.distcode = null; /* starting table for distance codes */ + this.lenbits = 0; /* index bits for lencode */ + this.distbits = 0; /* index bits for distcode */ + + /* dynamic table building */ + this.ncode = 0; /* number of code length code lengths */ + this.nlen = 0; /* number of length code lengths */ + this.ndist = 0; /* number of distance code lengths */ + this.have = 0; /* number of code lengths in lens[] */ + this.next = null; /* next available space in codes[] */ + + this.lens = new Uint16Array(320); /* temporary storage for code lengths */ + this.work = new Uint16Array(288); /* work area for code table building */ + + /* + because we don't have pointers in js, we use lencode and distcode directly + as buffers so we don't need codes + */ + //this.codes = new Int32Array(ENOUGH); /* space for code tables */ + this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */ + this.distdyn = null; /* dynamic table for distance codes (JS specific) */ + this.sane = 0; /* if false, allow invalid distance too far */ + this.back = 0; /* bits back of last unprocessed length/lit */ + this.was = 0; /* initial length of match */ +} + + +const 
inflateStateCheck = (strm) => { + + if (!strm) { + return 1; + } + const state = strm.state; + if (!state || state.strm !== strm || + state.mode < HEAD || state.mode > SYNC) { + return 1; + } + return 0; +}; + + +const inflateResetKeep = (strm) => { + + if (inflateStateCheck(strm)) { return Z_STREAM_ERROR$1; } + const state = strm.state; + strm.total_in = strm.total_out = state.total = 0; + strm.msg = ''; /*Z_NULL*/ + if (state.wrap) { /* to support ill-conceived Java test suite */ + strm.adler = state.wrap & 1; + } + state.mode = HEAD; + state.last = 0; + state.havedict = 0; + state.flags = -1; + state.dmax = 32768; + state.head = null/*Z_NULL*/; + state.hold = 0; + state.bits = 0; + //state.lencode = state.distcode = state.next = state.codes; + state.lencode = state.lendyn = new Int32Array(ENOUGH_LENS); + state.distcode = state.distdyn = new Int32Array(ENOUGH_DISTS); + + state.sane = 1; + state.back = -1; + //Tracev((stderr, "inflate: reset\n")); + return Z_OK$1; +}; + + +const inflateReset = (strm) => { + + if (inflateStateCheck(strm)) { return Z_STREAM_ERROR$1; } + const state = strm.state; + state.wsize = 0; + state.whave = 0; + state.wnext = 0; + return inflateResetKeep(strm); + +}; + + +const inflateReset2 = (strm, windowBits) => { + let wrap; + + /* get the state */ + if (inflateStateCheck(strm)) { return Z_STREAM_ERROR$1; } + const state = strm.state; + + /* extract wrap request from windowBits parameter */ + if (windowBits < 0) { + wrap = 0; + windowBits = -windowBits; + } + else { + wrap = (windowBits >> 4) + 5; + if (windowBits < 48) { + windowBits &= 15; + } + } + + /* set number of window bits, free window if different */ + if (windowBits && (windowBits < 8 || windowBits > 15)) { + return Z_STREAM_ERROR$1; + } + if (state.window !== null && state.wbits !== windowBits) { + state.window = null; + } + + /* update state and reset the rest of it */ + state.wrap = wrap; + state.wbits = windowBits; + return inflateReset(strm); +}; + + +const inflateInit2 = (strm, windowBits) => { + + if (!strm) { return Z_STREAM_ERROR$1; } + //strm.msg = Z_NULL; /* in case we return an error */ + + const state = new InflateState(); + + //if (state === Z_NULL) return Z_MEM_ERROR; + //Tracev((stderr, "inflate: allocated\n")); + strm.state = state; + state.strm = strm; + state.window = null/*Z_NULL*/; + state.mode = HEAD; /* to pass state test in inflateReset2() */ + const ret = inflateReset2(strm, windowBits); + if (ret !== Z_OK$1) { + strm.state = null/*Z_NULL*/; + } + return ret; +}; + + +const inflateInit = (strm) => { + + return inflateInit2(strm, DEF_WBITS); +}; + + +/* + Return state with length and distance decoding tables and index sizes set to + fixed code decoding. Normally this returns fixed tables from inffixed.h. + If BUILDFIXED is defined, then instead this routine builds the tables the + first time it's called, and returns those tables the first time and + thereafter. This reduces the size of the code by about 2K bytes, in + exchange for a little execution time. However, BUILDFIXED should not be + used for threaded applications, since the rewriting of the tables and virgin + may not be thread-safe. 
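// --- Editorial aside, not part of the pako sources in this patch ---
// How inflateReset2() earlier above maps its windowBits argument onto
// state.wrap (per the InflateState comment: bit 0 = zlib, bit 1 = gzip,
// bit 2 = validate the check value). The helper name `describeWindowBits`
// is hypothetical, for illustration only.
const describeWindowBits = (windowBits) => {
  if (windowBits < 0) {
    // raw deflate stream: no header/trailer, window size = -windowBits
    return { wrap: 0, bits: -windowBits };
  }
  const wrap = (windowBits >> 4) + 5;
  const bits = windowBits < 48 ? windowBits & 15 : windowBits;
  return { wrap, bits };
};
// describeWindowBits(15)      -> { wrap: 5, bits: 15 }  zlib wrapper
// describeWindowBits(15 + 16) -> { wrap: 6, bits: 15 }  gzip wrapper
// describeWindowBits(15 + 32) -> { wrap: 7, bits: 15 }  autodetect zlib/gzip
// describeWindowBits(-15)     -> { wrap: 0, bits: 15 }  raw deflate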
+ */ +let virgin = true; + +let lenfix, distfix; // We have no pointers in JS, so keep tables separate + + +const fixedtables = (state) => { + + /* build fixed huffman tables if first call (may not be thread safe) */ + if (virgin) { + lenfix = new Int32Array(512); + distfix = new Int32Array(32); + + /* literal/length table */ + let sym = 0; + while (sym < 144) { state.lens[sym++] = 8; } + while (sym < 256) { state.lens[sym++] = 9; } + while (sym < 280) { state.lens[sym++] = 7; } + while (sym < 288) { state.lens[sym++] = 8; } + + inftrees(LENS, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 }); + + /* distance table */ + sym = 0; + while (sym < 32) { state.lens[sym++] = 5; } + + inftrees(DISTS, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 }); + + /* do this just once */ + virgin = false; + } + + state.lencode = lenfix; + state.lenbits = 9; + state.distcode = distfix; + state.distbits = 5; +}; + + +/* + Update the window with the last wsize (normally 32K) bytes written before + returning. If window does not exist yet, create it. This is only called + when a window is already in use, or when output has been written during this + inflate call, but the end of the deflate stream has not been reached yet. + It is also called to create a window for dictionary data when a dictionary + is loaded. + + Providing output buffers larger than 32K to inflate() should provide a speed + advantage, since only the last 32K of output is copied to the sliding window + upon return from inflate(), and since all distances after the first 32K of + output will fall in the output data, making match copies simpler and faster. + The advantage may be dependent on the size of the processor's data caches. + */ +const updatewindow = (strm, src, end, copy) => { + + let dist; + const state = strm.state; + + /* if it hasn't been done already, allocate space for the window */ + if (state.window === null) { + state.wsize = 1 << state.wbits; + state.wnext = 0; + state.whave = 0; + + state.window = new Uint8Array(state.wsize); + } + + /* copy state->wsize or less output bytes into the circular window */ + if (copy >= state.wsize) { + state.window.set(src.subarray(end - state.wsize, end), 0); + state.wnext = 0; + state.whave = state.wsize; + } + else { + dist = state.wsize - state.wnext; + if (dist > copy) { + dist = copy; + } + //zmemcpy(state->window + state->wnext, end - copy, dist); + state.window.set(src.subarray(end - copy, end - copy + dist), state.wnext); + copy -= dist; + if (copy) { + //zmemcpy(state->window, end - copy, copy); + state.window.set(src.subarray(end - copy, end), 0); + state.wnext = copy; + state.whave = state.wsize; + } + else { + state.wnext += dist; + if (state.wnext === state.wsize) { state.wnext = 0; } + if (state.whave < state.wsize) { state.whave += dist; } + } + } + return 0; +}; + + +const inflate$2 = (strm, flush) => { + + let state; + let input, output; // input/output buffers + let next; /* next input INDEX */ + let put; /* next output INDEX */ + let have, left; /* available input and output */ + let hold; /* bit buffer */ + let bits; /* bits in bit buffer */ + let _in, _out; /* save starting available input and output */ + let copy; /* number of stored or match bytes to copy */ + let from; /* where to copy match bytes from */ + let from_source; + let here = 0; /* current decoding table entry */ + let here_bits, here_op, here_val; // paked "here" denormalized (JS specific) + //let last; /* parent table entry */ + let last_bits, last_op, last_val; // paked "last" denormalized (JS 
specific) + let len; /* length to copy for repeats, bits to drop */ + let ret; /* return code */ + const hbuf = new Uint8Array(4); /* buffer for gzip header crc calculation */ + let opts; + + let n; // temporary variable for NEED_BITS + + const order = /* permutation of code lengths */ + new Uint8Array([ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]); + + + if (inflateStateCheck(strm) || !strm.output || + (!strm.input && strm.avail_in !== 0)) { + return Z_STREAM_ERROR$1; + } + + state = strm.state; + if (state.mode === TYPE) { state.mode = TYPEDO; } /* skip check */ + + + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + _in = have; + _out = left; + ret = Z_OK$1; + + inf_leave: // goto emulation + for (;;) { + switch (state.mode) { + case HEAD: + if (state.wrap === 0) { + state.mode = TYPEDO; + break; + } + //=== NEEDBITS(16); + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */ + if (state.wbits === 0) { + state.wbits = 15; + } + state.check = 0/*crc32(0L, Z_NULL, 0)*/; + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32_1(state.check, hbuf, 2, 0); + //===// + + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = FLAGS; + break; + } + if (state.head) { + state.head.done = false; + } + if (!(state.wrap & 1) || /* check if zlib header allowed */ + (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) { + strm.msg = 'incorrect header check'; + state.mode = BAD; + break; + } + if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// + len = (hold & 0x0f)/*BITS(4)*/ + 8; + if (state.wbits === 0) { + state.wbits = len; + } + if (len > 15 || len > state.wbits) { + strm.msg = 'invalid window size'; + state.mode = BAD; + break; + } + + // !!! pako patch. Force use `options.windowBits` if passed. + // Required to always use max window size by default. + state.dmax = 1 << state.wbits; + //state.dmax = 1 << len; + + state.flags = 0; /* indicate zlib header */ + //Tracev((stderr, "inflate: zlib header ok\n")); + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = hold & 0x200 ? 
DICTID : TYPE; + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + break; + case FLAGS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.flags = hold; + if ((state.flags & 0xff) !== Z_DEFLATED) { + strm.msg = 'unknown compression method'; + state.mode = BAD; + break; + } + if (state.flags & 0xe000) { + strm.msg = 'unknown header flags set'; + state.mode = BAD; + break; + } + if (state.head) { + state.head.text = ((hold >> 8) & 1); + } + if ((state.flags & 0x0200) && (state.wrap & 4)) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32_1(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = TIME; + /* falls through */ + case TIME: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.time = hold; + } + if ((state.flags & 0x0200) && (state.wrap & 4)) { + //=== CRC4(state.check, hold) + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + hbuf[2] = (hold >>> 16) & 0xff; + hbuf[3] = (hold >>> 24) & 0xff; + state.check = crc32_1(state.check, hbuf, 4, 0); + //=== + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = OS; + /* falls through */ + case OS: + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if (state.head) { + state.head.xflags = (hold & 0xff); + state.head.os = (hold >> 8); + } + if ((state.flags & 0x0200) && (state.wrap & 4)) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32_1(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = EXLEN; + /* falls through */ + case EXLEN: + if (state.flags & 0x0400) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length = hold; + if (state.head) { + state.head.extra_len = hold; + } + if ((state.flags & 0x0200) && (state.wrap & 4)) { + //=== CRC2(state.check, hold); + hbuf[0] = hold & 0xff; + hbuf[1] = (hold >>> 8) & 0xff; + state.check = crc32_1(state.check, hbuf, 2, 0); + //===// + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + else if (state.head) { + state.head.extra = null/*Z_NULL*/; + } + state.mode = EXTRA; + /* falls through */ + case EXTRA: + if (state.flags & 0x0400) { + copy = state.length; + if (copy > have) { copy = have; } + if (copy) { + if (state.head) { + len = state.head.extra_len - state.length; + if (!state.head.extra) { + // Use untyped array for more convenient processing later + state.head.extra = new Uint8Array(state.head.extra_len); + } + state.head.extra.set( + input.subarray( + next, + // extra field is limited to 65536 bytes + // - no need for additional size check + next + copy + ), + /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/ + len + ); + //zmemcpy(state.head.extra + len, next, + // len + copy > state.head.extra_max ? 
+ // state.head.extra_max - len : copy); + } + if ((state.flags & 0x0200) && (state.wrap & 4)) { + state.check = crc32_1(state.check, input, copy, next); + } + have -= copy; + next += copy; + state.length -= copy; + } + if (state.length) { break inf_leave; } + } + state.length = 0; + state.mode = NAME; + /* falls through */ + case NAME: + if (state.flags & 0x0800) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + // TODO: 2 or 1 bytes? + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.name_max*/)) { + state.head.name += String.fromCharCode(len); + } + } while (len && copy < have); + + if ((state.flags & 0x0200) && (state.wrap & 4)) { + state.check = crc32_1(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.name = null; + } + state.length = 0; + state.mode = COMMENT; + /* falls through */ + case COMMENT: + if (state.flags & 0x1000) { + if (have === 0) { break inf_leave; } + copy = 0; + do { + len = input[next + copy++]; + /* use constant limit because in js we should not preallocate memory */ + if (state.head && len && + (state.length < 65536 /*state.head.comm_max*/)) { + state.head.comment += String.fromCharCode(len); + } + } while (len && copy < have); + if ((state.flags & 0x0200) && (state.wrap & 4)) { + state.check = crc32_1(state.check, input, copy, next); + } + have -= copy; + next += copy; + if (len) { break inf_leave; } + } + else if (state.head) { + state.head.comment = null; + } + state.mode = HCRC; + /* falls through */ + case HCRC: + if (state.flags & 0x0200) { + //=== NEEDBITS(16); */ + while (bits < 16) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 4) && hold !== (state.check & 0xffff)) { + strm.msg = 'header crc mismatch'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + } + if (state.head) { + state.head.hcrc = ((state.flags >> 9) & 1); + state.head.done = true; + } + strm.adler = state.check = 0; + state.mode = TYPE; + break; + case DICTID: + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + strm.adler = state.check = zswap32(hold); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = DICT; + /* falls through */ + case DICT: + if (state.havedict === 0) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + return Z_NEED_DICT$1; + } + strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/; + state.mode = TYPE; + /* falls through */ + case TYPE: + if (flush === Z_BLOCK || flush === Z_TREES) { break inf_leave; } + /* falls through */ + case TYPEDO: + if (state.last) { + //--- BYTEBITS() ---// + hold >>>= bits & 7; + bits -= bits & 7; + //---// + state.mode = CHECK; + break; + } + //=== NEEDBITS(3); */ + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.last = (hold & 0x01)/*BITS(1)*/; + //--- DROPBITS(1) ---// + hold >>>= 1; + bits -= 1; + //---// + + switch ((hold & 0x03)/*BITS(2)*/) { + case 0: /* stored block */ + //Tracev((stderr, "inflate: stored block%s\n", + // state.last ? 
" (last)" : "")); + state.mode = STORED; + break; + case 1: /* fixed block */ + fixedtables(state); + //Tracev((stderr, "inflate: fixed codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = LEN_; /* decode codes */ + if (flush === Z_TREES) { + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break inf_leave; + } + break; + case 2: /* dynamic block */ + //Tracev((stderr, "inflate: dynamic codes block%s\n", + // state.last ? " (last)" : "")); + state.mode = TABLE; + break; + case 3: + strm.msg = 'invalid block type'; + state.mode = BAD; + } + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + break; + case STORED: + //--- BYTEBITS() ---// /* go to byte boundary */ + hold >>>= bits & 7; + bits -= bits & 7; + //---// + //=== NEEDBITS(32); */ + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) { + strm.msg = 'invalid stored block lengths'; + state.mode = BAD; + break; + } + state.length = hold & 0xffff; + //Tracev((stderr, "inflate: stored length %u\n", + // state.length)); + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + state.mode = COPY_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case COPY_: + state.mode = COPY; + /* falls through */ + case COPY: + copy = state.length; + if (copy) { + if (copy > have) { copy = have; } + if (copy > left) { copy = left; } + if (copy === 0) { break inf_leave; } + //--- zmemcpy(put, next, copy); --- + output.set(input.subarray(next, next + copy), put); + //---// + have -= copy; + next += copy; + left -= copy; + put += copy; + state.length -= copy; + break; + } + //Tracev((stderr, "inflate: stored end\n")); + state.mode = TYPE; + break; + case TABLE: + //=== NEEDBITS(14); */ + while (bits < 14) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1; + //--- DROPBITS(5) ---// + hold >>>= 5; + bits -= 5; + //---// + state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4; + //--- DROPBITS(4) ---// + hold >>>= 4; + bits -= 4; + //---// +//#ifndef PKZIP_BUG_WORKAROUND + if (state.nlen > 286 || state.ndist > 30) { + strm.msg = 'too many length or distance symbols'; + state.mode = BAD; + break; + } +//#endif + //Tracev((stderr, "inflate: table sizes ok\n")); + state.have = 0; + state.mode = LENLENS; + /* falls through */ + case LENLENS: + while (state.have < state.ncode) { + //=== NEEDBITS(3); + while (bits < 3) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.lens[order[state.have++]] = (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + while (state.have < 19) { + state.lens[order[state.have++]] = 0; + } + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ //state.next = state.codes; + //state.lencode = state.next; + // Switch to use dynamic table + state.lencode = state.lendyn; + state.lenbits = 7; + + opts = { bits: state.lenbits }; + ret = inftrees(CODES, state.lens, 0, 19, state.lencode, 0, state.work, opts); + state.lenbits = opts.bits; + + if (ret) { + strm.msg = 'invalid code lengths set'; + state.mode = BAD; + break; + } + //Tracev((stderr, "inflate: code lengths ok\n")); + state.have = 0; + state.mode = CODELENS; + /* falls through */ + case CODELENS: + while (state.have < state.nlen + state.ndist) { + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_val < 16) { + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.lens[state.have++] = here_val; + } + else { + if (here_val === 16) { + //=== NEEDBITS(here.bits + 2); + n = here_bits + 2; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + if (state.have === 0) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + len = state.lens[state.have - 1]; + copy = 3 + (hold & 0x03);//BITS(2); + //--- DROPBITS(2) ---// + hold >>>= 2; + bits -= 2; + //---// + } + else if (here_val === 17) { + //=== NEEDBITS(here.bits + 3); + n = here_bits + 3; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 3 + (hold & 0x07);//BITS(3); + //--- DROPBITS(3) ---// + hold >>>= 3; + bits -= 3; + //---// + } + else { + //=== NEEDBITS(here.bits + 7); + n = here_bits + 7; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + len = 0; + copy = 11 + (hold & 0x7f);//BITS(7); + //--- DROPBITS(7) ---// + hold >>>= 7; + bits -= 7; + //---// + } + if (state.have + copy > state.nlen + state.ndist) { + strm.msg = 'invalid bit length repeat'; + state.mode = BAD; + break; + } + while (copy--) { + state.lens[state.have++] = len; + } + } + } + + /* handle error breaks in while */ + if (state.mode === BAD) { break; } + + /* check for end-of-block code (better have one) */ + if (state.lens[256] === 0) { + strm.msg = 'invalid code -- missing end-of-block'; + state.mode = BAD; + break; + } + + /* build code tables -- note: do not change the lenbits or distbits + values here (9 and 6) without reading the comments in inftrees.h + concerning the ENOUGH constants, which depend on those values */ + state.lenbits = 9; + + opts = { bits: state.lenbits }; + ret = inftrees(LENS, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. 
+ // state.next_index = opts.table_index; + state.lenbits = opts.bits; + // state.lencode = state.next; + + if (ret) { + strm.msg = 'invalid literal/lengths set'; + state.mode = BAD; + break; + } + + state.distbits = 6; + //state.distcode.copy(state.codes); + // Switch to use dynamic table + state.distcode = state.distdyn; + opts = { bits: state.distbits }; + ret = inftrees(DISTS, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts); + // We have separate tables & no pointers. 2 commented lines below not needed. + // state.next_index = opts.table_index; + state.distbits = opts.bits; + // state.distcode = state.next; + + if (ret) { + strm.msg = 'invalid distances set'; + state.mode = BAD; + break; + } + //Tracev((stderr, 'inflate: codes ok\n')); + state.mode = LEN_; + if (flush === Z_TREES) { break inf_leave; } + /* falls through */ + case LEN_: + state.mode = LEN; + /* falls through */ + case LEN: + if (have >= 6 && left >= 258) { + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + inffast(strm, _out); + //--- LOAD() --- + put = strm.next_out; + output = strm.output; + left = strm.avail_out; + next = strm.next_in; + input = strm.input; + have = strm.avail_in; + hold = state.hold; + bits = state.bits; + //--- + + if (state.mode === TYPE) { + state.back = -1; + } + break; + } + state.back = 0; + for (;;) { + here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if (here_bits <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if (here_op && (here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.lencode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + state.length = here_val; + if (here_op === 0) { + //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
+ // "inflate: literal '%c'\n" : + // "inflate: literal 0x%02x\n", here.val)); + state.mode = LIT; + break; + } + if (here_op & 32) { + //Tracevv((stderr, "inflate: end of block\n")); + state.back = -1; + state.mode = TYPE; + break; + } + if (here_op & 64) { + strm.msg = 'invalid literal/length code'; + state.mode = BAD; + break; + } + state.extra = here_op & 15; + state.mode = LENEXT; + /* falls through */ + case LENEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } + //Tracevv((stderr, "inflate: length %u\n", state.length)); + state.was = state.length; + state.mode = DIST; + /* falls through */ + case DIST: + for (;;) { + here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/ + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + if ((here_op & 0xf0) === 0) { + last_bits = here_bits; + last_op = here_op; + last_val = here_val; + for (;;) { + here = state.distcode[last_val + + ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)]; + here_bits = here >>> 24; + here_op = (here >>> 16) & 0xff; + here_val = here & 0xffff; + + if ((last_bits + here_bits) <= bits) { break; } + //--- PULLBYTE() ---// + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + //---// + } + //--- DROPBITS(last.bits) ---// + hold >>>= last_bits; + bits -= last_bits; + //---// + state.back += last_bits; + } + //--- DROPBITS(here.bits) ---// + hold >>>= here_bits; + bits -= here_bits; + //---// + state.back += here_bits; + if (here_op & 64) { + strm.msg = 'invalid distance code'; + state.mode = BAD; + break; + } + state.offset = here_val; + state.extra = (here_op) & 15; + state.mode = DISTEXT; + /* falls through */ + case DISTEXT: + if (state.extra) { + //=== NEEDBITS(state.extra); + n = state.extra; + while (bits < n) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/; + //--- DROPBITS(state.extra) ---// + hold >>>= state.extra; + bits -= state.extra; + //---// + state.back += state.extra; + } +//#ifdef INFLATE_STRICT + if (state.offset > state.dmax) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +//#endif + //Tracevv((stderr, "inflate: distance %u\n", state.offset)); + state.mode = MATCH; + /* falls through */ + case MATCH: + if (left === 0) { break inf_leave; } + copy = _out - left; + if (state.offset > copy) { /* copy from window */ + copy = state.offset - copy; + if (copy > state.whave) { + if (state.sane) { + strm.msg = 'invalid distance too far back'; + state.mode = BAD; + break; + } +// (!) 
This block is disabled in zlib defaults, +// don't enable it for binary compatibility +//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR +// Trace((stderr, "inflate.c too far\n")); +// copy -= state.whave; +// if (copy > state.length) { copy = state.length; } +// if (copy > left) { copy = left; } +// left -= copy; +// state.length -= copy; +// do { +// output[put++] = 0; +// } while (--copy); +// if (state.length === 0) { state.mode = LEN; } +// break; +//#endif + } + if (copy > state.wnext) { + copy -= state.wnext; + from = state.wsize - copy; + } + else { + from = state.wnext - copy; + } + if (copy > state.length) { copy = state.length; } + from_source = state.window; + } + else { /* copy from output */ + from_source = output; + from = put - state.offset; + copy = state.length; + } + if (copy > left) { copy = left; } + left -= copy; + state.length -= copy; + do { + output[put++] = from_source[from++]; + } while (--copy); + if (state.length === 0) { state.mode = LEN; } + break; + case LIT: + if (left === 0) { break inf_leave; } + output[put++] = state.length; + left--; + state.mode = LEN; + break; + case CHECK: + if (state.wrap) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + // Use '|' instead of '+' to make sure that result is signed + hold |= input[next++] << bits; + bits += 8; + } + //===// + _out -= left; + strm.total_out += _out; + state.total += _out; + if ((state.wrap & 4) && _out) { + strm.adler = state.check = + /*UPDATE_CHECK(state.check, put - _out, _out);*/ + (state.flags ? crc32_1(state.check, output, _out, put - _out) : adler32_1(state.check, output, _out, put - _out)); + + } + _out = left; + // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too + if ((state.wrap & 4) && (state.flags ? hold : zswap32(hold)) !== state.check) { + strm.msg = 'incorrect data check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: check matches trailer\n")); + } + state.mode = LENGTH; + /* falls through */ + case LENGTH: + if (state.wrap && state.flags) { + //=== NEEDBITS(32); + while (bits < 32) { + if (have === 0) { break inf_leave; } + have--; + hold += input[next++] << bits; + bits += 8; + } + //===// + if ((state.wrap & 4) && hold !== (state.total & 0xffffffff)) { + strm.msg = 'incorrect length check'; + state.mode = BAD; + break; + } + //=== INITBITS(); + hold = 0; + bits = 0; + //===// + //Tracev((stderr, "inflate: length matches trailer\n")); + } + state.mode = DONE; + /* falls through */ + case DONE: + ret = Z_STREAM_END$1; + break inf_leave; + case BAD: + ret = Z_DATA_ERROR$1; + break inf_leave; + case MEM: + return Z_MEM_ERROR$1; + case SYNC: + /* falls through */ + default: + return Z_STREAM_ERROR$1; + } + } + + // inf_leave <- here is real place for "goto inf_leave", emulated via "break inf_leave" + + /* + Return from inflate(), updating the total counts and the check value. + If there was no progress during the inflate() call, return a buffer + error. Call updatewindow() to create and/or update the window state. + Note: a memory error from inflate() is non-recoverable. 
+ */ + + //--- RESTORE() --- + strm.next_out = put; + strm.avail_out = left; + strm.next_in = next; + strm.avail_in = have; + state.hold = hold; + state.bits = bits; + //--- + + if (state.wsize || (_out !== strm.avail_out && state.mode < BAD && + (state.mode < CHECK || flush !== Z_FINISH$1))) { + if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) ; + } + _in -= strm.avail_in; + _out -= strm.avail_out; + strm.total_in += _in; + strm.total_out += _out; + state.total += _out; + if ((state.wrap & 4) && _out) { + strm.adler = state.check = /*UPDATE_CHECK(state.check, strm.next_out - _out, _out);*/ + (state.flags ? crc32_1(state.check, output, _out, strm.next_out - _out) : adler32_1(state.check, output, _out, strm.next_out - _out)); + } + strm.data_type = state.bits + (state.last ? 64 : 0) + + (state.mode === TYPE ? 128 : 0) + + (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0); + if (((_in === 0 && _out === 0) || flush === Z_FINISH$1) && ret === Z_OK$1) { + ret = Z_BUF_ERROR; + } + return ret; +}; + + +const inflateEnd = (strm) => { + + if (inflateStateCheck(strm)) { + return Z_STREAM_ERROR$1; + } + + let state = strm.state; + if (state.window) { + state.window = null; + } + strm.state = null; + return Z_OK$1; +}; + + +const inflateGetHeader = (strm, head) => { + + /* check state */ + if (inflateStateCheck(strm)) { return Z_STREAM_ERROR$1; } + const state = strm.state; + if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR$1; } + + /* save header structure */ + state.head = head; + head.done = false; + return Z_OK$1; +}; + + +const inflateSetDictionary = (strm, dictionary) => { + const dictLength = dictionary.length; + + let state; + let dictid; + let ret; + + /* check state */ + if (inflateStateCheck(strm)) { return Z_STREAM_ERROR$1; } + state = strm.state; + + if (state.wrap !== 0 && state.mode !== DICT) { + return Z_STREAM_ERROR$1; + } + + /* check for correct dictionary identifier */ + if (state.mode === DICT) { + dictid = 1; /* adler32(0, null, 0)*/ + /* dictid = adler32(dictid, dictionary, dictLength); */ + dictid = adler32_1(dictid, dictionary, dictLength, 0); + if (dictid !== state.check) { + return Z_DATA_ERROR$1; + } + } + /* copy dictionary to window using updatewindow(), which will amend the + existing dictionary if appropriate */ + ret = updatewindow(strm, dictionary, dictLength, dictLength); + if (ret) { + state.mode = MEM; + return Z_MEM_ERROR$1; + } + state.havedict = 1; + // Tracev((stderr, "inflate: dictionary set\n")); + return Z_OK$1; +}; + + +var inflateReset_1 = inflateReset; +var inflateReset2_1 = inflateReset2; +var inflateResetKeep_1 = inflateResetKeep; +var inflateInit_1 = inflateInit; +var inflateInit2_1 = inflateInit2; +var inflate_2$1 = inflate$2; +var inflateEnd_1 = inflateEnd; +var inflateGetHeader_1 = inflateGetHeader; +var inflateSetDictionary_1 = inflateSetDictionary; +var inflateInfo = 'pako inflate (from Nodeca project)'; + +/* Not implemented +module.exports.inflateCodesUsed = inflateCodesUsed; +module.exports.inflateCopy = inflateCopy; +module.exports.inflateGetDictionary = inflateGetDictionary; +module.exports.inflateMark = inflateMark; +module.exports.inflatePrime = inflatePrime; +module.exports.inflateSync = inflateSync; +module.exports.inflateSyncPoint = inflateSyncPoint; +module.exports.inflateUndermine = inflateUndermine; +module.exports.inflateValidate = inflateValidate; +*/ + +var inflate_1$2 = { + inflateReset: inflateReset_1, + inflateReset2: inflateReset2_1, + inflateResetKeep: inflateResetKeep_1, + inflateInit: 
inflateInit_1, + inflateInit2: inflateInit2_1, + inflate: inflate_2$1, + inflateEnd: inflateEnd_1, + inflateGetHeader: inflateGetHeader_1, + inflateSetDictionary: inflateSetDictionary_1, + inflateInfo: inflateInfo +}; + +// (C) 1995-2013 Jean-loup Gailly and Mark Adler +// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. + +function GZheader() { + /* true if compressed data believed to be text */ + this.text = 0; + /* modification time */ + this.time = 0; + /* extra flags (not used when writing a gzip file) */ + this.xflags = 0; + /* operating system */ + this.os = 0; + /* pointer to extra field or Z_NULL if none */ + this.extra = null; + /* extra field length (valid if extra != Z_NULL) */ + this.extra_len = 0; // Actually, we don't need it in JS, + // but leave for few code modifications + + // + // Setup limits is not necessary because in js we should not preallocate memory + // for inflate use constant limit in 65536 bytes + // + + /* space at extra (only when reading header) */ + // this.extra_max = 0; + /* pointer to zero-terminated file name or Z_NULL */ + this.name = ''; + /* space at name (only when reading header) */ + // this.name_max = 0; + /* pointer to zero-terminated comment or Z_NULL */ + this.comment = ''; + /* space at comment (only when reading header) */ + // this.comm_max = 0; + /* true if there was or will be a header crc */ + this.hcrc = 0; + /* true when done reading gzip header (not used when writing a gzip file) */ + this.done = false; +} + +var gzheader = GZheader; + +const toString = Object.prototype.toString; + +/* Public constants ==========================================================*/ +/* ===========================================================================*/ + +const { + Z_NO_FLUSH, Z_FINISH, + Z_OK, Z_STREAM_END, Z_NEED_DICT, Z_STREAM_ERROR, Z_DATA_ERROR, Z_MEM_ERROR +} = constants$2; + +/* ===========================================================================*/ + + +/** + * class Inflate + * + * Generic JS-style wrapper for zlib calls. If you don't need + * streaming behaviour - use more simple functions: [[inflate]] + * and [[inflateRaw]]. + **/ + +/* internal + * inflate.chunks -> Array + * + * Chunks of output data, if [[Inflate#onData]] not overridden. + **/ + +/** + * Inflate.result -> Uint8Array|String + * + * Uncompressed result, generated by default [[Inflate#onData]] + * and [[Inflate#onEnd]] handlers. Filled after you push last chunk + * (call [[Inflate#push]] with `Z_FINISH` / `true` param). + **/ + +/** + * Inflate.err -> Number + * + * Error code after inflate finished. 0 (Z_OK) on success. + * Should be checked if broken data possible. 
+ **/ + +/** + * Inflate.msg -> String + * + * Error message, if [[Inflate.err]] != 0 + **/ + + +/** + * new Inflate(options) + * - options (Object): zlib inflate options. + * + * Creates new inflator instance with specified params. Throws exception + * on bad params. Supported options: + * + * - `windowBits` + * - `dictionary` + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information on these. + * + * Additional options, for internal needs: + * + * - `chunkSize` - size of generated data chunks (16K by default) + * - `raw` (Boolean) - do raw inflate + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * By default, when no options set, autodetect deflate/gzip data format via + * wrapper header. + * + * ##### Example: + * + * ```javascript + * const pako = require('pako') + * const chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9]) + * const chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]); + * + * const inflate = new pako.Inflate({ level: 3}); + * + * inflate.push(chunk1, false); + * inflate.push(chunk2, true); // true -> last chunk + * + * if (inflate.err) { throw new Error(inflate.err); } + * + * console.log(inflate.result); + * ``` + **/ +function Inflate$1(options) { + this.options = common.assign({ + chunkSize: 1024 * 64, + windowBits: 15, + to: '' + }, options || {}); + + const opt = this.options; + + // Force window size for `raw` data, if not set directly, + // because we have no header for autodetect. + if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) { + opt.windowBits = -opt.windowBits; + if (opt.windowBits === 0) { opt.windowBits = -15; } + } + + // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate + if ((opt.windowBits >= 0) && (opt.windowBits < 16) && + !(options && options.windowBits)) { + opt.windowBits += 32; + } + + // Gzip header has no info about windows size, we can do autodetect only + // for deflate. 
So, if window size not set, force it to max when gzip possible + if ((opt.windowBits > 15) && (opt.windowBits < 48)) { + // bit 3 (16) -> gzipped data + // bit 4 (32) -> autodetect gzip/deflate + if ((opt.windowBits & 15) === 0) { + opt.windowBits |= 15; + } + } + + this.err = 0; // error code, if happens (0 = Z_OK) + this.msg = ''; // error message + this.ended = false; // used to avoid multiple onEnd() calls + this.chunks = []; // chunks of compressed data + + this.strm = new zstream(); + this.strm.avail_out = 0; + + let status = inflate_1$2.inflateInit2( + this.strm, + opt.windowBits + ); + + if (status !== Z_OK) { + throw new Error(messages[status]); + } + + this.header = new gzheader(); + + inflate_1$2.inflateGetHeader(this.strm, this.header); + + // Setup dictionary + if (opt.dictionary) { + // Convert data if needed + if (typeof opt.dictionary === 'string') { + opt.dictionary = strings.string2buf(opt.dictionary); + } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') { + opt.dictionary = new Uint8Array(opt.dictionary); + } + if (opt.raw) { //In raw mode we need to set the dictionary early + status = inflate_1$2.inflateSetDictionary(this.strm, opt.dictionary); + if (status !== Z_OK) { + throw new Error(messages[status]); + } + } + } +} + +/** + * Inflate#push(data[, flush_mode]) -> Boolean + * - data (Uint8Array|ArrayBuffer): input data + * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE + * flush modes. See constants. Skipped or `false` means Z_NO_FLUSH, + * `true` means Z_FINISH. + * + * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with + * new output chunks. Returns `true` on success. If end of stream detected, + * [[Inflate#onEnd]] will be called. + * + * `flush_mode` is not needed for normal operation, because end of stream + * detected automatically. You may try to use it for advanced things, but + * this functionality was not tested. + * + * On fail call [[Inflate#onEnd]] with error code and return false. + * + * ##### Example + * + * ```javascript + * push(chunk, false); // push one of data chunks + * ... + * push(chunk, true); // push last chunk + * ``` + **/ +Inflate$1.prototype.push = function (data, flush_mode) { + const strm = this.strm; + const chunkSize = this.options.chunkSize; + const dictionary = this.options.dictionary; + let status, _flush_mode, last_avail_out; + + if (this.ended) return false; + + if (flush_mode === ~~flush_mode) _flush_mode = flush_mode; + else _flush_mode = flush_mode === true ? 
Z_FINISH : Z_NO_FLUSH; + + // Convert data if needed + if (toString.call(data) === '[object ArrayBuffer]') { + strm.input = new Uint8Array(data); + } else { + strm.input = data; + } + + strm.next_in = 0; + strm.avail_in = strm.input.length; + + for (;;) { + if (strm.avail_out === 0) { + strm.output = new Uint8Array(chunkSize); + strm.next_out = 0; + strm.avail_out = chunkSize; + } + + status = inflate_1$2.inflate(strm, _flush_mode); + + if (status === Z_NEED_DICT && dictionary) { + status = inflate_1$2.inflateSetDictionary(strm, dictionary); + + if (status === Z_OK) { + status = inflate_1$2.inflate(strm, _flush_mode); + } else if (status === Z_DATA_ERROR) { + // Replace code with more verbose + status = Z_NEED_DICT; + } + } + + // Skip snyc markers if more data follows and not raw mode + while (strm.avail_in > 0 && + status === Z_STREAM_END && + strm.state.wrap > 0 && + data[strm.next_in] !== 0) + { + inflate_1$2.inflateReset(strm); + status = inflate_1$2.inflate(strm, _flush_mode); + } + + switch (status) { + case Z_STREAM_ERROR: + case Z_DATA_ERROR: + case Z_NEED_DICT: + case Z_MEM_ERROR: + this.onEnd(status); + this.ended = true; + return false; + } + + // Remember real `avail_out` value, because we may patch out buffer content + // to align utf8 strings boundaries. + last_avail_out = strm.avail_out; + + if (strm.next_out) { + if (strm.avail_out === 0 || status === Z_STREAM_END) { + + if (this.options.to === 'string') { + + let next_out_utf8 = strings.utf8border(strm.output, strm.next_out); + + let tail = strm.next_out - next_out_utf8; + let utf8str = strings.buf2string(strm.output, next_out_utf8); + + // move tail & realign counters + strm.next_out = tail; + strm.avail_out = chunkSize - tail; + if (tail) strm.output.set(strm.output.subarray(next_out_utf8, next_out_utf8 + tail), 0); + + this.onData(utf8str); + + } else { + this.onData(strm.output.length === strm.next_out ? strm.output : strm.output.subarray(0, strm.next_out)); + } + } + } + + // Must repeat iteration if out buffer is full + if (status === Z_OK && last_avail_out === 0) continue; + + // Finalize if end of stream reached. + if (status === Z_STREAM_END) { + status = inflate_1$2.inflateEnd(this.strm); + this.onEnd(status); + this.ended = true; + return true; + } + + if (strm.avail_in === 0) break; + } + + return true; +}; + + +/** + * Inflate#onData(chunk) -> Void + * - chunk (Uint8Array|String): output data. When string output requested, + * each chunk will be string. + * + * By default, stores data blocks in `chunks[]` property and glue + * those in `onEnd`. Override this handler, if you need another behaviour. + **/ +Inflate$1.prototype.onData = function (chunk) { + this.chunks.push(chunk); +}; + + +/** + * Inflate#onEnd(status) -> Void + * - status (Number): inflate status. 0 (Z_OK) on success, + * other if not. + * + * Called either after you tell inflate that the input stream is + * complete (Z_FINISH). By default - join collected chunks, + * free memory and fill `results` / `err` properties. + **/ +Inflate$1.prototype.onEnd = function (status) { + // On success - join + if (status === Z_OK) { + if (this.options.to === 'string') { + this.result = this.chunks.join(''); + } else { + this.result = common.flattenChunks(this.chunks); + } + } + this.chunks = []; + this.err = status; + this.msg = this.strm.msg; +}; + + +/** + * inflate(data[, options]) -> Uint8Array|String + * - data (Uint8Array|ArrayBuffer): input data to decompress. + * - options (Object): zlib inflate options. 
+ * + * Decompress `data` with inflate/ungzip and `options`. Autodetect + * format via wrapper header by default. That's why we don't provide + * separate `ungzip` method. + * + * Supported options are: + * + * - windowBits + * + * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced) + * for more information. + * + * Sugar (options): + * + * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify + * negative windowBits implicitly. + * - `to` (String) - if equal to 'string', then result will be converted + * from utf8 to utf16 (javascript) string. When string output requested, + * chunk length can differ from `chunkSize`, depending on content. + * + * + * ##### Example: + * + * ```javascript + * const pako = require('pako'); + * const input = pako.deflate(new Uint8Array([1,2,3,4,5,6,7,8,9])); + * let output; + * + * try { + * output = pako.inflate(input); + * } catch (err) { + * console.log(err); + * } + * ``` + **/ +function inflate$1(input, options) { + const inflator = new Inflate$1(options); + + inflator.push(input); + + // That will never happens, if you don't cheat with options :) + if (inflator.err) throw inflator.msg || messages[inflator.err]; + + return inflator.result; +} + + +/** + * inflateRaw(data[, options]) -> Uint8Array|String + * - data (Uint8Array|ArrayBuffer): input data to decompress. + * - options (Object): zlib inflate options. + * + * The same as [[inflate]], but creates raw data, without wrapper + * (header and adler32 crc). + **/ +function inflateRaw$1(input, options) { + options = options || {}; + options.raw = true; + return inflate$1(input, options); +} + + +/** + * ungzip(data[, options]) -> Uint8Array|String + * - data (Uint8Array|ArrayBuffer): input data to decompress. + * - options (Object): zlib inflate options. + * + * Just shortcut to [[inflate]], because it autodetects format + * by header.content. Done for convenience. 
+ **/ + + +var Inflate_1$1 = Inflate$1; +var inflate_2 = inflate$1; +var inflateRaw_1$1 = inflateRaw$1; +var ungzip$1 = inflate$1; +var constants = constants$2; + +var inflate_1$1 = { + Inflate: Inflate_1$1, + inflate: inflate_2, + inflateRaw: inflateRaw_1$1, + ungzip: ungzip$1, + constants: constants +}; + +const { Deflate, deflate, deflateRaw, gzip } = deflate_1$1; + +const { Inflate, inflate, inflateRaw, ungzip } = inflate_1$1; + + + +var Deflate_1 = Deflate; +var deflate_1 = deflate; +var deflateRaw_1 = deflateRaw; +var gzip_1 = gzip; +var Inflate_1 = Inflate; +var inflate_1 = inflate; +var inflateRaw_1 = inflateRaw; +var ungzip_1 = ungzip; +var constants_1 = constants$2; + +var pako = { + Deflate: Deflate_1, + deflate: deflate_1, + deflateRaw: deflateRaw_1, + gzip: gzip_1, + Inflate: Inflate_1, + inflate: inflate_1, + inflateRaw: inflateRaw_1, + ungzip: ungzip_1, + constants: constants_1 +}; + +export { Deflate_1 as Deflate, Inflate_1 as Inflate, constants_1 as constants, pako as default, deflate_1 as deflate, deflateRaw_1 as deflateRaw, gzip_1 as gzip, inflate_1 as inflate, inflateRaw_1 as inflateRaw, ungzip_1 as ungzip }; \ No newline at end of file From 91926d24b717e54fe31382e14515638c68ff535d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 24 Jan 2024 01:38:06 +0800 Subject: [PATCH 13/33] =?UTF-8?q?=E5=A4=84=E7=90=86=E4=B8=80=E4=B8=AAcore?= =?UTF-8?q?=5Ffunctional.py=E4=B8=AD=E5=87=BA=E7=8E=B0=E7=9A=84mermaid?= =?UTF-8?q?=E6=B8=B2=E6=9F=93=E7=89=B9=E4=BE=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- shared_utils/advanced_markdown_format.py | 29 ++++++++++++++++-------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/shared_utils/advanced_markdown_format.py b/shared_utils/advanced_markdown_format.py index 653cf07b..a015fd60 100644 --- a/shared_utils/advanced_markdown_format.py +++ b/shared_utils/advanced_markdown_format.py @@ -292,13 +292,25 @@ def close_up_code_segment_during_stream(gpt_reply): return gpt_reply +def special_render_issues_for_mermaid(text): + # 用不太优雅的方式处理一个core_functional.py中出现的mermaid渲染特例: + # 我不希望"总结绘制脑图"prompt中的mermaid渲染出来 + @lru_cache(maxsize=1) + def get_special_case(): + from core_functional import get_core_functions + special_case = get_core_functions()["总结绘制脑图"]["Suffix"] + return special_case + if text.endswith(get_special_case()): text = text.replace("```mermaid", "```") + return text + + def compat_non_markdown_input(text): """ 改善非markdown输入的显示效果,例如将空格转换为 ,将换行符转换为
等。 """ - if "```" in text: # careful input:markdown输入 + text = special_render_issues_for_mermaid(text) # 处理特殊的渲染问题 return text elif "" in text: # careful input:html输入 @@ -313,19 +325,18 @@ def compat_non_markdown_input(text): @lru_cache(maxsize=128) # 使用lru缓存 -def simple_markdown_convertion(txt): +def simple_markdown_convertion(text): pre = '
' suf = "
" - if txt.startswith(pre) and txt.endswith(suf): - return txt # 已经被转化过,不需要再次转化 - - txt = compat_non_markdown_input(txt) # 兼容非markdown输入 - txt = markdown.markdown( - txt, + if text.startswith(pre) and text.endswith(suf): + return text # 已经被转化过,不需要再次转化 + text = compat_non_markdown_input(text) # 兼容非markdown输入 + text = markdown.markdown( + text, extensions=["pymdownx.superfences", "tables", "pymdownx.highlight"], extension_configs=code_highlight_configs, ) - return pre + txt + suf + return pre + text + suf def format_io(self, y): From 4f9d40c14fa4424ac4654e3b703516ddb756b0e6 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 24 Jan 2024 01:42:31 +0800 Subject: [PATCH 14/33] =?UTF-8?q?=E5=88=A0=E9=99=A4=E5=86=97=E4=BD=99?= =?UTF-8?q?=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core_functional.py | 2 -- crazy_functions/高级功能函数模板.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/core_functional.py b/core_functional.py index f9b1abcd..1f697929 100644 --- a/core_functional.py +++ b/core_functional.py @@ -3,7 +3,6 @@ # 'stop' 颜色对应 theme.py 中的 color_er import importlib from toolbox import clear_line_break -from toolbox import build_gpt_academic_masked_string from toolbox import apply_gpt_academic_string_mask_langbased from toolbox import build_gpt_academic_masked_string_langbased from textwrap import dedent @@ -43,7 +42,6 @@ def get_core_functions(): # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": # dedent() 函数用于去除多行字符串的缩进 - # ```{build_gpt_academic_masked_string(text_show_llm="mermaid", text_show_render="")} dedent("\n"+f''' ============================== diff --git a/crazy_functions/高级功能函数模板.py b/crazy_functions/高级功能函数模板.py index dfbc3d73..d22a6741 100644 --- a/crazy_functions/高级功能函数模板.py +++ b/crazy_functions/高级功能函数模板.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, update_ui, build_gpt_academic_masked_string +from toolbox import CatchException, update_ui from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive import datetime From c847209ac9fbba08fef9b22c863e113695ff78aa Mon Sep 17 00:00:00 2001 From: Menghuan1918 Date: Wed, 24 Jan 2024 17:44:54 +0800 Subject: [PATCH 15/33] Update "Generate multiple Mermaid charts" plugin with md file read (#1506) * Update crazy_functional.py with new functionality deal with PDF * Update crazy_functional.py and Mermaid.py for plugin_kwargs * Update crazy_functional.py with new chart type: mind map * Update SELECT_PROMPT and i_say_show_user messages * Update ArgsReminder message in get_crazy_functions() function * Update with read md file and update PROMPTS * Return the PROMPTS as the test found that the initial version worked best * Update Mermaid chart generation function --- crazy_functional.py | 4 +- crazy_functions/生成多种Mermaid图表.py | 67 ++++++++++++++++++++------ 2 files changed, 55 insertions(+), 16 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index f957b74e..b6f4a032 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -70,14 +70,14 @@ def get_crazy_functions(): "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", "Function": HotReload(清除缓存), }, - "生成多种Mermaid图表(从当前对话或文件(.pdf)中生产图表)": { + "生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": { "Group": "对话", "Color": "stop", "AsButton": False, "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断", "Function": HotReload(生成多种Mermaid图表), "AdvancedArgs": True, - "ArgsReminder": "请输入图类型对应的数字:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图", + "ArgsReminder": 
"请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图", }, "批量总结Word文档": { "Group": "学术", diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py index 720c64e6..4c484fc1 100644 --- a/crazy_functions/生成多种Mermaid图表.py +++ b/crazy_functions/生成多种Mermaid图表.py @@ -3,7 +3,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive from .crazy_utils import read_and_clean_pdf_text import datetime -#暂时只写了这几种的PROMPT +#以下是每类图表的PROMPT SELECT_PROMPT = """ “{subject}” ============= @@ -18,6 +18,7 @@ SELECT_PROMPT = """ 8 象限提示图 不需要解释原因,仅需要输出单个不带任何标点符号的数字。 """ +#没有思维导图!!!测试发现模型始终会优先选择思维导图 #流程图 PROMPT_1 = """ 请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例: @@ -135,6 +136,31 @@ graph LR D[Soft skill] --> F(Communication) ``` """ +#思维导图 +PROMPT_9 = """ +{subject} +========== +请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例: +```mermaid +mindmap + root((mindmap)) + Origins + Long history + ::icon(fa fa-book) + Popularisation + British popular psychology author Tony Buzan + Research + On effectiveness
and features + On Automatic creation + Uses + Creative techniques + Strategic planning + Argument mapping + Tools + Pen and paper + Mermaid +``` +""" def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): ############################## <第 0 步,切割输入> ################################## @@ -166,11 +192,11 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") gpt_say = plugin_kwargs.get("advanced_arg", "") #将图表类型参数赋值为插件参数 results_txt = '\n'.join(results) #合并摘要 - if gpt_say not in ['1','2','3','4','5','6','7','8']: #如插件参数不正确则使用对话模型判断 + if gpt_say not in ['1','2','3','4','5','6','7','8','9']: #如插件参数不正确则使用对话模型判断 i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。" # 用户提示 chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # 更新UI i_say = SELECT_PROMPT.format(subject=results_txt) - i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图' + i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。' for i in range(3): gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=i_say, @@ -178,9 +204,9 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], sys_prompt="" ) - if gpt_say in ['1','2','3','4','5','6','7','8']: #判断返回是否正确 + if gpt_say in ['1','2','3','4','5','6','7','8','9']: #判断返回是否正确 break - if gpt_say not in ['1','2','3','4','5','6','7','8']: + if gpt_say not in ['1','2','3','4','5','6','7','8','9']: gpt_say = '1' ############################## <第 3 步,根据选择的图表类型绘制图表> ################################## if gpt_say == '1': @@ -199,12 +225,14 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): i_say = PROMPT_7.replace("{subject}", results_txt) #由于实体关系图用到了{}符号 elif gpt_say == '8': i_say = PROMPT_8.format(subject=results_txt) - i_say_show_user = f'请根据判断结果绘制相应的图表。' + elif gpt_say == '9': + i_say = PROMPT_9.format(subject=results_txt) + i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。' gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( inputs=i_say, inputs_show_user=i_say_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="" + sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。" ) history.append(gpt_say) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 @@ -215,13 +243,22 @@ def 输入区文件处理(txt): import glob from .crazy_utils import get_files_from_everything file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') - if not file_pdf or len(pdf_manifest) == 0: - return False, txt #如不是pdf文件则返回输入区内容 + file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md') + if len(pdf_manifest) == 0 and len(md_manifest) == 0: + return False, txt #如输入区内容不是文件则直接返回输入区内容 + final_result = "" - for index, fp in enumerate(pdf_manifest): - file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - final_result += file_content + if file_pdf: + for index, fp in enumerate(pdf_manifest): + file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF + file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars + final_result += "\n" + file_content + if file_md: 
+ for index, fp in enumerate(md_manifest): + with open(fp, 'r', encoding='utf-8', errors='replace') as f: + file_content = f.read() + file_content = file_content.encode('utf-8', 'ignore').decode() + final_result += "\n" + file_content return True, final_result @CatchException @@ -240,7 +277,7 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, # 基本信息:功能、贡献者 chatbot.append([ "函数插件功能?", - "根据当前聊天历史或PDF中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ + "根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 @@ -256,6 +293,8 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, if os.path.exists(txt): #如输入区无内容则直接解析历史记录 file_exist, txt = 输入区文件处理(txt) + else: + file_exist = False if file_exist : history = [] #如输入区内容为文件则清空历史记录 history.append(txt) #将解析后的txt传递加入到历史中 From a93bf4410d67684ec96a7685d07869973b0afb64 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 25 Jan 2024 22:18:43 +0800 Subject: [PATCH 16/33] version 3.71 --- core_functional.py | 2 +- version | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core_functional.py b/core_functional.py index 1f697929..4074cddb 100644 --- a/core_functional.py +++ b/core_functional.py @@ -42,7 +42,7 @@ def get_core_functions(): # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来 "Suffix": # dedent() 函数用于去除多行字符串的缩进 - dedent("\n"+f''' + dedent("\n"+r''' ============================== 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如: diff --git a/version b/version index ffc83b36..2dc65028 100644 --- a/version +++ b/version @@ -1,5 +1,5 @@ { - "version": 3.70, + "version": 3.71, "show_feature": true, - "new_feature": "支持Mermaid绘图库(让大模型绘制脑图) <-> 支持Gemini-pro <-> 支持直接拖拽文件到上传区 <-> 支持将图片粘贴到输入区 <-> 修复若干隐蔽的内存BUG <-> 修复多用户冲突问题 <-> 接入Deepseek Coder <-> AutoGen多智能体插件测试版" + "new_feature": "用绘图功能增强部分插件 <-> 基础功能区支持自动切换中英提示词 <-> 支持Mermaid绘图库(让大模型绘制脑图) <-> 支持Gemini-pro <-> 支持直接拖拽文件到上传区 <-> 支持将图片粘贴到输入区" } From f889ef7625f4204f25f737325d60b0e727f54ea5 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Thu, 25 Jan 2024 22:42:08 +0800 Subject: [PATCH 17/33] =?UTF-8?q?=E8=A7=A3=E5=86=B3issues=20#1510?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/Latex输出PDF结果.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py index 2dcaf66d..c520006f 100644 --- a/crazy_functions/Latex输出PDF结果.py +++ b/crazy_functions/Latex输出PDF结果.py @@ -1,7 +1,7 @@ from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str from functools import partial -import glob, os, requests, time +import glob, os, requests, time, tarfile pj = os.path.join ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") @@ -104,7 +104,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True): if ('.' 
in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID txt = 'https://arxiv.org/abs/' + txt[:10] if not txt.startswith('https://arxiv.org'): - return txt, None + return txt, None # 是本地文件,跳过下载 # <-------------- inspect format -------------> chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) @@ -250,7 +250,14 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, # <-------------- clear history and read input -------------> history = [] - txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) + try: + txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) + except tarfile.ReadError as e: + yield from update_ui_lastest_msg( + "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", + chatbot=chatbot, history=history) + return + if txt.endswith('.pdf'): report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 From 45fa0404ebff7eec784ab1ed58dd96efa9cbfc4e Mon Sep 17 00:00:00 2001 From: hongyi-zhao Date: Fri, 26 Jan 2024 16:36:23 +0800 Subject: [PATCH 18/33] Update bridge_all.py: supports gpt-4-turbo-preview (#1517) * Update bridge_all.py: supports gpt-4-turbo-preview supports gpt-4-turbo-preview * Update bridge_all.py --------- Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> --- request_llms/bridge_all.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 14352475..575ac631 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -150,6 +150,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-turbo-preview": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 128000, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + "gpt-4-1106-preview": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -159,6 +168,15 @@ model_info = { "token_cnt": get_token_num_gpt4, }, + "gpt-4-0125-preview": { + "fn_with_ui": chatgpt_ui, + "fn_without_ui": chatgpt_noui, + "endpoint": openai_endpoint, + "max_token": 128000, + "tokenizer": tokenizer_gpt4, + "token_cnt": get_token_num_gpt4, + }, + "gpt-3.5-random": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, From 49c6fcfe97c4756996e42bc0cbc8a71e76276f00 Mon Sep 17 00:00:00 2001 From: hongyi-zhao Date: Fri, 26 Jan 2024 16:44:32 +0800 Subject: [PATCH 19/33] Update config.py: supports gpt-4-turbo-preview (#1516) * Update config.py: supports gpt-4-turbo-preview supports gpt-4-turbo-preview * Update config.py --------- Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> --- config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/config.py b/config.py index 1574a0fd..45fafbd4 100644 --- a/config.py +++ b/config.py @@ -86,9 +86,9 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) -LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview", - "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", +LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓ +AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", + "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "gemini-pro", 
"chatglm3", "claude-2", "zhipuai"] # P.S. 其他可用的模型还包括 [ From cdb5288f49bf053aad15d8d5c85a571a9ce0aaa3 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Fri, 2 Feb 2024 17:47:35 +0800 Subject: [PATCH 20/33] fix issue #1532 --- docs/GithubAction+NoLocal+AudioAssistant | 2 +- docs/use_audio.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/GithubAction+NoLocal+AudioAssistant b/docs/GithubAction+NoLocal+AudioAssistant index 9ea1ea54..6d6dab0a 100644 --- a/docs/GithubAction+NoLocal+AudioAssistant +++ b/docs/GithubAction+NoLocal+AudioAssistant @@ -13,7 +13,7 @@ COPY . . RUN pip3 install -r requirements.txt # 安装语音插件的额外依赖 -RUN pip3 install pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git +RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git # 可选步骤,用于预热模块 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' diff --git a/docs/use_audio.md b/docs/use_audio.md index 337c7868..0889325c 100644 --- a/docs/use_audio.md +++ b/docs/use_audio.md @@ -3,7 +3,7 @@ ## 1. 安装额外依赖 ``` -pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git +pip install --upgrade pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git ``` 如果因为特色网络问题导致上述命令无法执行: From c27e559f10864f54647215b64cdb3f11b5eb6192 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 6 Feb 2024 17:51:47 +0800 Subject: [PATCH 21/33] match sess-* key --- shared_utils/key_pattern_manager.py | 2 +- tests/test_key_pattern_manager.py | 41 +++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 tests/test_key_pattern_manager.py diff --git a/shared_utils/key_pattern_manager.py b/shared_utils/key_pattern_manager.py index eed2af95..a617b7fd 100644 --- a/shared_utils/key_pattern_manager.py +++ b/shared_utils/key_pattern_manager.py @@ -14,7 +14,7 @@ def is_openai_api_key(key): if len(CUSTOM_API_KEY_PATTERN) != 0: API_MATCH_ORIGINAL = re.match(CUSTOM_API_KEY_PATTERN, key) else: - API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$", key) + API_MATCH_ORIGINAL = re.match(r"sk-[a-zA-Z0-9]{48}$|sess-[a-zA-Z0-9]{40}$", key) return bool(API_MATCH_ORIGINAL) diff --git a/tests/test_key_pattern_manager.py b/tests/test_key_pattern_manager.py new file mode 100644 index 00000000..bf84441b --- /dev/null +++ b/tests/test_key_pattern_manager.py @@ -0,0 +1,41 @@ +import unittest + +def validate_path(): + import os, sys + + os.path.dirname(__file__) + root_dir_assume = os.path.abspath(os.path.dirname(__file__) + "/..") + os.chdir(root_dir_assume) + sys.path.append(root_dir_assume) + + +validate_path() # validate path so you can run from base directory + +from shared_utils.key_pattern_manager import is_openai_api_key + +class TestKeyPatternManager(unittest.TestCase): + def test_is_openai_api_key_with_valid_key(self): + key = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + self.assertTrue(is_openai_api_key(key)) + + key = "sx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + self.assertFalse(is_openai_api_key(key)) + + key = "sess-wg61ZafYHpNz7FFwIH7HGZlbVqUVaeV5tatHCWpl" + self.assertTrue(is_openai_api_key(key)) + + key = "sess-wg61ZafYHpNz7FFwIH7HGZlbVqUVa5tatHCWpl" + self.assertFalse(is_openai_api_key(key)) + + + def test_is_openai_api_key_with_invalid_key(self): + key = "invalid_key" + self.assertFalse(is_openai_api_key(key)) + + def test_is_openai_api_key_with_custom_pattern(self): + # Assuming you 
have set a custom pattern in your configuration + key = "custom-pattern-key" + self.assertFalse(is_openai_api_key(key)) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From 6c13bb7b46519312222f9afacedaa16225b673a9 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Tue, 6 Feb 2024 17:59:09 +0800 Subject: [PATCH 22/33] patch issue #1538 --- main.py | 48 ++++++++++++++++++++++++------------------------ themes/common.js | 6 +++--- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/main.py b/main.py index 92056d9b..7f84b919 100644 --- a/main.py +++ b/main.py @@ -1,9 +1,9 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染 help_menu_description = \ -"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), +"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), 感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors). -
常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), +
常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki), 如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
普通对话使用说明: 1. 输入问题; 2. 点击提交
基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮 @@ -15,7 +15,7 @@ help_menu_description = \ def main(): import gradio as gr - if gr.__version__ not in ['3.32.6', '3.32.7']: + if gr.__version__ not in ['3.32.6', '3.32.7', '3.32.8']: raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") from request_llms.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith @@ -33,7 +33,7 @@ def main(): from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie title_html = f"
GPT 学术优化 {get_current_version()}
{theme_declaration}" - + # 问询记录, python 版本建议3.9+(越新越好) import logging, uuid os.makedirs(PATH_LOGGING, exist_ok=True) @@ -93,7 +93,7 @@ def main(): resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm") stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm") clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm") - if ENABLE_AUDIO: + if ENABLE_AUDIO: with gr.Row(): audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False) with gr.Row(): @@ -114,7 +114,7 @@ def main(): with gr.Row(): gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)") with gr.Row(elem_id="input-plugin-group"): - plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS, + plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS, multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False) with gr.Row(): for k, plugin in plugins.items(): @@ -122,7 +122,7 @@ def main(): visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False variant = plugins[k]["Color"] if "Color" in plugin else "secondary" info = plugins[k].get("Info", k) - plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, + plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant, visible=visible, info_str=f'函数插件区: {info}').style(size="sm") with gr.Row(): with gr.Accordion("更多函数插件", open=True): @@ -134,7 +134,7 @@ def main(): with gr.Row(): dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False) with gr.Row(): - plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, + plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False, placeholder="这里是特殊函数插件的高级参数输入区").style(container=False) with gr.Row(): switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm") @@ -148,7 +148,7 @@ def main(): with gr.Tab("上传文件", elem_id="interact-panel"): gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。") file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float") - + with gr.Tab("更换模型", elem_id="interact-panel"): md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False) top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",) @@ -158,9 +158,9 @@ def main(): with gr.Tab("界面外观", elem_id="interact-panel"): theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], + checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) - checkboxes_2 = gr.CheckboxGroup(["自定义菜单"], + checkboxes_2 = gr.CheckboxGroup(["自定义菜单"], value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False) dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm") dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode) @@ -217,7 +217,7 @@ def main(): persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict ret.update({persistent_cookie: persistent_cookie_}) # write persistent 
cookie return ret - + def reflesh_btn(persistent_cookie_, cookies_): ret = {} for k in customize_btns: @@ -225,7 +225,7 @@ def main(): try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict except: return ret - + customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {}) cookies_['customize_fn_overwrite'] = customize_fn_overwrite_ ret.update({cookies: cookies_}) @@ -235,9 +235,9 @@ def main(): if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])}) else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])}) return ret - + basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()]) - h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix], + h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix], [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) # save persistent cookie h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""") @@ -321,7 +321,7 @@ def main(): else: css_part2 = adjust_theme()._get_theme_css() return css_part2 + css_part1 - + theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css]) theme_handle.then( None, @@ -346,13 +346,13 @@ def main(): if not group_list: # 处理特殊情况:没有选择任何插件组 return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])] for k, plugin in plugins.items(): - if plugin.get("AsButton", True): + if plugin.get("AsButton", True): btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮 if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示 elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表 return [*btn_list, gr.Dropdown.update(choices=fns_list)] plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown]) - if ENABLE_AUDIO: + if ENABLE_AUDIO: from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution rad = RealtimeAudioDistribution() def deal_audio(audio, cookies): @@ -365,7 +365,7 @@ def main(): demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init) demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题 demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}') - + # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 def run_delayed_tasks(): import threading, webbrowser, time @@ -376,7 +376,7 @@ def main(): def auto_updates(): time.sleep(0); auto_update() def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}") def warm_up_mods(): time.sleep(6); warm_up_modules() - + threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新 threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面 threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块 @@ -384,21 +384,21 @@ def main(): run_delayed_tasks() demo.queue(concurrency_count=CONCURRENT_COUNT).launch( quiet=True, - 
server_name="0.0.0.0", + server_name="0.0.0.0", ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE, ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE, ssl_verify=False, server_port=PORT, - favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), + favicon_path=os.path.join(os.path.dirname(__file__), "docs/logo.png"), auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None, blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) # 如果需要在二级路径下运行 # CUSTOM_PATH = get_conf('CUSTOM_PATH') - # if CUSTOM_PATH != "/": + # if CUSTOM_PATH != "/": # from toolbox import run_gradio_in_subpath # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH) - # else: + # else: # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png", # blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"]) diff --git a/themes/common.js b/themes/common.js index 044dba29..5e2d8c22 100644 --- a/themes/common.js +++ b/themes/common.js @@ -242,14 +242,14 @@ function do_something_but_not_too_frequently(min_interval, func) { // 现在就执行 setTimeout(() => { func.apply(this, lastArgs); - }, 0); + }, 0); } else if (!timeoutID) { // 等一会执行 timeoutID = setTimeout(() => { timeoutID = null; lastInvocationTime = Date.now(); func.apply(this, lastArgs); - }, min_interval - (now - lastInvocationTime)); + }, min_interval - (now - lastInvocationTime)); } else { // 压根不执行 } @@ -349,7 +349,7 @@ function get_elements(consider_state_panel = false) { var chatbot_height = chatbot.style.height; // 交换输入区位置,使得输入区始终可用 if (!swapped) { - if (panel1.top != 0 && (panel1.bottom + panel1.top) / 2 < 0) { swap_input_area(); } + if (panel1.top != 0 && (0.9 * panel1.bottom + 0.1 * panel1.top) < 0) { swap_input_area(); } } else if (swapped) { if (panel2.top != 0 && panel2.top > 0) { swap_input_area(); } From 3025d5be45a8f50c86b23ed5d00ee3519a197db5 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:17:14 +0800 Subject: [PATCH 23/33] remove jsdelivr (#1547) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 22a92404..b612054d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -https://fastly.jsdelivr.net/gh/binary-husky/gradio-fix@gpt-academic/release/gradio-3.32.7-py3-none-any.whl +https://public.gpt-academic.top/publish/gradio-3.32.7-py3-none-any.whl pypdf2==2.12.1 zhipuai<2 tiktoken>=0.3.3 From 8814026ec33aa6e8f83c010f2a126fbe0cff7f91 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:25:01 +0800 Subject: [PATCH 24/33] fix gradio-client version (#1548) --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index b612054d..8f5e45ba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ https://public.gpt-academic.top/publish/gradio-3.32.7-py3-none-any.whl +gradio-client==0.8 pypdf2==2.12.1 zhipuai<2 tiktoken>=0.3.3 From b9b1e12dc90b2b13b0e8dd6425aa843a164af94d Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 12 Feb 2024 15:58:20 +0800 Subject: [PATCH 25/33] fix missing get_token_num method --- crazy_functions/解析JupyterNotebook.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crazy_functions/解析JupyterNotebook.py 
b/crazy_functions/解析JupyterNotebook.py index c1794d33..2f2c0883 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -12,6 +12,12 @@ class PaperFileGroup(): self.sp_file_index = [] self.sp_file_tag = [] + # count_token + from request_llms.bridge_all import model_info + enc = model_info["gpt-3.5-turbo"]['tokenizer'] + def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) + self.get_token_num = get_token_num + def run_file_split(self, max_token_limit=1900): """ 将长文本分离开来 @@ -54,7 +60,7 @@ def parseNotebook(filename, enable_markdown=1): Code += f"This is {idx+1}th code block: \n" Code += code+"\n" - return Code + return Code def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): From e0c5859cf92d94e7c985fdf70fe27c0bc3a8f405 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Mon, 12 Feb 2024 23:37:31 +0800 Subject: [PATCH 26/33] update Column min_width parameter --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 7f84b919..d715f30a 100644 --- a/main.py +++ b/main.py @@ -65,7 +65,7 @@ def main(): proxy_info = check_proxy(proxies) gr_L1 = lambda: gr.Row().style() - gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id) + gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400) if LAYOUT == "TOP-DOWN": gr_L1 = lambda: DummyWith() gr_L2 = lambda scale, elem_id: gr.Row() From 2e9b4a57701fb3ebe5b9295c27bb6b950720e7d0 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Wed, 14 Feb 2024 18:35:09 +0800 Subject: [PATCH 27/33] Merge Frontier, Update to Version 3.72 (#1553) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Zhipu sdk update 适配最新的智谱SDK,支持GLM4v (#1502) * 适配 google gemini 优化为从用户input中提取文件 * 适配最新的智谱SDK、支持glm-4v * requirements.txt fix * pending history check --------- Co-authored-by: binary-husky * Update "生成多种Mermaid图表" plugin: Separate out the file reading function (#1520) * Update crazy_functional.py with new functionality deal with PDF * Update crazy_functional.py and Mermaid.py for plugin_kwargs * Update crazy_functional.py with new chart type: mind map * Update SELECT_PROMPT and i_say_show_user messages * Update ArgsReminder message in get_crazy_functions() function * Update with read md file and update PROMPTS * Return the PROMPTS as the test found that the initial version worked best * Update Mermaid chart generation function * version 3.71 * 解决issues #1510 * Remove unnecessary text from sys_prompt in 解析历史输入 function * Remove sys_prompt message in 解析历史输入 function * Update bridge_all.py: supports gpt-4-turbo-preview (#1517) * Update bridge_all.py: supports gpt-4-turbo-preview supports gpt-4-turbo-preview * Update bridge_all.py --------- Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> * Update config.py: supports gpt-4-turbo-preview (#1516) * Update config.py: supports gpt-4-turbo-preview supports gpt-4-turbo-preview * Update config.py --------- Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> * Refactor 解析历史输入 function to handle file input * Update Mermaid chart generation functionality * rename files and functions --------- Co-authored-by: binary-husky Co-authored-by: hongyi-zhao Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> * 接入mathpix ocr功能 (#1468) * Update Latex输出PDF结果.py 借助mathpix实现了PDF翻译中文并重新编译PDF * 
Update config.py add mathpix appid & appkey * Add 'PDF翻译中文并重新编译PDF' feature to plugins. --------- Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> * fix zhipuai * check picture * remove glm-4 due to bug * 修改config * 检查MATHPIX_APPID * Remove unnecessary code and update function_plugins dictionary * capture non-standard token overflow * bug fix #1524 * change mermaid style * 支持mermaid 滚动放大缩小重置,鼠标滚动和拖拽 (#1530) * 支持mermaid 滚动放大缩小重置,鼠标滚动和拖拽 * 微调未果 先stage一下 * update --------- Co-authored-by: binary-husky Co-authored-by: binary-husky <96192199+binary-husky@users.noreply.github.com> * ver 3.72 * change live2d * save the status of ``clear btn` in cookie * 前端选择保持 * js ui bug fix * reset btn bug fix * update live2d tips * fix missing get_token_num method * fix live2d toggle switch * fix persistent custom btn with cookie * fix zhipuai feedback with core functionality * Refactor button update and clean up functions --------- Co-authored-by: XIao <46100050+Kilig947@users.noreply.github.com> Co-authored-by: Menghuan1918 Co-authored-by: hongyi-zhao Co-authored-by: Hao Ma <893017927@qq.com> Co-authored-by: zeyuan huang <599012428@qq.com> --- config.py | 41 +- crazy_functional.py | 32 +- crazy_functions/Latex输出PDF.py | 484 ++ crazy_functions/Latex输出PDF结果.py | 313 - crazy_functions/pdf_fns/parse_word.py | 85 + crazy_functions/生成多种Mermaid图表.py | 86 +- docs/translate_english.json | 2 +- docs/translate_japanese.json | 2 +- docs/translate_std.json | 2 +- docs/translate_traditionalchinese.json | 2 +- docs/waifu_plugin/autoload.js | 30 - main.py | 93 +- request_llms/bridge_all.py | 52 +- request_llms/bridge_chatgpt.py | 2 + request_llms/bridge_google_gemini.py | 4 + request_llms/bridge_zhipu.py | 65 +- request_llms/com_zhipuapi.py | 70 - request_llms/com_zhipuglm.py | 84 + requirements.txt | 6 +- tests/test_plugins.py | 6 +- themes/base64.mjs | 297 +- themes/common.css | 1 + themes/common.js | 173 +- themes/common.py | 7 +- themes/mermaid.min.js | 1590 +--- themes/mermaid_editor.js | 56 +- themes/mermaid_loader.js | 198 +- themes/pako.esm.mjs | 6878 +---------------- themes/theme.py | 109 +- themes/waifu_plugin/autoload.js | 0 .../waifu_plugin/flat-ui-icons-regular.eot | Bin .../waifu_plugin/flat-ui-icons-regular.svg | 0 .../waifu_plugin/flat-ui-icons-regular.ttf | Bin .../waifu_plugin/flat-ui-icons-regular.woff | Bin .../waifu_plugin/jquery-ui.min.js | 0 {docs => themes}/waifu_plugin/jquery.min.js | 0 {docs => themes}/waifu_plugin/live2d.js | 0 {docs => themes}/waifu_plugin/source | 0 {docs => themes}/waifu_plugin/waifu-tips.js | 4 +- {docs => themes}/waifu_plugin/waifu-tips.json | 28 +- {docs => themes}/waifu_plugin/waifu.css | 0 version | 4 +- 42 files changed, 1171 insertions(+), 9635 deletions(-) create mode 100644 crazy_functions/Latex输出PDF.py delete mode 100644 crazy_functions/Latex输出PDF结果.py create mode 100644 crazy_functions/pdf_fns/parse_word.py delete mode 100644 docs/waifu_plugin/autoload.js delete mode 100644 request_llms/com_zhipuapi.py create mode 100644 request_llms/com_zhipuglm.py create mode 100644 themes/waifu_plugin/autoload.js rename {docs => themes}/waifu_plugin/flat-ui-icons-regular.eot (100%) rename {docs => themes}/waifu_plugin/flat-ui-icons-regular.svg (100%) rename {docs => themes}/waifu_plugin/flat-ui-icons-regular.ttf (100%) rename {docs => themes}/waifu_plugin/flat-ui-icons-regular.woff (100%) rename {docs => themes}/waifu_plugin/jquery-ui.min.js (100%) rename {docs => themes}/waifu_plugin/jquery.min.js (100%) rename {docs => themes}/waifu_plugin/live2d.js 
(100%) rename {docs => themes}/waifu_plugin/source (100%) rename {docs => themes}/waifu_plugin/waifu-tips.js (92%) rename {docs => themes}/waifu_plugin/waifu-tips.json (86%) rename {docs => themes}/waifu_plugin/waifu.css (100%) diff --git a/config.py b/config.py index 45fafbd4..3a7ae413 100644 --- a/config.py +++ b/config.py @@ -2,8 +2,8 @@ 以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。 读取优先级:环境变量 > config_private.py > config.py --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- - All the following configurations also support using environment variables to override, - and the environment variable configuration format can be seen in docker-compose.yml. + All the following configurations also support using environment variables to override, + and the environment variable configuration format can be seen in docker-compose.yml. Configuration reading priority: environment variable > config_private.py > config.py """ @@ -33,7 +33,7 @@ else: # ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------ # 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!) -# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"} +# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"} # 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"} API_URL_REDIRECT = {} @@ -66,7 +66,7 @@ LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下 # 暗色模式 / 亮色模式 -DARK_MODE = True +DARK_MODE = True # 发送请求到OpenAI后,等待多久判定为超时 @@ -89,11 +89,11 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", - "gemini-pro", "chatglm3", "claude-2", "zhipuai"] + "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo", + "gemini-pro", "chatglm3", "claude-2"] # P.S. 
其他可用的模型还包括 [ # "moss", "qwen-turbo", "qwen-plus", "qwen-max" -# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613", +# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613", # "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama" # ] @@ -158,7 +158,7 @@ API_ORG = "" # 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md -SLACK_CLAUDE_BOT_ID = '' +SLACK_CLAUDE_BOT_ID = '' SLACK_CLAUDE_USER_TOKEN = '' @@ -195,7 +195,7 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" # 接入智谱大模型 ZHIPUAI_API_KEY = "" -ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4" +ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写 # # 火山引擎YUNQUE大模型 @@ -208,6 +208,11 @@ ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4" ANTHROPIC_API_KEY = "" +# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号 +MATHPIX_APPID = "" +MATHPIX_APPKEY = "" + + # 自定义API KEY格式 CUSTOM_API_KEY_PATTERN = "" @@ -224,8 +229,8 @@ HUGGINGFACE_ACCESS_TOKEN = "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" # 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space" GROBID_URLS = [ "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space", - "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space", - "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space", + "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space", + "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space", ] @@ -246,7 +251,7 @@ PATH_LOGGING = "gpt_log" # 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改 -WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", +WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules", "Nougat_Download", "AutoGen"] @@ -297,9 +302,8 @@ NUM_CUSTOM_BASIC_BTN = 4 │ ├── BAIDU_CLOUD_API_KEY │ └── BAIDU_CLOUD_SECRET_KEY │ -├── "zhipuai" 智谱AI大模型chatglm_turbo -│ ├── ZHIPUAI_API_KEY -│ └── ZHIPUAI_MODEL +├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型 +│ └── ZHIPUAI_API_KEY │ ├── "qwen-turbo" 等通义千问大模型 │ └── DASHSCOPE_API_KEY @@ -311,7 +315,7 @@ NUM_CUSTOM_BASIC_BTN = 4 ├── NEWBING_STYLE └── NEWBING_COOKIES - + 本地大模型示意图 │ ├── "chatglm3" @@ -351,6 +355,9 @@ NUM_CUSTOM_BASIC_BTN = 4 │ └── ALIYUN_SECRET │ └── PDF文档精准解析 - └── GROBID_URLS + ├── GROBID_URLS + ├── MATHPIX_APPID + └── MATHPIX_APPKEY + """ diff --git a/crazy_functional.py b/crazy_functional.py index b6f4a032..3e998e56 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -70,11 +70,11 @@ def get_crazy_functions(): "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数", "Function": HotReload(清除缓存), }, - "生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": { + "生成多种Mermaid图表(从当前对话或路径(.pdf/.md/.docx)中生产图表)": { "Group": "对话", "Color": "stop", "AsButton": False, - "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断", + "Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断", "Function": HotReload(生成多种Mermaid图表), "AdvancedArgs": True, "ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图", @@ -532,8 +532,9 @@ def get_crazy_functions(): print("Load function plugin failed") try: - from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比 - from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF + from 
crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比 + from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF + from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF function_plugins.update( { @@ -550,9 +551,9 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, - "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " - + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " - + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + r'If the term "agent" is used in this section, it should be translated to "智能体". ', "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695", "Function": HotReload(Latex翻译中文并重新编译PDF), }, @@ -561,11 +562,22 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, - "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " - + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " - + 'If the term "agent" is used in this section, it should be translated to "智能体". ', + "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + r'If the term "agent" is used in this section, it should be translated to "智能体". ', "Info": "本地Latex论文精细翻译 | 输入参数是路径", "Function": HotReload(Latex翻译中文并重新编译PDF), + }, + "PDF翻译中文并重新编译PDF(上传PDF)[需Latex]": { + "Group": "学术", + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " + r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + r'If the term "agent" is used in this section, it should be translated to "智能体". ', + "Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径", + "Function": HotReload(PDF翻译中文并重新编译PDF) } } ) diff --git a/crazy_functions/Latex输出PDF.py b/crazy_functions/Latex输出PDF.py new file mode 100644 index 00000000..919a1dbc --- /dev/null +++ b/crazy_functions/Latex输出PDF.py @@ -0,0 +1,484 @@ +from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone +from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str +from functools import partial +import glob, os, requests, time, json, tarfile + +pj = os.path.join +ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") + + +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". ' +def switch_prompt(pfg, mode, more_requirement): + """ + Generate prompts and system prompts based on the mode for proofreading or translating. + Args: + - pfg: Proofreader or Translator instance. + - mode: A string specifying the mode, either 'proofread' or 'translate_zh'. + + Returns: + - inputs_array: A list of strings containing prompts for users to respond to. + - sys_prompt_array: A list of strings containing prompts for system prompts. + """ + n_split = len(pfg.sp_file_contents) + if mode == 'proofread_en': + inputs_array = [r"Below is a section from an academic paper, proofread this section." + + r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement + + r"Answer me only with the revised text:" + + f"\n\n{frag}" for frag in pfg.sp_file_contents] + sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)] + elif mode == 'translate_zh': + inputs_array = [ + r"Below is a section from an English academic paper, translate it into Chinese. 
" + more_requirement + + r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + + r"Answer me only with the translated text:" + + f"\n\n{frag}" for frag in pfg.sp_file_contents] + sys_prompt_array = ["You are a professional translator." for _ in range(n_split)] + else: + assert False, "未知指令" + return inputs_array, sys_prompt_array + + +def desend_to_extracted_folder_if_exist(project_folder): + """ + Descend into the extracted folder if it exists, otherwise return the original folder. + + Args: + - project_folder: A string specifying the folder path. + + Returns: + - A string specifying the path to the extracted folder, or the original folder if there is no extracted folder. + """ + maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] + if len(maybe_dir) == 0: return project_folder + if maybe_dir[0].endswith('.extract'): return maybe_dir[0] + return project_folder + + +def move_project(project_folder, arxiv_id=None): + """ + Create a new work folder and copy the project folder to it. + + Args: + - project_folder: A string specifying the folder path of the project. + + Returns: + - A string specifying the path to the new work folder. + """ + import shutil, time + time.sleep(2) # avoid time string conflict + if arxiv_id is not None: + new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder') + else: + new_workfolder = f'{get_log_folder()}/{gen_time_str()}' + try: + shutil.rmtree(new_workfolder) + except: + pass + + # align subfolder if there is a folder wrapper + items = glob.glob(pj(project_folder, '*')) + items = [item for item in items if os.path.basename(item) != '__MACOSX'] + if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1: + if os.path.isdir(items[0]): project_folder = items[0] + + shutil.copytree(src=project_folder, dst=new_workfolder) + return new_workfolder + + +def arxiv_download(chatbot, history, txt, allow_cache=True): + def check_cached_translation_pdf(arxiv_id): + translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation') + if not os.path.exists(translation_dir): + os.makedirs(translation_dir) + target_file = pj(translation_dir, 'translate_zh.pdf') + if os.path.exists(target_file): + promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot) + target_file_compare = pj(translation_dir, 'comparison.pdf') + if os.path.exists(target_file_compare): + promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot) + return target_file + return False + + def is_float(s): + try: + float(s) + return True + except ValueError: + return False + + if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID + txt = 'https://arxiv.org/abs/' + txt.strip() + if ('.' 
in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID + txt = 'https://arxiv.org/abs/' + txt[:10] + + if not txt.startswith('https://arxiv.org'): + return txt, None # 是本地文件,跳过下载 + + # <-------------- inspect format -------------> + chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) + yield from update_ui(chatbot=chatbot, history=history) + time.sleep(1) # 刷新界面 + + url_ = txt # https://arxiv.org/abs/1707.06690 + if not txt.startswith('https://arxiv.org/abs/'): + msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。" + yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面 + return msg, None + # <-------------- set format -------------> + arxiv_id = url_.split('/abs/')[-1] + if 'v' in arxiv_id: arxiv_id = arxiv_id[:10] + cached_translation_pdf = check_cached_translation_pdf(arxiv_id) + if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id + + url_tar = url_.replace('/abs/', '/e-print/') + translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print') + extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract') + os.makedirs(translation_dir, exist_ok=True) + + # <-------------- download arxiv source file -------------> + dst = pj(translation_dir, arxiv_id + '.tar') + if os.path.exists(dst): + yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面 + else: + yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面 + proxies = get_conf('proxies') + r = requests.get(url_tar, proxies=proxies) + with open(dst, 'wb+') as f: + f.write(r.content) + # <-------------- extract file -------------> + yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面 + from toolbox import extract_archive + extract_archive(file_path=dst, dest_dir=extract_dst) + return extract_dst, arxiv_id + + +def pdf2tex_project(pdf_file_path): + # Mathpix API credentials + app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY') + headers = {"app_id": app_id, "app_key": app_key} + + # Step 1: Send PDF file for processing + options = { + "conversion_formats": {"tex.zip": True}, + "math_inline_delimiters": ["$", "$"], + "rm_spaces": True + } + + response = requests.post(url="https://api.mathpix.com/v3/pdf", + headers=headers, + data={"options_json": json.dumps(options)}, + files={"file": open(pdf_file_path, "rb")}) + + if response.ok: + pdf_id = response.json()["pdf_id"] + print(f"PDF processing initiated. 
PDF ID: {pdf_id}") + + # Step 2: Check processing status + while True: + conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers) + conversion_data = conversion_response.json() + + if conversion_data["status"] == "completed": + print("PDF processing completed.") + break + elif conversion_data["status"] == "error": + print("Error occurred during processing.") + else: + print(f"Processing status: {conversion_data['status']}") + time.sleep(5) # wait for a few seconds before checking again + + # Step 3: Save results to local files + output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output') + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex" + response = requests.get(url, headers=headers) + file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1]) + output_name = f"{file_name_wo_dot}.tex.zip" + output_path = os.path.join(output_dir, output_name) + with open(output_path, "wb") as output_file: + output_file.write(response.content) + print(f"tex.zip file saved at: {output_path}") + + import zipfile + unzip_dir = os.path.join(output_dir, file_name_wo_dot) + with zipfile.ZipFile(output_path, 'r') as zip_ref: + zip_ref.extractall(unzip_dir) + + return unzip_dir + + else: + print(f"Error sending PDF for processing. Status code: {response.status_code}") + return None + + +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + + +@CatchException +def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): + # <-------------- information about this plugin -------------> + chatbot.append(["函数插件功能?", + "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # <-------------- more requirements -------------> + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + more_req = plugin_kwargs.get("advanced_arg", "") + _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) + + # <-------------- check deps -------------> + try: + import glob, os, time, subprocess + subprocess.Popen(['pdflatex', '-version']) + from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex + except Exception as e: + chatbot.append([f"解析项目: {txt}", + f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- clear history and read input -------------> + history = [] + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + if len(file_manifest) == 0: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- if is a zip/tar file -------------> + project_folder = desend_to_extracted_folder_if_exist(project_folder) + + # <-------------- move latex project away from temp folder -------------> + project_folder = 
move_project(project_folder, arxiv_id=None) + + # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> + if not os.path.exists(project_folder + '/merge_proofread_en.tex'): + yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, + chatbot, history, system_prompt, mode='proofread_en', + switch_prompt=_switch_prompt_) + + # <-------------- compile PDF -------------> + success = yield from 编译Latex(chatbot, history, main_file_original='merge', + main_file_modified='merge_proofread_en', + work_folder_original=project_folder, work_folder_modified=project_folder, + work_folder=project_folder) + + # <-------------- zip PDF -------------> + zip_res = zip_result(project_folder) + if success: + chatbot.append((f"成功啦", '请查收结果(压缩包)...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + else: + chatbot.append((f"失败了", + '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + + # <-------------- we are done -------------> + return success + + +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +@CatchException +def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): + # <-------------- information about this plugin -------------> + chatbot.append([ + "函数插件功能?", + "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # <-------------- more requirements -------------> + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + more_req = plugin_kwargs.get("advanced_arg", "") + no_cache = more_req.startswith("--no-cache") + if no_cache: more_req.lstrip("--no-cache") + allow_cache = not no_cache + _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) + + # <-------------- check deps -------------> + try: + import glob, os, time, subprocess + subprocess.Popen(['pdflatex', '-version']) + from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex + except Exception as e: + chatbot.append([f"解析项目: {txt}", + f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- clear history and read input -------------> + history = [] + try: + txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) + except tarfile.ReadError as e: + yield from update_ui_lastest_msg( + "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", + chatbot=chatbot, history=history) + return + + if txt.endswith('.pdf'): + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + file_manifest = [f for f in 
glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + if len(file_manifest) == 0: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- if is a zip/tar file -------------> + project_folder = desend_to_extracted_folder_if_exist(project_folder) + + # <-------------- move latex project away from temp folder -------------> + project_folder = move_project(project_folder, arxiv_id) + + # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> + if not os.path.exists(project_folder + '/merge_translate_zh.tex'): + yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, + chatbot, history, system_prompt, mode='translate_zh', + switch_prompt=_switch_prompt_) + + # <-------------- compile PDF -------------> + success = yield from 编译Latex(chatbot, history, main_file_original='merge', + main_file_modified='merge_translate_zh', mode='translate_zh', + work_folder_original=project_folder, work_folder_modified=project_folder, + work_folder=project_folder) + + # <-------------- zip PDF -------------> + zip_res = zip_result(project_folder) + if success: + chatbot.append((f"成功啦", '请查收结果(压缩包)...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + else: + chatbot.append((f"失败了", + '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + + # <-------------- we are done -------------> + return success + + +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +@CatchException +def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): + # <-------------- information about this plugin -------------> + chatbot.append([ + "函数插件功能?", + "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # <-------------- more requirements -------------> + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") + more_req = plugin_kwargs.get("advanced_arg", "") + no_cache = more_req.startswith("--no-cache") + if no_cache: more_req.lstrip("--no-cache") + allow_cache = not no_cache + _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) + + # <-------------- check deps -------------> + try: + import glob, os, time, subprocess + subprocess.Popen(['pdflatex', '-version']) + from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex + except Exception as e: + chatbot.append([f"解析项目: {txt}", + f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- clear history and read input -------------> + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + file_manifest = [f for f in 
glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] + if len(file_manifest) == 0: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + if len(file_manifest) != 1: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY') + if len(app_id) == 0 or len(app_key) == 0: + report_exception(chatbot, history, a=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- convert pdf into tex -------------> + project_folder = pdf2tex_project(file_manifest[0]) + + # Translate English Latex to Chinese Latex, and compile it + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + if len(file_manifest) == 0: + report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # <-------------- if is a zip/tar file -------------> + project_folder = desend_to_extracted_folder_if_exist(project_folder) + + # <-------------- move latex project away from temp folder -------------> + project_folder = move_project(project_folder) + + # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> + if not os.path.exists(project_folder + '/merge_translate_zh.tex'): + yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, + chatbot, history, system_prompt, mode='translate_zh', + switch_prompt=_switch_prompt_) + + # <-------------- compile PDF -------------> + success = yield from 编译Latex(chatbot, history, main_file_original='merge', + main_file_modified='merge_translate_zh', mode='translate_zh', + work_folder_original=project_folder, work_folder_modified=project_folder, + work_folder=project_folder) + + # <-------------- zip PDF -------------> + zip_res = zip_result(project_folder) + if success: + chatbot.append((f"成功啦", '请查收结果(压缩包)...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + else: + chatbot.append((f"失败了", + '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) + yield from update_ui(chatbot=chatbot, history=history); + time.sleep(1) # 刷新界面 + promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) + + # <-------------- we are done -------------> + return success diff --git a/crazy_functions/Latex输出PDF结果.py b/crazy_functions/Latex输出PDF结果.py deleted file mode 100644 index c520006f..00000000 --- a/crazy_functions/Latex输出PDF结果.py +++ /dev/null @@ -1,313 +0,0 @@ -from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone -from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str -from functools import partial -import glob, os, requests, time, tarfile -pj = os.path.join -ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/") - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". ' -def switch_prompt(pfg, mode, more_requirement): - """ - Generate prompts and system prompts based on the mode for proofreading or translating. 
- Args: - - pfg: Proofreader or Translator instance. - - mode: A string specifying the mode, either 'proofread' or 'translate_zh'. - - Returns: - - inputs_array: A list of strings containing prompts for users to respond to. - - sys_prompt_array: A list of strings containing prompts for system prompts. - """ - n_split = len(pfg.sp_file_contents) - if mode == 'proofread_en': - inputs_array = [r"Below is a section from an academic paper, proofread this section." + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement + - r"Answer me only with the revised text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)] - elif mode == 'translate_zh': - inputs_array = [r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement + - r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + - r"Answer me only with the translated text:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - sys_prompt_array = ["You are a professional translator." for _ in range(n_split)] - else: - assert False, "未知指令" - return inputs_array, sys_prompt_array - -def desend_to_extracted_folder_if_exist(project_folder): - """ - Descend into the extracted folder if it exists, otherwise return the original folder. - - Args: - - project_folder: A string specifying the folder path. - - Returns: - - A string specifying the path to the extracted folder, or the original folder if there is no extracted folder. - """ - maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)] - if len(maybe_dir) == 0: return project_folder - if maybe_dir[0].endswith('.extract'): return maybe_dir[0] - return project_folder - -def move_project(project_folder, arxiv_id=None): - """ - Create a new work folder and copy the project folder to it. - - Args: - - project_folder: A string specifying the folder path of the project. - - Returns: - - A string specifying the path to the new work folder. - """ - import shutil, time - time.sleep(2) # avoid time string conflict - if arxiv_id is not None: - new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder') - else: - new_workfolder = f'{get_log_folder()}/{gen_time_str()}' - try: - shutil.rmtree(new_workfolder) - except: - pass - - # align subfolder if there is a folder wrapper - items = glob.glob(pj(project_folder,'*')) - items = [item for item in items if os.path.basename(item)!='__MACOSX'] - if len(glob.glob(pj(project_folder,'*.tex'))) == 0 and len(items) == 1: - if os.path.isdir(items[0]): project_folder = items[0] - - shutil.copytree(src=project_folder, dst=new_workfolder) - return new_workfolder - -def arxiv_download(chatbot, history, txt, allow_cache=True): - def check_cached_translation_pdf(arxiv_id): - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation') - if not os.path.exists(translation_dir): - os.makedirs(translation_dir) - target_file = pj(translation_dir, 'translate_zh.pdf') - if os.path.exists(target_file): - promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot) - target_file_compare = pj(translation_dir, 'comparison.pdf') - if os.path.exists(target_file_compare): - promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot) - return target_file - return False - def is_float(s): - try: - float(s) - return True - except ValueError: - return False - if ('.' 
in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt.strip() - if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID - txt = 'https://arxiv.org/abs/' + txt[:10] - if not txt.startswith('https://arxiv.org'): - return txt, None # 是本地文件,跳过下载 - - # <-------------- inspect format -------------> - chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...']) - yield from update_ui(chatbot=chatbot, history=history) - time.sleep(1) # 刷新界面 - - url_ = txt # https://arxiv.org/abs/1707.06690 - if not txt.startswith('https://arxiv.org/abs/'): - msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。" - yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面 - return msg, None - # <-------------- set format -------------> - arxiv_id = url_.split('/abs/')[-1] - if 'v' in arxiv_id: arxiv_id = arxiv_id[:10] - cached_translation_pdf = check_cached_translation_pdf(arxiv_id) - if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id - - url_tar = url_.replace('/abs/', '/e-print/') - translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print') - extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract') - os.makedirs(translation_dir, exist_ok=True) - - # <-------------- download arxiv source file -------------> - dst = pj(translation_dir, arxiv_id+'.tar') - if os.path.exists(dst): - yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面 - else: - yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面 - proxies = get_conf('proxies') - r = requests.get(url_tar, proxies=proxies) - with open(dst, 'wb+') as f: - f.write(r.content) - # <-------------- extract file -------------> - yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面 - from toolbox import extract_archive - extract_archive(file_path=dst, dest_dir=extract_dst) - return extract_dst, arxiv_id -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - - -@CatchException -def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # <-------------- information about this plugin -------------> - chatbot.append([ "函数插件功能?", - "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([ f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- clear history and read input -------------> - history = [] - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, 
history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id=None) - - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_proofread_en.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='proofread_en', switch_prompt=_switch_prompt_) - - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_proofread_en', - work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder) - - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - # <-------------- we are done -------------> - return success - -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -@CatchException -def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request): - # <-------------- information about this plugin -------------> - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------------- more requirements -------------> - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - more_req = plugin_kwargs.get("advanced_arg", "") - no_cache = more_req.startswith("--no-cache") - if no_cache: more_req.lstrip("--no-cache") - allow_cache = not no_cache - _switch_prompt_ = partial(switch_prompt, more_requirement=more_req) - - # <-------------- check deps -------------> - try: - import glob, os, time, subprocess - subprocess.Popen(['pdflatex', '-version']) - from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex - except Exception as e: - chatbot.append([ f"解析项目: {txt}", - f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- clear history and read input -------------> - history = [] - try: - txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache) - except tarfile.ReadError as e: - yield from update_ui_lastest_msg( - "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download 
source手动下载latex源码包。接下来调用本地Latex翻译插件即可。", - chatbot=chatbot, history=history) - return - - if txt.endswith('.pdf'): - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - - # <-------------- if is a zip/tar file -------------> - project_folder = desend_to_extracted_folder_if_exist(project_folder) - - - # <-------------- move latex project away from temp folder -------------> - project_folder = move_project(project_folder, arxiv_id) - - - # <-------------- if merge_translate_zh is already generated, skip gpt req -------------> - if not os.path.exists(project_folder + '/merge_translate_zh.tex'): - yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, - chatbot, history, system_prompt, mode='translate_zh', switch_prompt=_switch_prompt_) - - - # <-------------- compile PDF -------------> - success = yield from 编译Latex(chatbot, history, main_file_original='merge', main_file_modified='merge_translate_zh', mode='translate_zh', - work_folder_original=project_folder, work_folder_modified=project_folder, work_folder=project_folder) - - # <-------------- zip PDF -------------> - zip_res = zip_result(project_folder) - if success: - chatbot.append((f"成功啦", '请查收结果(压缩包)...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - else: - chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...')) - yield from update_ui(chatbot=chatbot, history=history); time.sleep(1) # 刷新界面 - promote_file_to_downloadzone(file=zip_res, chatbot=chatbot) - - - # <-------------- we are done -------------> - return success diff --git a/crazy_functions/pdf_fns/parse_word.py b/crazy_functions/pdf_fns/parse_word.py new file mode 100644 index 00000000..64d07dcd --- /dev/null +++ b/crazy_functions/pdf_fns/parse_word.py @@ -0,0 +1,85 @@ +from crazy_functions.crazy_utils import read_and_clean_pdf_text, get_files_from_everything +import os +import re +def extract_text_from_files(txt, chatbot, history): + """ + 查找pdf/md/word并获取文本内容并返回状态以及文本 + + 输入参数 Args: + chatbot: chatbot inputs and outputs (用户界面对话窗口句柄,用于数据流可视化) + history (list): List of chat history (历史,对话历史列表) + + 输出 Returns: + 文件是否存在(bool) + final_result(list):文本内容 + page_one(list):第一页内容/摘要 + file_manifest(list):文件路径 + excption(string):需要用户手动处理的信息,如没出错则保持为空 + """ + + final_result = [] + page_one = [] + file_manifest = [] + excption = "" + + if txt == "": + final_result.append(txt) + return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 + + #查找输入区内容中的文件 + file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') + file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md') + file_word,word_manifest,folder_word = get_files_from_everything(txt, '.docx') + file_doc,doc_manifest,folder_doc = get_files_from_everything(txt, 
'.doc') + + if file_doc: + excption = "word" + return False, final_result, page_one, file_manifest, excption + + file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest) + if file_num == 0: + final_result.append(txt) + return False, final_result, page_one, file_manifest, excption #如输入区内容不是文件则直接返回输入区内容 + + if file_pdf: + try: # 尝试导入依赖,如果缺少依赖,则给出安装建议 + import fitz + except: + excption = "pdf" + return False, final_result, page_one, file_manifest, excption + for index, fp in enumerate(pdf_manifest): + file_content, pdf_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF + file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars + pdf_one = str(pdf_one).encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars + final_result.append(file_content) + page_one.append(pdf_one) + file_manifest.append(os.path.relpath(fp, folder_pdf)) + + if file_md: + for index, fp in enumerate(md_manifest): + with open(fp, 'r', encoding='utf-8', errors='replace') as f: + file_content = f.read() + file_content = file_content.encode('utf-8', 'ignore').decode() + headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE) #接下来提取md中的一级/二级标题作为摘要 + if len(headers) > 0: + page_one.append("\n".join(headers)) #合并所有的标题,以换行符分割 + else: + page_one.append("") + final_result.append(file_content) + file_manifest.append(os.path.relpath(fp, folder_md)) + + if file_word: + try: # 尝试导入依赖,如果缺少依赖,则给出安装建议 + from docx import Document + except: + excption = "word_pip" + return False, final_result, page_one, file_manifest, excption + for index, fp in enumerate(word_manifest): + doc = Document(fp) + file_content = '\n'.join([p.text for p in doc.paragraphs]) + file_content = file_content.encode('utf-8', 'ignore').decode() + page_one.append(file_content[:200]) + final_result.append(file_content) + file_manifest.append(os.path.relpath(fp, folder_word)) + + return True, final_result, page_one, file_manifest, excption \ No newline at end of file diff --git a/crazy_functions/生成多种Mermaid图表.py b/crazy_functions/生成多种Mermaid图表.py index 4c484fc1..dc01e940 100644 --- a/crazy_functions/生成多种Mermaid图表.py +++ b/crazy_functions/生成多种Mermaid图表.py @@ -1,6 +1,5 @@ from toolbox import CatchException, update_ui, report_exception from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -from .crazy_utils import read_and_clean_pdf_text import datetime #以下是每类图表的PROMPT @@ -162,7 +161,7 @@ mindmap ``` """ -def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): +def 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs): ############################## <第 0 步,切割输入> ################################## # 借用PDF切割中的函数对文本进行切割 TOKEN_LIMIT_PER_FRAGMENT = 2500 @@ -170,8 +169,6 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model']) ############################## <第 1 步,迭代地历遍整个文章,提取精炼信息> ################################## - i_say_show_user = f'首先你从历史记录或文件中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 - chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI results = [] MAX_WORD_TOTAL = 4096 n_txt = len(txt) @@ -179,7 +176,7 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): if n_txt >= 20: print('文章极长,不能达到预期效果') for i in range(n_txt): NUM_OF_WORD = MAX_WORD_TOTAL // n_txt - i_say = f"Read this section, recapitulate the content of this section 
with less than {NUM_OF_WORD} words: {txt[i]}" + i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}" i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...." gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问 llm_kwargs, chatbot, @@ -232,34 +229,10 @@ def 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs): inputs=i_say, inputs_show_user=i_say_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="你精通使用mermaid语法来绘制图表,首先确保语法正确,其次避免在mermaid语法中使用不允许的字符,此外也应当分考虑图表的可读性。" + sys_prompt="" ) history.append(gpt_say) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - -def 输入区文件处理(txt): - if txt == "": return False, txt - success = True - import glob - from .crazy_utils import get_files_from_everything - file_pdf,pdf_manifest,folder_pdf = get_files_from_everything(txt, '.pdf') - file_md,md_manifest,folder_md = get_files_from_everything(txt, '.md') - if len(pdf_manifest) == 0 and len(md_manifest) == 0: - return False, txt #如输入区内容不是文件则直接返回输入区内容 - - final_result = "" - if file_pdf: - for index, fp in enumerate(pdf_manifest): - file_content, page_one = read_and_clean_pdf_text(fp) # (尝试)按照章节切割PDF - file_content = file_content.encode('utf-8', 'ignore').decode() # avoid reading non-utf8 chars - final_result += "\n" + file_content - if file_md: - for index, fp in enumerate(md_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - file_content = file_content.encode('utf-8', 'ignore').decode() - final_result += "\n" + file_content - return True, final_result @CatchException def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): @@ -277,26 +250,47 @@ def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, # 基本信息:功能、贡献者 chatbot.append([ "函数插件功能?", - "根据当前聊天历史或文件中(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ + "根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\ \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"]) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import fitz - except: - report_exception(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return if os.path.exists(txt): #如输入区无内容则直接解析历史记录 - file_exist, txt = 输入区文件处理(txt) + from crazy_functions.pdf_fns.parse_word import extract_text_from_files + file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history) else: file_exist = False + excption = "" + file_manifest = [] - if file_exist : history = [] #如输入区内容为文件则清空历史记录 - history.append(txt) #将解析后的txt传递加入到历史中 - - yield from 解析历史输入(history,llm_kwargs,chatbot,plugin_kwargs) \ No newline at end of file + if excption != "": + if excption == "word": + report_exception(chatbot, history, + a = f"解析项目: {txt}", + b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。") + + elif excption == "pdf": + report_exception(chatbot, history, + a = f"解析项目: {txt}", + b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。") + + elif excption == "word_pip": + report_exception(chatbot, history, + a=f"解析项目: {txt}", + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") + + yield from 
update_ui(chatbot=chatbot, history=history) # 刷新界面 + + else: + if not file_exist: + history.append(txt) #如输入区不是文件则将输入区内容加入历史记录 + i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。" # 用户提示 + chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI + yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs) + else: + file_num = len(file_manifest) + for i in range(file_num): #依次处理文件 + i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。" # 用户提示 + chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history) # 更新UI + history = [] #如输入区内容为文件则清空历史记录 + history.append(final_result[i]) + yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs) \ No newline at end of file diff --git a/docs/translate_english.json b/docs/translate_english.json index 3920e1f6..3a248fe3 100644 --- a/docs/translate_english.json +++ b/docs/translate_english.json @@ -1668,7 +1668,7 @@ "Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage", "Langchain知识库": "LangchainKnowledgeBase", "Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison", - "Latex输出PDF结果": "OutputPDFFromLatex", + "Latex输出PDF": "OutputPDFFromLatex", "Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF", "sprint亮靛": "SprintIndigo", "寻找Latex主文件": "FindLatexMainFile", diff --git a/docs/translate_japanese.json b/docs/translate_japanese.json index 142e4a69..a70f5df1 100644 --- a/docs/translate_japanese.json +++ b/docs/translate_japanese.json @@ -1492,7 +1492,7 @@ "交互功能模板函数": "InteractiveFunctionTemplateFunction", "交互功能函数模板": "InteractiveFunctionFunctionTemplate", "Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison", - "Latex输出PDF结果": "LatexOutputPDFResult", + "Latex输出PDF": "LatexOutputPDFResult", "Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF", "语音助手": "VoiceAssistant", "微调数据集生成": "FineTuneDatasetGeneration", diff --git a/docs/translate_std.json b/docs/translate_std.json index 961e595b..46ec5dc7 100644 --- a/docs/translate_std.json +++ b/docs/translate_std.json @@ -16,7 +16,7 @@ "批量Markdown翻译": "BatchTranslateMarkdown", "连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion", "Langchain知识库": "LangchainKnowledgeBase", - "Latex输出PDF结果": "OutputPDFFromLatex", + "Latex输出PDF": "OutputPDFFromLatex", "把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline", "Latex精细分解与转化": "DecomposeAndConvertLatex", "解析一个C项目的头文件": "ParseCProjectHeaderFiles", diff --git a/docs/translate_traditionalchinese.json b/docs/translate_traditionalchinese.json index 4edc65de..3378eda7 100644 --- a/docs/translate_traditionalchinese.json +++ b/docs/translate_traditionalchinese.json @@ -1468,7 +1468,7 @@ "交互功能模板函数": "InteractiveFunctionTemplateFunctions", "交互功能函数模板": "InteractiveFunctionFunctionTemplates", "Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison", - "Latex输出PDF结果": "OutputPDFFromLatex", + "Latex输出PDF": "OutputPDFFromLatex", "Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF", "语音助手": "VoiceAssistant", "微调数据集生成": "FineTuneDatasetGeneration", diff --git a/docs/waifu_plugin/autoload.js b/docs/waifu_plugin/autoload.js deleted file mode 100644 index d0648770..00000000 --- a/docs/waifu_plugin/autoload.js +++ /dev/null @@ -1,30 +0,0 @@ -try { - $("").attr({href: "file=docs/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css"}).appendTo('head'); - $('body').append('
'); - $.ajax({url: "file=docs/waifu_plugin/waifu-tips.js", dataType:"script", cache: true, success: function() { - $.ajax({url: "file=docs/waifu_plugin/live2d.js", dataType:"script", cache: true, success: function() { - /* 可直接修改部分参数 */ - live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // 一言 API - live2d_settings['modelId'] = 5; // 默认模型 ID - live2d_settings['modelTexturesId'] = 1; // 默认材质 ID - live2d_settings['modelStorage'] = false; // 不储存模型 ID - live2d_settings['waifuSize'] = '210x187'; - live2d_settings['waifuTipsSize'] = '187x52'; - live2d_settings['canSwitchModel'] = true; - live2d_settings['canSwitchTextures'] = true; - live2d_settings['canSwitchHitokoto'] = false; - live2d_settings['canTakeScreenshot'] = false; - live2d_settings['canTurnToHomePage'] = false; - live2d_settings['canTurnToAboutPage'] = false; - live2d_settings['showHitokoto'] = false; // 显示一言 - live2d_settings['showF12Status'] = false; // 显示加载状态 - live2d_settings['showF12Message'] = false; // 显示看板娘消息 - live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示 - live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示 - live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词 - - /* 在 initModel 前添加 */ - initModel("file=docs/waifu_plugin/waifu-tips.json"); - }}); - }}); -} catch(err) { console.log("[Error] JQuery is not defined.") } diff --git a/main.py b/main.py index d715f30a..6339e1c7 100644 --- a/main.py +++ b/main.py @@ -15,22 +15,22 @@ help_menu_description = \ def main(): import gradio as gr - if gr.__version__ not in ['3.32.6', '3.32.7', '3.32.8']: + if gr.__version__ not in ['3.32.8']: raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.") from request_llms.bridge_all import predict from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址 proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION') CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT') - ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME') + ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU') DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE') INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT') # 如果WEB_PORT是-1, 则随机选取WEB端口 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT from check_proxy import get_current_version - from themes.theme import adjust_theme, advanced_css, theme_declaration - from themes.theme import js_code_for_css_changing, js_code_for_darkmode_init, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init + from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2 + from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie title_html = f"
<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>
{theme_declaration}" @@ -76,7 +76,7 @@ def main(): predefined_btns = {} with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo: gr.HTML(title_html) - secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False) + secret_css, dark_mode, py_pickle_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False) cookies = gr.State(load_chat_cookies()) with gr_L1(): with gr_L2(scale=2, elem_id="gpt-chat"): @@ -98,6 +98,7 @@ def main(): audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False) with gr.Row(): status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel") + with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn: with gr.Row(): for k in range(NUM_CUSTOM_BASIC_BTN): @@ -142,7 +143,6 @@ def main(): with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up: file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload") - with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"): with gr.Row(): with gr.Tab("上传文件", elem_id="interact-panel"): @@ -158,10 +158,11 @@ def main(): with gr.Tab("界面外观", elem_id="interact-panel"): theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False) - checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], - value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) - checkboxes_2 = gr.CheckboxGroup(["自定义菜单"], - value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False) + checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False) + opt = ["自定义菜单"] + value=[] + if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"] + checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False) dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm") dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode) with gr.Tab("帮助", elem_id="interact-panel"): @@ -178,7 +179,7 @@ def main(): submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm") resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm") stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm") - clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm") + clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm") with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize: @@ -192,10 +193,12 @@ def main(): basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False) with gr.Column(scale=1, min_width=70): basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm") - basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm") - def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix): + basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm") + def assign_btn(persistent_cookie_, cookies_, 
basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False): ret = {} + # 读取之前的自定义按钮 customize_fn_overwrite_ = cookies_['customize_fn_overwrite'] + # 更新新的自定义按钮 customize_fn_overwrite_.update({ basic_btn_dropdown_: { @@ -205,20 +208,34 @@ def main(): } } ) - cookies_.update(customize_fn_overwrite_) + if clean_up: + customize_fn_overwrite_ = {} + cookies_.update(customize_fn_overwrite_) # 更新cookie + visible = (not clean_up) and (basic_fn_title != "") if basic_btn_dropdown_ in customize_btns: - ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)}) + # 是自定义按钮,不是预定义按钮 + ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)}) else: - ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)}) + # 是预定义按钮 + ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)}) ret.update({cookies: cookies_}) try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict except: persistent_cookie_ = {} persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict - ret.update({persistent_cookie: persistent_cookie_}) # write persistent cookie + ret.update({py_pickle_cookie: persistent_cookie_}) # write persistent cookie return ret - def reflesh_btn(persistent_cookie_, cookies_): + # update btn + h = basic_fn_confirm.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix], + [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) + h.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""") + # clean up btn + h2 = basic_fn_clean.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)], + [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) + h2.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""") + + def persistent_cookie_reload(persistent_cookie_, cookies_): ret = {} for k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=False, value="")}) @@ -236,25 +253,16 @@ def main(): else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])}) return ret - basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()]) - h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix], - [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()]) - # save persistent cookie - h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""") - # 功能区显示开关与功能区的互动 def fn_area_visibility(a): ret = {} - ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))}) - ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))}) ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))}) ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))}) - ret.update({clearBtn: gr.update(visible=("输入清除键" in a))}) - ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))}) ret.update({plugin_advanced_arg: 
gr.update(visible=("插件参数区" in a))}) if "浮动输入区" in a: ret.update({txt: gr.update(value="")}) return ret - checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] ) + checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] ) + checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide) # 功能区显示开关与功能区的互动 def fn_area_visibility_2(a): @@ -262,6 +270,7 @@ def main(): ret.update({area_customize: gr.update(visible=("自定义菜单" in a))}) return ret checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] ) + checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2) # 整理反复出现的控件句柄组合 input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg] @@ -272,15 +281,17 @@ def main(): cancel_handles.append(txt2.submit(**predict_args)) cancel_handles.append(submitBtn.click(**predict_args)) cancel_handles.append(submitBtn2.click(**predict_args)) - resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) - clearBtn.click(lambda: ("",""), None, [txt, txt2]) - clearBtn2.click(lambda: ("",""), None, [txt, txt2]) + resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status + resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status + resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history + resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history + clearBtn.click(None, None, [txt, txt2], _js=js_code_clear) + clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear) if AUTO_CLEAR_TXT: - submitBtn.click(lambda: ("",""), None, [txt, txt2]) - submitBtn2.click(lambda: ("",""), None, [txt, txt2]) - txt.submit(lambda: ("",""), None, [txt, txt2]) - txt2.submit(lambda: ("",""), None, [txt, txt2]) + submitBtn.click(None, None, [txt, txt2], _js=js_code_clear) + submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear) + txt.submit(None, None, [txt, txt2], _js=js_code_clear) + txt2.submit(None, None, [txt, txt2], _js=js_code_clear) # 基础功能区的回调函数注册 for k in functional: if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue @@ -360,10 +371,10 @@ def main(): audio_mic.stream(deal_audio, inputs=[audio_mic, cookies]) - demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies]) - darkmode_js = js_code_for_darkmode_init - demo.load(None, inputs=None, outputs=[persistent_cookie], _js=js_code_for_persistent_cookie_init) - demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js) # 配置暗色主题或亮色主题 + demo.load(init_cookie, inputs=[cookies], outputs=[cookies]) + demo.load(persistent_cookie_reload, inputs = [py_pickle_cookie, cookies], + outputs = [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init) + demo.load(None, inputs=[dark_mode], outputs=None, _js="""(dark_mode)=>{apply_cookie_for_checkbox(dark_mode);}""") # 配置暗色主题或亮色主题 demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}') # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数 diff --git a/request_llms/bridge_all.py 
b/request_llms/bridge_all.py index 575ac631..9b6c491e 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -31,6 +31,9 @@ from .bridge_qianfan import predict as qianfan_ui from .bridge_google_gemini import predict as genai_ui from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui +from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui +from .bridge_zhipu import predict as zhipu_ui + colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044'] class LazyloadTiktoken(object): @@ -44,13 +47,13 @@ class LazyloadTiktoken(object): tmp = tiktoken.encoding_for_model(model) print('加载tokenizer完毕') return tmp - + def encode(self, *args, **kwargs): - encoder = self.get_encoder(self.model) + encoder = self.get_encoder(self.model) return encoder.encode(*args, **kwargs) - + def decode(self, *args, **kwargs): - encoder = self.get_encoder(self.model) + encoder = self.get_encoder(self.model) return encoder.decode(*args, **kwargs) # Endpoint 重定向 @@ -63,7 +66,7 @@ azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/compl # 兼容旧版的配置 try: API_URL = get_conf("API_URL") - if API_URL != "https://api.openai.com/v1/chat/completions": + if API_URL != "https://api.openai.com/v1/chat/completions": openai_endpoint = API_URL print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置") except: @@ -95,7 +98,7 @@ model_info = { "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, - + "gpt-3.5-turbo-16k": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -185,7 +188,7 @@ model_info = { "tokenizer": tokenizer_gpt4, "token_cnt": get_token_num_gpt4, }, - + "gpt-4-vision-preview": { "fn_with_ui": chatgpt_vision_ui, "fn_without_ui": chatgpt_vision_noui, @@ -215,16 +218,25 @@ model_info = { "token_cnt": get_token_num_gpt4, }, - # api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加) - "api2d-gpt-3.5-turbo": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": api2d_endpoint, - "max_token": 4096, + # 智谱AI + "glm-4": { + "fn_with_ui": zhipu_ui, + "fn_without_ui": zhipu_noui, + "endpoint": None, + "max_token": 10124 * 8, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + }, + "glm-3-turbo": { + "fn_with_ui": zhipu_ui, + "fn_without_ui": zhipu_noui, + "endpoint": None, + "max_token": 10124 * 4, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, + # api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加) "api2d-gpt-4": { "fn_with_ui": chatgpt_ui, "fn_without_ui": chatgpt_noui, @@ -580,19 +592,17 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2 }) except: print(trimmed_format_exc()) -if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai +if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置 try: - from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui - from .bridge_zhipu import predict as zhipu_ui model_info.update({ "zhipuai": { "fn_with_ui": zhipu_ui, "fn_without_ui": zhipu_noui, "endpoint": None, - "max_token": 4096, + "max_token": 10124 * 8, "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, - } + }, }) except: print(trimmed_format_exc()) @@ -635,7 +645,7 @@ AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") if len(AZURE_CFG_ARRAY) > 0: for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items(): # 可能会覆盖之前的配置,但这是意料之中的 - if not azure_model_name.startswith('azure'): + if not azure_model_name.startswith('azure'): raise ValueError("AZURE_CFG_ARRAY中配置的模型必须以azure开头") endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \ 
f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15' @@ -701,7 +711,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser executor = ThreadPoolExecutor(max_workers=4) models = model.split('&') n_model = len(models) - + window_len = len(observe_window) assert window_len==3 window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True] @@ -720,7 +730,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser time.sleep(0.25) if not window_mutex[-1]: break # 看门狗(watchdog) - for i in range(n_model): + for i in range(n_model): window_mutex[i][1] = observe_window[1] # 观察窗(window) chat_string = [] diff --git a/request_llms/bridge_chatgpt.py b/request_llms/bridge_chatgpt.py index 660d5ddf..ecb8423b 100644 --- a/request_llms/bridge_chatgpt.py +++ b/request_llms/bridge_chatgpt.py @@ -113,6 +113,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", error_msg = get_full_error(chunk, stream_response).decode() if "reduce the length" in error_msg: raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) + elif """type":"upstream_error","param":"307""" in error_msg: + raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。") else: raise RuntimeError("OpenAI拒绝了请求:" + error_msg) if ('data: [DONE]' in chunk_decoded): break # api2d 正常完成 diff --git a/request_llms/bridge_google_gemini.py b/request_llms/bridge_google_gemini.py index 48e54190..cb85ecb6 100644 --- a/request_llms/bridge_google_gemini.py +++ b/request_llms/bridge_google_gemini.py @@ -57,6 +57,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if "vision" in llm_kwargs["llm_model"]: have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot) + if not have_recent_file: + chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面 + return def make_media_input(inputs, image_paths): for image_path in image_paths: inputs = inputs + f'

' diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py index 91903ad3..ecb3b755 100644 --- a/request_llms/bridge_zhipu.py +++ b/request_llms/bridge_zhipu.py @@ -1,15 +1,21 @@ - import time +import os from toolbox import update_ui, get_conf, update_ui_lastest_msg -from toolbox import check_packages, report_exception +from toolbox import check_packages, report_exception, have_any_recent_upload_image_files model_name = '智谱AI大模型' +zhipuai_default_model = 'glm-4' def validate_key(): ZHIPUAI_API_KEY = get_conf("ZHIPUAI_API_KEY") if ZHIPUAI_API_KEY == '': return False return True +def make_media_input(inputs, image_paths): + for image_path in image_paths: + inputs = inputs + f'

' + return inputs + def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): """ ⭐多线程方法 @@ -18,34 +24,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 response = "" + if llm_kwargs["llm_model"] == "zhipuai": + llm_kwargs["llm_model"] = zhipuai_default_model + if validate_key() is False: raise RuntimeError('请配置ZHIPUAI_API_KEY') - from .com_zhipuapi import ZhipuRequestInstance - sri = ZhipuRequestInstance() - for response in sri.generate(inputs, llm_kwargs, history, sys_prompt): + # 开始接收回复 + from .com_zhipuglm import ZhipuChatInit + zhipu_bro_init = ZhipuChatInit() + for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, sys_prompt): if len(observe_window) >= 1: observe_window[0] = response if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") + if (time.time() - observe_window[1]) > watch_dog_patience: + raise RuntimeError("程序终止。") return response -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): + +def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): """ ⭐单线程方法 函数的说明请见 request_llms/bridge_all.py """ - chatbot.append((inputs, "")) + chatbot.append([inputs, ""]) yield from update_ui(chatbot=chatbot, history=history) # 尝试导入依赖,如果缺少依赖,则给出安装建议 try: check_packages(["zhipuai"]) except: - yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。", - chatbot=chatbot, history=history, delay=0) + yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。", + chatbot=chatbot, history=history, delay=0) return - + if validate_key() is False: yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0) return @@ -53,16 +65,29 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp if additional_fn is not None: from core_functional import handle_core_functionality inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) - - # 开始接收回复 - from .com_zhipuapi import ZhipuRequestInstance - sri = ZhipuRequestInstance() - for response in sri.generate(inputs, llm_kwargs, history, system_prompt): - chatbot[-1] = (inputs, response) + chatbot[-1] = [inputs, ""] yield from update_ui(chatbot=chatbot, history=history) - # 总结输出 - if response == f"[Local Message] 等待{model_name}响应中 ...": - response = f"[Local Message] {model_name}响应异常 ..." 
+ if llm_kwargs["llm_model"] == "zhipuai": + llm_kwargs["llm_model"] = zhipuai_default_model + + if llm_kwargs["llm_model"] in ["glm-4v"]: + have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot) + if not have_recent_file: + chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写")) + yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面 + return + if have_recent_file: + inputs = make_media_input(inputs, image_paths) + chatbot[-1] = [inputs, ""] + yield from update_ui(chatbot=chatbot, history=history) + + + # 开始接收回复 + from .com_zhipuglm import ZhipuChatInit + zhipu_bro_init = ZhipuChatInit() + for chunk, response in zhipu_bro_init.generate_chat(inputs, llm_kwargs, history, system_prompt): + chatbot[-1] = [inputs, response] + yield from update_ui(chatbot=chatbot, history=history) history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/com_zhipuapi.py b/request_llms/com_zhipuapi.py deleted file mode 100644 index d8b763c9..00000000 --- a/request_llms/com_zhipuapi.py +++ /dev/null @@ -1,70 +0,0 @@ -from toolbox import get_conf -import threading -import logging - -timeout_bot_msg = '[Local Message] Request timeout. Network error.' - -class ZhipuRequestInstance(): - def __init__(self): - - self.time_to_yield_event = threading.Event() - self.time_to_exit_event = threading.Event() - - self.result_buf = "" - - def generate(self, inputs, llm_kwargs, history, system_prompt): - # import _thread as thread - import zhipuai - ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL") - zhipuai.api_key = ZHIPUAI_API_KEY - self.result_buf = "" - response = zhipuai.model_api.sse_invoke( - model=ZHIPUAI_MODEL, - prompt=generate_message_payload(inputs, llm_kwargs, history, system_prompt), - top_p=llm_kwargs['top_p']*0.7, # 智谱的API抽风,手动*0.7给做个线性变换 - temperature=llm_kwargs['temperature']*0.95, # 智谱的API抽风,手动*0.7给做个线性变换 - ) - for event in response.events(): - if event.event == "add": - # if self.result_buf == "" and event.data.startswith(" "): - # event.data = event.data.lstrip(" ") # 每次智谱为啥都要带个空格开头呢? - self.result_buf += event.data - yield self.result_buf - elif event.event == "error" or event.event == "interrupted": - raise RuntimeError("Unknown error:" + event.data) - elif event.event == "finish": - yield self.result_buf - break - else: - raise RuntimeError("Unknown error:" + str(event)) - if self.result_buf == "": - yield "智谱没有返回任何数据, 请检查ZHIPUAI_API_KEY和ZHIPUAI_MODEL是否填写正确." 
- logging.info(f'[raw_input] {inputs}') - logging.info(f'[response] {self.result_buf}') - return self.result_buf - -def generate_message_payload(inputs, llm_kwargs, history, system_prompt): - conversation_cnt = len(history) // 2 - messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}] - if conversation_cnt: - for index in range(0, 2*conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": - continue - if what_gpt_answer["content"] == timeout_bot_msg: - continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - return messages diff --git a/request_llms/com_zhipuglm.py b/request_llms/com_zhipuglm.py new file mode 100644 index 00000000..2e96d3fd --- /dev/null +++ b/request_llms/com_zhipuglm.py @@ -0,0 +1,84 @@ +# encoding: utf-8 +# @Time : 2024/1/22 +# @Author : Kilig947 & binary husky +# @Descr : 兼容最新的智谱Ai +from toolbox import get_conf +from zhipuai import ZhipuAI +from toolbox import get_conf, encode_image, get_pictures_list +import logging, os + + +def input_encode_handler(inputs, llm_kwargs): + if llm_kwargs["most_recent_uploaded"].get("path"): + image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"]) + md_encode = [] + for md_path in image_paths: + type_ = os.path.splitext(md_path)[1].replace(".", "") + type_ = "jpeg" if type_ == "jpg" else type_ + md_encode.append({"data": encode_image(md_path), "type": type_}) + return inputs, md_encode + + +class ZhipuChatInit: + + def __init__(self): + ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL") + if len(ZHIPUAI_MODEL) > 0: + logging.error('ZHIPUAI_MODEL 配置项选项已经弃用,请在LLM_MODEL中配置') + self.zhipu_bro = ZhipuAI(api_key=ZHIPUAI_API_KEY) + self.model = '' + + def __conversation_user(self, user_input: str, llm_kwargs): + if self.model not in ["glm-4v"]: + return {"role": "user", "content": user_input} + else: + input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs) + what_i_have_asked = {"role": "user", "content": []} + what_i_have_asked['content'].append({"type": 'text', "text": user_input}) + if encode_img: + img_d = {"type": "image_url", + "image_url": {'url': encode_img}} + what_i_have_asked['content'].append(img_d) + return what_i_have_asked + + def __conversation_history(self, history, llm_kwargs): + messages = [] + conversation_cnt = len(history) // 2 + if conversation_cnt: + for index in range(0, 2 * conversation_cnt, 2): + what_i_have_asked = self.__conversation_user(history[index], llm_kwargs) + what_gpt_answer = { + "role": "assistant", + "content": history[index + 1] + } + messages.append(what_i_have_asked) + messages.append(what_gpt_answer) + return messages + + def __conversation_message_payload(self, inputs, llm_kwargs, history, system_prompt): + messages = [] + if system_prompt: + messages.append({"role": "system", "content": system_prompt}) + self.model = llm_kwargs['llm_model'] + messages.extend(self.__conversation_history(history, llm_kwargs)) # 处理 history + messages.append(self.__conversation_user(inputs, llm_kwargs)) # 处理用户对话 + response = 
self.zhipu_bro.chat.completions.create( + model=self.model, messages=messages, stream=True, + temperature=llm_kwargs.get('temperature', 0.95) * 0.95, # 只能传默认的 temperature 和 top_p + top_p=llm_kwargs.get('top_p', 0.7) * 0.7, + max_tokens=llm_kwargs.get('max_tokens', 1024 * 4), # 最大输出模型的一半 + ) + return response + + def generate_chat(self, inputs, llm_kwargs, history, system_prompt): + self.model = llm_kwargs['llm_model'] + response = self.__conversation_message_payload(inputs, llm_kwargs, history, system_prompt) + bro_results = '' + for chunk in response: + bro_results += chunk.choices[0].delta.content + yield chunk.choices[0].delta.content, bro_results + + +if __name__ == '__main__': + zhipu = ZhipuChatInit() + zhipu.generate_chat('你好', {'llm_model': 'glm-4'}, [], '你是WPSAi') diff --git a/requirements.txt b/requirements.txt index 8f5e45ba..fe948ee8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ -https://public.gpt-academic.top/publish/gradio-3.32.7-py3-none-any.whl +https://public.gpt-academic.top/publish/gradio-3.32.8-py3-none-any.whl gradio-client==0.8 pypdf2==2.12.1 -zhipuai<2 +zhipuai>=2 tiktoken>=0.3.3 requests[socks] -pydantic==1.10.11 +pydantic==2.5.2 protobuf==3.18 transformers>=4.27.1 scipdf_parser>=0.52 diff --git a/tests/test_plugins.py b/tests/test_plugins.py index 7db36230..de465a48 100644 --- a/tests/test_plugins.py +++ b/tests/test_plugins.py @@ -20,10 +20,10 @@ if __name__ == "__main__": # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"}) - # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522") + # plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2307.07522") plugin_test( - plugin="crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF", + plugin="crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF", main_input="G:/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix", ) @@ -66,7 +66,7 @@ if __name__ == "__main__": # plugin_test(plugin='crazy_functions.知识库文件注入->读取知识库作答', main_input="远程云服务器部署?") - # plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2210.03629") + # plugin_test(plugin='crazy_functions.Latex输出PDF->Latex翻译中文并重新编译PDF', main_input="2210.03629") # advanced_arg = {"advanced_arg":"--llm_to_learn=gpt-3.5-turbo --prompt_prefix='根据下面的服装类型提示,想象一个穿着者,对这个人外貌、身处的环境、内心世界、人设进行描写。要求:100字以内,用第二人称。' --system_prompt=''" } # plugin_test(plugin='crazy_functions.chatglm微调工具->微调数据集生成', main_input='build/dev.json', advanced_arg=advanced_arg) diff --git a/themes/base64.mjs b/themes/base64.mjs index 5e64328d..b842822b 100644 --- a/themes/base64.mjs +++ b/themes/base64.mjs @@ -1,296 +1 @@ -/** - * base64.ts - * - * Licensed under the BSD 3-Clause License. - * http://opensource.org/licenses/BSD-3-Clause - * - * References: - * http://en.wikipedia.org/wiki/Base64 - * - * @author Dan Kogai (https://github.com/dankogai) - */ -const version = '3.7.2'; -/** - * @deprecated use lowercase `version`. - */ -const VERSION = version; -const _hasatob = typeof atob === 'function'; -const _hasbtoa = typeof btoa === 'function'; -const _hasBuffer = typeof Buffer === 'function'; -const _TD = typeof TextDecoder === 'function' ? new TextDecoder() : undefined; -const _TE = typeof TextEncoder === 'function' ? 
new TextEncoder() : undefined; -const b64ch = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='; -const b64chs = Array.prototype.slice.call(b64ch); -const b64tab = ((a) => { - let tab = {}; - a.forEach((c, i) => tab[c] = i); - return tab; -})(b64chs); -const b64re = /^(?:[A-Za-z\d+\/]{4})*?(?:[A-Za-z\d+\/]{2}(?:==)?|[A-Za-z\d+\/]{3}=?)?$/; -const _fromCC = String.fromCharCode.bind(String); -const _U8Afrom = typeof Uint8Array.from === 'function' - ? Uint8Array.from.bind(Uint8Array) - : (it, fn = (x) => x) => new Uint8Array(Array.prototype.slice.call(it, 0).map(fn)); -const _mkUriSafe = (src) => src - .replace(/=/g, '').replace(/[+\/]/g, (m0) => m0 == '+' ? '-' : '_'); -const _tidyB64 = (s) => s.replace(/[^A-Za-z0-9\+\/]/g, ''); -/** - * polyfill version of `btoa` - */ -const btoaPolyfill = (bin) => { - // console.log('polyfilled'); - let u32, c0, c1, c2, asc = ''; - const pad = bin.length % 3; - for (let i = 0; i < bin.length;) { - if ((c0 = bin.charCodeAt(i++)) > 255 || - (c1 = bin.charCodeAt(i++)) > 255 || - (c2 = bin.charCodeAt(i++)) > 255) - throw new TypeError('invalid character found'); - u32 = (c0 << 16) | (c1 << 8) | c2; - asc += b64chs[u32 >> 18 & 63] - + b64chs[u32 >> 12 & 63] - + b64chs[u32 >> 6 & 63] - + b64chs[u32 & 63]; - } - return pad ? asc.slice(0, pad - 3) + "===".substring(pad) : asc; -}; -/** - * does what `window.btoa` of web browsers do. - * @param {String} bin binary string - * @returns {string} Base64-encoded string - */ -const _btoa = _hasbtoa ? (bin) => btoa(bin) - : _hasBuffer ? (bin) => Buffer.from(bin, 'binary').toString('base64') - : btoaPolyfill; -const _fromUint8Array = _hasBuffer - ? (u8a) => Buffer.from(u8a).toString('base64') - : (u8a) => { - // cf. https://stackoverflow.com/questions/12710001/how-to-convert-uint8-array-to-base64-encoded-string/12713326#12713326 - const maxargs = 0x1000; - let strs = []; - for (let i = 0, l = u8a.length; i < l; i += maxargs) { - strs.push(_fromCC.apply(null, u8a.subarray(i, i + maxargs))); - } - return _btoa(strs.join('')); - }; -/** - * converts a Uint8Array to a Base64 string. - * @param {boolean} [urlsafe] URL-and-filename-safe a la RFC4648 §5 - * @returns {string} Base64 string - */ -const fromUint8Array = (u8a, urlsafe = false) => urlsafe ? _mkUriSafe(_fromUint8Array(u8a)) : _fromUint8Array(u8a); -// This trick is found broken https://github.com/dankogai/js-base64/issues/130 -// const utob = (src: string) => unescape(encodeURIComponent(src)); -// reverting good old fationed regexp -const cb_utob = (c) => { - if (c.length < 2) { - var cc = c.charCodeAt(0); - return cc < 0x80 ? c - : cc < 0x800 ? (_fromCC(0xc0 | (cc >>> 6)) - + _fromCC(0x80 | (cc & 0x3f))) - : (_fromCC(0xe0 | ((cc >>> 12) & 0x0f)) - + _fromCC(0x80 | ((cc >>> 6) & 0x3f)) - + _fromCC(0x80 | (cc & 0x3f))); - } - else { - var cc = 0x10000 - + (c.charCodeAt(0) - 0xD800) * 0x400 - + (c.charCodeAt(1) - 0xDC00); - return (_fromCC(0xf0 | ((cc >>> 18) & 0x07)) - + _fromCC(0x80 | ((cc >>> 12) & 0x3f)) - + _fromCC(0x80 | ((cc >>> 6) & 0x3f)) - + _fromCC(0x80 | (cc & 0x3f))); - } -}; -const re_utob = /[\uD800-\uDBFF][\uDC00-\uDFFFF]|[^\x00-\x7F]/g; -/** - * @deprecated should have been internal use only. - * @param {string} src UTF-8 string - * @returns {string} UTF-16 string - */ -const utob = (u) => u.replace(re_utob, cb_utob); -// -const _encode = _hasBuffer - ? (s) => Buffer.from(s, 'utf8').toString('base64') - : _TE - ? (s) => _fromUint8Array(_TE.encode(s)) - : (s) => _btoa(utob(s)); -/** - * converts a UTF-8-encoded string to a Base64 string. 
- * @param {boolean} [urlsafe] if `true` make the result URL-safe - * @returns {string} Base64 string - */ -const encode = (src, urlsafe = false) => urlsafe - ? _mkUriSafe(_encode(src)) - : _encode(src); -/** - * converts a UTF-8-encoded string to URL-safe Base64 RFC4648 §5. - * @returns {string} Base64 string - */ -const encodeURI = (src) => encode(src, true); -// This trick is found broken https://github.com/dankogai/js-base64/issues/130 -// const btou = (src: string) => decodeURIComponent(escape(src)); -// reverting good old fationed regexp -const re_btou = /[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF]{2}|[\xF0-\xF7][\x80-\xBF]{3}/g; -const cb_btou = (cccc) => { - switch (cccc.length) { - case 4: - var cp = ((0x07 & cccc.charCodeAt(0)) << 18) - | ((0x3f & cccc.charCodeAt(1)) << 12) - | ((0x3f & cccc.charCodeAt(2)) << 6) - | (0x3f & cccc.charCodeAt(3)), offset = cp - 0x10000; - return (_fromCC((offset >>> 10) + 0xD800) - + _fromCC((offset & 0x3FF) + 0xDC00)); - case 3: - return _fromCC(((0x0f & cccc.charCodeAt(0)) << 12) - | ((0x3f & cccc.charCodeAt(1)) << 6) - | (0x3f & cccc.charCodeAt(2))); - default: - return _fromCC(((0x1f & cccc.charCodeAt(0)) << 6) - | (0x3f & cccc.charCodeAt(1))); - } -}; -/** - * @deprecated should have been internal use only. - * @param {string} src UTF-16 string - * @returns {string} UTF-8 string - */ -const btou = (b) => b.replace(re_btou, cb_btou); -/** - * polyfill version of `atob` - */ -const atobPolyfill = (asc) => { - // console.log('polyfilled'); - asc = asc.replace(/\s+/g, ''); - if (!b64re.test(asc)) - throw new TypeError('malformed base64.'); - asc += '=='.slice(2 - (asc.length & 3)); - let u24, bin = '', r1, r2; - for (let i = 0; i < asc.length;) { - u24 = b64tab[asc.charAt(i++)] << 18 - | b64tab[asc.charAt(i++)] << 12 - | (r1 = b64tab[asc.charAt(i++)]) << 6 - | (r2 = b64tab[asc.charAt(i++)]); - bin += r1 === 64 ? _fromCC(u24 >> 16 & 255) - : r2 === 64 ? _fromCC(u24 >> 16 & 255, u24 >> 8 & 255) - : _fromCC(u24 >> 16 & 255, u24 >> 8 & 255, u24 & 255); - } - return bin; -}; -/** - * does what `window.atob` of web browsers do. - * @param {String} asc Base64-encoded string - * @returns {string} binary string - */ -const _atob = _hasatob ? (asc) => atob(_tidyB64(asc)) - : _hasBuffer ? (asc) => Buffer.from(asc, 'base64').toString('binary') - : atobPolyfill; -// -const _toUint8Array = _hasBuffer - ? (a) => _U8Afrom(Buffer.from(a, 'base64')) - : (a) => _U8Afrom(_atob(a), c => c.charCodeAt(0)); -/** - * converts a Base64 string to a Uint8Array. - */ -const toUint8Array = (a) => _toUint8Array(_unURI(a)); -// -const _decode = _hasBuffer - ? (a) => Buffer.from(a, 'base64').toString('utf8') - : _TD - ? (a) => _TD.decode(_toUint8Array(a)) - : (a) => btou(_atob(a)); -const _unURI = (a) => _tidyB64(a.replace(/[-_]/g, (m0) => m0 == '-' ? '+' : '/')); -/** - * converts a Base64 string to a UTF-8 string. - * @param {String} src Base64 string. 
Both normal and URL-safe are supported - * @returns {string} UTF-8 string - */ -const decode = (src) => _decode(_unURI(src)); -/** - * check if a value is a valid Base64 string - * @param {String} src a value to check - */ -const isValid = (src) => { - if (typeof src !== 'string') - return false; - const s = src.replace(/\s+/g, '').replace(/={0,2}$/, ''); - return !/[^\s0-9a-zA-Z\+/]/.test(s) || !/[^\s0-9a-zA-Z\-_]/.test(s); -}; -// -const _noEnum = (v) => { - return { - value: v, enumerable: false, writable: true, configurable: true - }; -}; -/** - * extend String.prototype with relevant methods - */ -const extendString = function () { - const _add = (name, body) => Object.defineProperty(String.prototype, name, _noEnum(body)); - _add('fromBase64', function () { return decode(this); }); - _add('toBase64', function (urlsafe) { return encode(this, urlsafe); }); - _add('toBase64URI', function () { return encode(this, true); }); - _add('toBase64URL', function () { return encode(this, true); }); - _add('toUint8Array', function () { return toUint8Array(this); }); -}; -/** - * extend Uint8Array.prototype with relevant methods - */ -const extendUint8Array = function () { - const _add = (name, body) => Object.defineProperty(Uint8Array.prototype, name, _noEnum(body)); - _add('toBase64', function (urlsafe) { return fromUint8Array(this, urlsafe); }); - _add('toBase64URI', function () { return fromUint8Array(this, true); }); - _add('toBase64URL', function () { return fromUint8Array(this, true); }); -}; -/** - * extend Builtin prototypes with relevant methods - */ -const extendBuiltins = () => { - extendString(); - extendUint8Array(); -}; -const gBase64 = { - version: version, - VERSION: VERSION, - atob: _atob, - atobPolyfill: atobPolyfill, - btoa: _btoa, - btoaPolyfill: btoaPolyfill, - fromBase64: decode, - toBase64: encode, - encode: encode, - encodeURI: encodeURI, - encodeURL: encodeURI, - utob: utob, - btou: btou, - decode: decode, - isValid: isValid, - fromUint8Array: fromUint8Array, - toUint8Array: toUint8Array, - extendString: extendString, - extendUint8Array: extendUint8Array, - extendBuiltins: extendBuiltins, -}; -// makecjs:CUT // -export { version }; -export { VERSION }; -export { _atob as atob }; -export { atobPolyfill }; -export { _btoa as btoa }; -export { btoaPolyfill }; -export { decode as fromBase64 }; -export { encode as toBase64 }; -export { utob }; -export { encode }; -export { encodeURI }; -export { encodeURI as encodeURL }; -export { btou }; -export { decode }; -export { isValid }; -export { fromUint8Array }; -export { toUint8Array }; -export { extendString }; -export { extendUint8Array }; -export { extendBuiltins }; -// and finally, -export { gBase64 as Base64 }; \ No newline at end of file +// we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0 diff --git a/themes/common.css b/themes/common.css index 4e76b051..e4b453d7 100644 --- a/themes/common.css +++ b/themes/common.css @@ -59,6 +59,7 @@ /* Scrollbar Width */ ::-webkit-scrollbar { + height: 12px; width: 12px; } diff --git a/themes/common.js b/themes/common.js index 5e2d8c22..4a9606c7 100644 --- a/themes/common.js +++ b/themes/common.js @@ -234,7 +234,7 @@ let timeoutID = null; let lastInvocationTime = 0; let lastArgs = null; function do_something_but_not_too_frequently(min_interval, func) { - return function(...args) { + return function (...args) { lastArgs = args; const now = Date.now(); if (!lastInvocationTime || (now - lastInvocationTime) >= min_interval) { @@ -263,13 +263,8 @@ 
function chatbotContentChanged(attempt = 1, force = false) { gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton); }, i === 0 ? 0 : 200); } + // we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0 - const run_mermaid_render = do_something_but_not_too_frequently(1000, function () { - const blocks = document.querySelectorAll(`pre.mermaid, diagram-div`); - if (blocks.length == 0) { return; } - uml("mermaid"); - }); - run_mermaid_render(); } @@ -672,9 +667,9 @@ function limit_scroll_position() { let scrollableDiv = document.querySelector('#gpt-chatbot > div.wrap'); scrollableDiv.addEventListener('wheel', function (e) { let preventScroll = false; - if (e.deltaX != 0) { prevented_offset = 0; return;} - if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return;} - if (e.deltaY < 0) { prevented_offset = 0; return;} + if (e.deltaX != 0) { prevented_offset = 0; return; } + if (this.scrollHeight == this.clientHeight) { prevented_offset = 0; return; } + if (e.deltaY < 0) { prevented_offset = 0; return; } if (e.deltaY > 0 && this.scrollHeight - this.clientHeight - this.scrollTop <= 1) { preventScroll = true; } if (preventScroll) { @@ -713,3 +708,161 @@ function GptAcademicJavaScriptInit(LAYOUT = "LEFT-RIGHT") { // setInterval(function () { uml("mermaid") }, 5000); // 每50毫秒执行一次 } + + +function loadLive2D() { + try { + $("").attr({ href: "file=themes/waifu_plugin/waifu.css", rel: "stylesheet", type: "text/css" }).appendTo('head'); + $('body').append('
'); + $.ajax({ + url: "file=themes/waifu_plugin/waifu-tips.js", dataType: "script", cache: true, success: function () { + $.ajax({ + url: "file=themes/waifu_plugin/live2d.js", dataType: "script", cache: true, success: function () { + /* 可直接修改部分参数 */ + live2d_settings['hitokotoAPI'] = "hitokoto.cn"; // 一言 API + live2d_settings['modelId'] = 3; // 默认模型 ID + live2d_settings['modelTexturesId'] = 44; // 默认材质 ID + live2d_settings['modelStorage'] = false; // 不储存模型 ID + live2d_settings['waifuSize'] = '210x187'; + live2d_settings['waifuTipsSize'] = '187x52'; + live2d_settings['canSwitchModel'] = true; + live2d_settings['canSwitchTextures'] = true; + live2d_settings['canSwitchHitokoto'] = false; + live2d_settings['canTakeScreenshot'] = false; + live2d_settings['canTurnToHomePage'] = false; + live2d_settings['canTurnToAboutPage'] = false; + live2d_settings['showHitokoto'] = false; // 显示一言 + live2d_settings['showF12Status'] = false; // 显示加载状态 + live2d_settings['showF12Message'] = false; // 显示看板娘消息 + live2d_settings['showF12OpenMsg'] = false; // 显示控制台打开提示 + live2d_settings['showCopyMessage'] = false; // 显示 复制内容 提示 + live2d_settings['showWelcomeMessage'] = true; // 显示进入面页欢迎词 + /* 在 initModel 前添加 */ + initModel("file=themes/waifu_plugin/waifu-tips.json"); + } + }); + } + }); + } catch (err) { console.log("[Error] JQuery is not defined.") } +} + +function get_checkbox_selected_items(elem_id){ + display_panel_arr = []; + document.getElementById(elem_id).querySelector('[data-testid="checkbox-group"]').querySelectorAll('label').forEach(label => { + // Get the span text + const spanText = label.querySelector('span').textContent; + // Get the input value + const checked = label.querySelector('input').checked; + if (checked) { + display_panel_arr.push(spanText) + } + }); + return display_panel_arr; +} + +function set_checkbox(key, bool, set_twice=false) { + set_success = false; + elem_ids = ["cbsc", "cbs"] + elem_ids.forEach(id => { + document.getElementById(id).querySelector('[data-testid="checkbox-group"]').querySelectorAll('label').forEach(label => { + // Get the span text + const spanText = label.querySelector('span').textContent; + if (spanText === key) { + if (bool){ + label.classList.add('selected'); + } else { + if (label.classList.contains('selected')) { + label.classList.remove('selected'); + } + } + if (set_twice){ + setTimeout(() => { + if (bool){ + label.classList.add('selected'); + } else { + if (label.classList.contains('selected')) { + label.classList.remove('selected'); + } + } + }, 5000); + } + + label.querySelector('input').checked = bool; + set_success = true; + return + } + }); + }); + + if (!set_success){ + console.log("设置checkbox失败,没有找到对应的key") + } +} + +function apply_cookie_for_checkbox(dark) { + // console.log("apply_cookie_for_checkboxes") + let searchString = "输入清除键"; + let bool_value = "False"; + + ////////////////// darkmode /////////////////// + if (getCookie("js_darkmode_cookie")) { + dark = getCookie("js_darkmode_cookie") + } + dark = dark == "True"; + if (document.querySelectorAll('.dark').length) { + if (!dark) { + document.querySelectorAll('.dark').forEach(el => el.classList.remove('dark')); + } + } else { + if (dark) { + document.querySelector('body').classList.add('dark'); + } + } + + ////////////////////// clearButton /////////////////////////// + if (getCookie("js_clearbtn_show_cookie")) { + // have cookie + bool_value = getCookie("js_clearbtn_show_cookie") + bool_value = bool_value == "True"; + searchString = "输入清除键"; + if (bool_value) { + let clearButton = 
document.getElementById("elem_clear"); + let clearButton2 = document.getElementById("elem_clear2"); + clearButton.style.display = "block"; + clearButton2.style.display = "block"; + set_checkbox(searchString, true); + } else { + let clearButton = document.getElementById("elem_clear"); + let clearButton2 = document.getElementById("elem_clear2"); + clearButton.style.display = "none"; + clearButton2.style.display = "none"; + set_checkbox(searchString, false); + } + } + + ////////////////////// live2d /////////////////////////// + + if (getCookie("js_live2d_show_cookie")) { + // have cookie + searchString = "添加Live2D形象"; + bool_value = getCookie("js_live2d_show_cookie"); + bool_value = bool_value == "True"; + if (bool_value) { + loadLive2D(); + set_checkbox(searchString, true); + } else { + $('.waifu').hide(); + set_checkbox(searchString, false); + } + } else { + // do not have cookie + // get conf + display_panel_arr = get_checkbox_selected_items("cbsc"); + searchString = "添加Live2D形象"; + if (display_panel_arr.includes(searchString)) { + loadLive2D(); + } else { + } + } + +} \ No newline at end of file diff --git a/themes/common.py b/themes/common.py index 08f8561b..40c14691 100644 --- a/themes/common.py +++ b/themes/common.py @@ -5,17 +5,14 @@ def get_common_html_javascript_code(): js = "\n" for jsf in [ "file=themes/common.js", - "file=themes/mermaid.min.js", - "file=themes/mermaid_loader.js", ]: js += f"""\n""" # 添加Live2D if ADD_WAIFU: for jsf in [ - "file=docs/waifu_plugin/jquery.min.js", - "file=docs/waifu_plugin/jquery-ui.min.js", - "file=docs/waifu_plugin/autoload.js", + "file=themes/waifu_plugin/jquery.min.js", + "file=themes/waifu_plugin/jquery-ui.min.js", ]: js += f"""\n""" return js \ No newline at end of file diff --git a/themes/mermaid.min.js b/themes/mermaid.min.js index 87df8091..b842822b 100644 --- a/themes/mermaid.min.js +++ b/themes/mermaid.min.js @@ -1,1589 +1 @@ -(function(T2,G0){typeof exports=="object"&&typeof module<"u"?module.exports=G0():typeof define=="function"&&define.amd?define(G0):(T2=typeof globalThis<"u"?globalThis:T2||self,T2.mermaid=G0())})(this,function(){"use strict";var VPn=Object.defineProperty;var UPn=(T2,G0,Ar)=>G0 in T2?VPn(T2,G0,{enumerable:!0,configurable:!0,writable:!0,value:Ar}):T2[G0]=Ar;var CL=(T2,G0,Ar)=>(UPn(T2,typeof G0!="symbol"?G0+"":G0,Ar),Ar);function T2(r){for(var i=[],o=1;o>>0,l;for(l=0;l0)for(o=0;o=0;return(b?o?"+":"":"-")+Math.pow(10,Math.max(0,f)).toString().substr(1)+l}var Tie=/(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|N{1,5}|YYYYYY|YYYYY|YYYY|YY|y{2,4}|yo?|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g,az=/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,Cie={},_T={};function Ki(r,i,o,l){var f=l;typeof l=="string"&&(f=function(){return this[l]()}),r&&(_T[r]=f),i&&(_T[i[0]]=function(){return Sm(f.apply(this,arguments),i[1],i[2])}),o&&(_T[o]=function(){return this.localeData().ordinal(f.apply(this,arguments),r)})}function $_t(r){return r.match(/\[[\s\S]/)?r.replace(/^\[|\]$/g,""):r.replace(/\\/g,"")}function H_t(r){var i=r.match(Tie),o,l;for(o=0,l=i.length;o=0&&az.test(r);)r=r.replace(az,l),az.lastIndex=0,o-=1;return r}var z_t={LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"};function G_t(r){var i=this._longDateFormat[r],o=this._longDateFormat[r.toUpperCase()];return i||!o?i:(this._longDateFormat[r]=o.match(Tie).map(function(l){return 
l==="MMMM"||l==="MM"||l==="DD"||l==="dddd"?l.slice(1):l}).join(""),this._longDateFormat[r])}var V_t="Invalid date";function U_t(){return this._invalidDate}var q_t="%d",Y_t=/\d{1,2}/;function W_t(r){return this._ordinal.replace("%d",r)}var K_t={future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",w:"a week",ww:"%d weeks",M:"a month",MM:"%d months",y:"a year",yy:"%d years"};function X_t(r,i,o,l){var f=this._relativeTime[o];return Cm(f)?f(r,i,o,l):f.replace(/%d/i,r)}function Q_t(r,i){var o=this._relativeTime[r>0?"future":"past"];return Cm(o)?o(i):o.replace(/%s/i,i)}var ML={};function nd(r,i){var o=r.toLowerCase();ML[o]=ML[o+"s"]=ML[i]=r}function S2(r){return typeof r=="string"?ML[r]||ML[r.toLowerCase()]:void 0}function Sie(r){var i={},o,l;for(l in r)Mo(r,l)&&(o=S2(l),o&&(i[o]=r[l]));return i}var r9e={};function rd(r,i){r9e[r]=i}function Z_t(r){var i=[],o;for(o in r)Mo(r,o)&&i.push({unit:o,priority:r9e[o]});return i.sort(function(l,f){return l.priority-f.priority}),i}function cz(r){return r%4===0&&r%100!==0||r%400===0}function A2(r){return r<0?Math.ceil(r)||0:Math.floor(r)}function Fa(r){var i=+r,o=0;return i!==0&&isFinite(i)&&(o=A2(i)),o}function TT(r,i){return function(o){return o!=null?(i9e(this,r,o),Ar.updateOffset(this,i),this):uz(this,r)}}function uz(r,i){return r.isValid()?r._d["get"+(r._isUTC?"UTC":"")+i]():NaN}function i9e(r,i,o){r.isValid()&&!isNaN(o)&&(i==="FullYear"&&cz(r.year())&&r.month()===1&&r.date()===29?(o=Fa(o),r._d["set"+(r._isUTC?"UTC":"")+i](o,r.month(),bz(o,r.month()))):r._d["set"+(r._isUTC?"UTC":"")+i](o))}function J_t(r){return r=S2(r),Cm(this[r])?this[r]():this}function eTt(r,i){if(typeof r=="object"){r=Sie(r);var o=Z_t(r),l,f=o.length;for(l=0;l68?1900:2e3)};var g9e=TT("FullYear",!0);function mTt(){return cz(this.year())}function yTt(r,i,o,l,f,b,d){var w;return r<100&&r>=0?(w=new Date(r+400,i,o,l,f,b,d),isFinite(w.getFullYear())&&w.setFullYear(r)):w=new Date(r,i,o,l,f,b,d),w}function OL(r){var i,o;return r<100&&r>=0?(o=Array.prototype.slice.call(arguments),o[0]=r+400,i=new Date(Date.UTC.apply(null,o)),isFinite(i.getUTCFullYear())&&i.setUTCFullYear(r)):i=new Date(Date.UTC.apply(null,arguments)),i}function vz(r,i,o){var l=7+i-o,f=(7+OL(r,0,l).getUTCDay()-i)%7;return-f+l-1}function p9e(r,i,o,l,f){var b=(7+o-l)%7,d=vz(r,l,f),w=1+7*(i-1)+b+d,y,k;return w<=0?(y=r-1,k=IL(y)+w):w>IL(r)?(y=r+1,k=w-IL(r)):(y=r,k=w),{year:y,dayOfYear:k}}function NL(r,i,o){var l=vz(r.year(),i,o),f=Math.floor((r.dayOfYear()-l-1)/7)+1,b,d;return f<1?(d=r.year()-1,b=f+N3(d,i,o)):f>N3(r.year(),i,o)?(b=f-N3(r.year(),i,o),d=r.year()+1):(d=r.year(),b=f),{week:b,year:d}}function N3(r,i,o){var l=vz(r,i,o),f=vz(r+1,i,o);return(IL(r)-l+f)/7}Ki("w",["ww",2],"wo","week"),Ki("W",["WW",2],"Wo","isoWeek"),nd("week","w"),nd("isoWeek","W"),rd("week",5),rd("isoWeek",5),ci("w",wu),ci("ww",wu,op),ci("W",wu),ci("WW",wu,op),LL(["w","ww","W","WW"],function(r,i,o,l){i[l.substr(0,1)]=Fa(r)});function kTt(r){return NL(r,this._week.dow,this._week.doy).week}var xTt={dow:0,doy:6};function ETt(){return this._week.dow}function _Tt(){return this._week.doy}function TTt(r){var i=this.localeData().week(this);return r==null?i:this.add((r-i)*7,"d")}function CTt(r){var i=NL(this,1,4).week;return r==null?i:this.add((r-i)*7,"d")}Ki("d",0,"do","day"),Ki("dd",0,0,function(r){return this.localeData().weekdaysMin(this,r)}),Ki("ddd",0,0,function(r){return this.localeData().weekdaysShort(this,r)}),Ki("dddd",0,0,function(r){return 
this.localeData().weekdays(this,r)}),Ki("e",0,0,"weekday"),Ki("E",0,0,"isoWeekday"),nd("day","d"),nd("weekday","e"),nd("isoWeekday","E"),rd("day",11),rd("weekday",11),rd("isoWeekday",11),ci("d",wu),ci("e",wu),ci("E",wu),ci("dd",function(r,i){return i.weekdaysMinRegex(r)}),ci("ddd",function(r,i){return i.weekdaysShortRegex(r)}),ci("dddd",function(r,i){return i.weekdaysRegex(r)}),LL(["dd","ddd","dddd"],function(r,i,o,l){var f=o._locale.weekdaysParse(r,l,o._strict);f!=null?i.d=f:ba(o).invalidWeekday=r}),LL(["d","e","E"],function(r,i,o,l){i[l]=Fa(r)});function STt(r,i){return typeof r!="string"?r:isNaN(r)?(r=i.weekdaysParse(r),typeof r=="number"?r:null):parseInt(r,10)}function ATt(r,i){return typeof r=="string"?i.weekdaysParse(r)%7||7:isNaN(r)?null:r}function Lie(r,i){return r.slice(i,7).concat(r.slice(0,i))}var MTt="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),b9e="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),DTt="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),LTt=DL,ITt=DL,OTt=DL;function NTt(r,i){var o=mv(this._weekdays)?this._weekdays:this._weekdays[r&&r!==!0&&this._weekdays.isFormat.test(i)?"format":"standalone"];return r===!0?Lie(o,this._week.dow):r?o[r.day()]:o}function PTt(r){return r===!0?Lie(this._weekdaysShort,this._week.dow):r?this._weekdaysShort[r.day()]:this._weekdaysShort}function BTt(r){return r===!0?Lie(this._weekdaysMin,this._week.dow):r?this._weekdaysMin[r.day()]:this._weekdaysMin}function FTt(r,i,o){var l,f,b,d=r.toLocaleLowerCase();if(!this._weekdaysParse)for(this._weekdaysParse=[],this._shortWeekdaysParse=[],this._minWeekdaysParse=[],l=0;l<7;++l)b=Tm([2e3,1]).day(l),this._minWeekdaysParse[l]=this.weekdaysMin(b,"").toLocaleLowerCase(),this._shortWeekdaysParse[l]=this.weekdaysShort(b,"").toLocaleLowerCase(),this._weekdaysParse[l]=this.weekdays(b,"").toLocaleLowerCase();return o?i==="dddd"?(f=ih.call(this._weekdaysParse,d),f!==-1?f:null):i==="ddd"?(f=ih.call(this._shortWeekdaysParse,d),f!==-1?f:null):(f=ih.call(this._minWeekdaysParse,d),f!==-1?f:null):i==="dddd"?(f=ih.call(this._weekdaysParse,d),f!==-1||(f=ih.call(this._shortWeekdaysParse,d),f!==-1)?f:(f=ih.call(this._minWeekdaysParse,d),f!==-1?f:null)):i==="ddd"?(f=ih.call(this._shortWeekdaysParse,d),f!==-1||(f=ih.call(this._weekdaysParse,d),f!==-1)?f:(f=ih.call(this._minWeekdaysParse,d),f!==-1?f:null)):(f=ih.call(this._minWeekdaysParse,d),f!==-1||(f=ih.call(this._weekdaysParse,d),f!==-1)?f:(f=ih.call(this._shortWeekdaysParse,d),f!==-1?f:null))}function RTt(r,i,o){var l,f,b;if(this._weekdaysParseExact)return FTt.call(this,r,i,o);for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]),l=0;l<7;l++){if(f=Tm([2e3,1]).day(l),o&&!this._fullWeekdaysParse[l]&&(this._fullWeekdaysParse[l]=new RegExp("^"+this.weekdays(f,"").replace(".","\\.?")+"$","i"),this._shortWeekdaysParse[l]=new RegExp("^"+this.weekdaysShort(f,"").replace(".","\\.?")+"$","i"),this._minWeekdaysParse[l]=new RegExp("^"+this.weekdaysMin(f,"").replace(".","\\.?")+"$","i")),this._weekdaysParse[l]||(b="^"+this.weekdays(f,"")+"|^"+this.weekdaysShort(f,"")+"|^"+this.weekdaysMin(f,""),this._weekdaysParse[l]=new RegExp(b.replace(".",""),"i")),o&&i==="dddd"&&this._fullWeekdaysParse[l].test(r))return l;if(o&&i==="ddd"&&this._shortWeekdaysParse[l].test(r))return l;if(o&&i==="dd"&&this._minWeekdaysParse[l].test(r))return l;if(!o&&this._weekdaysParse[l].test(r))return l}}function jTt(r){if(!this.isValid())return r!=null?this:NaN;var i=this._isUTC?this._d.getUTCDay():this._d.getDay();return 
r!=null?(r=STt(r,this.localeData()),this.add(r-i,"d")):i}function $Tt(r){if(!this.isValid())return r!=null?this:NaN;var i=(this.day()+7-this.localeData()._week.dow)%7;return r==null?i:this.add(r-i,"d")}function HTt(r){if(!this.isValid())return r!=null?this:NaN;if(r!=null){var i=ATt(r,this.localeData());return this.day(this.day()%7?i:i-7)}else return this.day()||7}function zTt(r){return this._weekdaysParseExact?(Mo(this,"_weekdaysRegex")||Iie.call(this),r?this._weekdaysStrictRegex:this._weekdaysRegex):(Mo(this,"_weekdaysRegex")||(this._weekdaysRegex=LTt),this._weekdaysStrictRegex&&r?this._weekdaysStrictRegex:this._weekdaysRegex)}function GTt(r){return this._weekdaysParseExact?(Mo(this,"_weekdaysRegex")||Iie.call(this),r?this._weekdaysShortStrictRegex:this._weekdaysShortRegex):(Mo(this,"_weekdaysShortRegex")||(this._weekdaysShortRegex=ITt),this._weekdaysShortStrictRegex&&r?this._weekdaysShortStrictRegex:this._weekdaysShortRegex)}function VTt(r){return this._weekdaysParseExact?(Mo(this,"_weekdaysRegex")||Iie.call(this),r?this._weekdaysMinStrictRegex:this._weekdaysMinRegex):(Mo(this,"_weekdaysMinRegex")||(this._weekdaysMinRegex=OTt),this._weekdaysMinStrictRegex&&r?this._weekdaysMinStrictRegex:this._weekdaysMinRegex)}function Iie(){function r(E,T){return T.length-E.length}var i=[],o=[],l=[],f=[],b,d,w,y,k;for(b=0;b<7;b++)d=Tm([2e3,1]).day(b),w=cp(this.weekdaysMin(d,"")),y=cp(this.weekdaysShort(d,"")),k=cp(this.weekdays(d,"")),i.push(w),o.push(y),l.push(k),f.push(w),f.push(y),f.push(k);i.sort(r),o.sort(r),l.sort(r),f.sort(r),this._weekdaysRegex=new RegExp("^("+f.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+l.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+o.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+i.join("|")+")","i")}function Oie(){return this.hours()%12||12}function UTt(){return this.hours()||24}Ki("H",["HH",2],0,"hour"),Ki("h",["hh",2],0,Oie),Ki("k",["kk",2],0,UTt),Ki("hmm",0,0,function(){return""+Oie.apply(this)+Sm(this.minutes(),2)}),Ki("hmmss",0,0,function(){return""+Oie.apply(this)+Sm(this.minutes(),2)+Sm(this.seconds(),2)}),Ki("Hmm",0,0,function(){return""+this.hours()+Sm(this.minutes(),2)}),Ki("Hmmss",0,0,function(){return""+this.hours()+Sm(this.minutes(),2)+Sm(this.seconds(),2)});function v9e(r,i){Ki(r,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),i)})}v9e("a",!0),v9e("A",!1),nd("hour","h"),rd("hour",13);function w9e(r,i){return i._meridiemParse}ci("a",w9e),ci("A",w9e),ci("H",wu),ci("h",wu),ci("k",wu),ci("HH",wu,op),ci("hh",wu,op),ci("kk",wu,op),ci("hmm",o9e),ci("hmmss",c9e),ci("Hmm",o9e),ci("Hmmss",c9e),Mc(["H","HH"],of),Mc(["k","kk"],function(r,i,o){var l=Fa(r);i[of]=l===24?0:l}),Mc(["a","A"],function(r,i,o){o._isPm=o._locale.isPM(r),o._meridiem=r}),Mc(["h","hh"],function(r,i,o){i[of]=Fa(r),ba(o).bigHour=!0}),Mc("hmm",function(r,i,o){var l=r.length-2;i[of]=Fa(r.substr(0,l)),i[kv]=Fa(r.substr(l)),ba(o).bigHour=!0}),Mc("hmmss",function(r,i,o){var l=r.length-4,f=r.length-2;i[of]=Fa(r.substr(0,l)),i[kv]=Fa(r.substr(l,2)),i[O3]=Fa(r.substr(f)),ba(o).bigHour=!0}),Mc("Hmm",function(r,i,o){var l=r.length-2;i[of]=Fa(r.substr(0,l)),i[kv]=Fa(r.substr(l))}),Mc("Hmmss",function(r,i,o){var l=r.length-4,f=r.length-2;i[of]=Fa(r.substr(0,l)),i[kv]=Fa(r.substr(l,2)),i[O3]=Fa(r.substr(f))});function qTt(r){return(r+"").toLowerCase().charAt(0)==="p"}var YTt=/[ap]\.?m?\.?/i,WTt=TT("Hours",!0);function KTt(r,i,o){return 
r>11?o?"pm":"PM":o?"am":"AM"}var m9e={calendar:R_t,longDateFormat:z_t,invalidDate:V_t,ordinal:q_t,dayOfMonthOrdinalParse:Y_t,relativeTime:K_t,months:uTt,monthsShort:u9e,week:xTt,weekdays:MTt,weekdaysMin:DTt,weekdaysShort:b9e,meridiemParse:YTt},Nu={},PL={},BL;function XTt(r,i){var o,l=Math.min(r.length,i.length);for(o=0;o0;){if(f=wz(b.slice(0,o).join("-")),f)return f;if(l&&l.length>=o&&XTt(b,l)>=o-1)break;o--}i++}return BL}function ZTt(r){return r.match("^[^/\\\\]*$")!=null}function wz(r){var i=null,o;if(Nu[r]===void 0&&typeof module<"u"&&module&&module.exports&&ZTt(r))try{i=BL._abbr,o=require,o("./locale/"+r),r6(i)}catch{Nu[r]=null}return Nu[r]}function r6(r,i){var o;return r&&(V0(i)?o=P3(r):o=Nie(r,i),o?BL=o:typeof console<"u"&&console.warn&&console.warn("Locale "+r+" not found. Did you forget to load it?")),BL._abbr}function Nie(r,i){if(i!==null){var o,l=m9e;if(i.abbr=r,Nu[r]!=null)t9e("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),l=Nu[r]._config;else if(i.parentLocale!=null)if(Nu[i.parentLocale]!=null)l=Nu[i.parentLocale]._config;else if(o=wz(i.parentLocale),o!=null)l=o._config;else return PL[i.parentLocale]||(PL[i.parentLocale]=[]),PL[i.parentLocale].push({name:r,config:i}),null;return Nu[r]=new Eie(xie(l,i)),PL[r]&&PL[r].forEach(function(f){Nie(f.name,f.config)}),r6(r),Nu[r]}else return delete Nu[r],null}function JTt(r,i){if(i!=null){var o,l,f=m9e;Nu[r]!=null&&Nu[r].parentLocale!=null?Nu[r].set(xie(Nu[r]._config,i)):(l=wz(r),l!=null&&(f=l._config),i=xie(f,i),l==null&&(i.abbr=r),o=new Eie(i),o.parentLocale=Nu[r],Nu[r]=o),r6(r)}else Nu[r]!=null&&(Nu[r].parentLocale!=null?(Nu[r]=Nu[r].parentLocale,r===r6()&&r6(r)):Nu[r]!=null&&delete Nu[r]);return Nu[r]}function P3(r){var i;if(r&&r._locale&&r._locale._abbr&&(r=r._locale._abbr),!r)return BL;if(!mv(r)){if(i=wz(r),i)return i;r=[r]}return QTt(r)}function eCt(){return _ie(Nu)}function Pie(r){var i,o=r._a;return o&&ba(r).overflow===-2&&(i=o[I3]<0||o[I3]>11?I3:o[Am]<1||o[Am]>bz(o[id],o[I3])?Am:o[of]<0||o[of]>24||o[of]===24&&(o[kv]!==0||o[O3]!==0||o[D8]!==0)?of:o[kv]<0||o[kv]>59?kv:o[O3]<0||o[O3]>59?O3:o[D8]<0||o[D8]>999?D8:-1,ba(r)._overflowDayOfYear&&(iAm)&&(i=Am),ba(r)._overflowWeeks&&i===-1&&(i=aTt),ba(r)._overflowWeekday&&i===-1&&(i=oTt),ba(r).overflow=i),r}var tCt=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,nCt=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d|))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([+-]\d\d(?::?\d\d)?|\s*Z)?)?$/,rCt=/Z|[+-]\d\d(?::?\d\d)?/,mz=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/],["YYYYMM",/\d{6}/,!1],["YYYY",/\d{4}/,!1]],Bie=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],iCt=/^\/?Date\((-?\d+)/i,sCt=/^(?:(Mon|Tue|Wed|Thu|Fri|Sat|Sun),?\s)?(\d{1,2})\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s(\d{2,4})\s(\d\d):(\d\d)(?::(\d\d))?\s(?:(UT|GMT|[ECMP][SD]T)|([Zz])|([+-]\d{4}))$/,aCt={UT:0,GMT:0,EDT:-4*60,EST:-5*60,CDT:-5*60,CST:-6*60,MDT:-6*60,MST:-7*60,PDT:-7*60,PST:-8*60};function k9e(r){var i,o,l=r._i,f=tCt.exec(l)||nCt.exec(l),b,d,w,y,k=mz.length,E=Bie.length;if(f){for(ba(r).iso=!0,i=0,o=k;iIL(d)||r._dayOfYear===0)&&(ba(r)._overflowDayOfYear=!0),o=OL(d,0,r._dayOfYear),r._a[I3]=o.getUTCMonth(),r._a[Am]=o.getUTCDate()),i=0;i<3&&r._a[i]==null;++i)r._a[i]=l[i]=f[i];for(;i<7;i++)r._a[i]=l[i]=r._a[i]==null?i===2?1:0:r._a[i];r._a[of]===24&&r._a[kv]===0&&r._a[O3]===0&&r._a[D8]===0&&(r._nextDay=!0,r._a[of]=0),r._d=(r._useUTC?OL:yTt).apply(null,l),b=r._useUTC?r._d.getUTCDay():r._d.getDay(),r._tzm!=null&&r._d.setUTCMinutes(r._d.getUTCMinutes()-r._tzm),r._nextDay&&(r._a[of]=24),r._w&&typeof r._w.d<"u"&&r._w.d!==b&&(ba(r).weekdayMismatch=!0)}}function gCt(r){var i,o,l,f,b,d,w,y,k;i=r._w,i.GG!=null||i.W!=null||i.E!=null?(b=1,d=4,o=ST(i.GG,r._a[id],NL(mu(),1,4).year),l=ST(i.W,1),f=ST(i.E,1),(f<1||f>7)&&(y=!0)):(b=r._locale._week.dow,d=r._locale._week.doy,k=NL(mu(),b,d),o=ST(i.gg,r._a[id],k.year),l=ST(i.w,k.week),i.d!=null?(f=i.d,(f<0||f>6)&&(y=!0)):i.e!=null?(f=i.e+b,(i.e<0||i.e>6)&&(y=!0)):f=b),l<1||l>N3(o,b,d)?ba(r)._overflowWeeks=!0:y!=null?ba(r)._overflowWeekday=!0:(w=p9e(o,l,f,b,d),r._a[id]=w.year,r._dayOfYear=w.dayOfYear)}Ar.ISO_8601=function(){},Ar.RFC_2822=function(){};function Rie(r){if(r._f===Ar.ISO_8601){k9e(r);return}if(r._f===Ar.RFC_2822){x9e(r);return}r._a=[],ba(r).empty=!0;var i=""+r._i,o,l,f,b,d,w=i.length,y=0,k,E;for(f=n9e(r._f,r._locale).match(Tie)||[],E=f.length,o=0;o0&&ba(r).unusedInput.push(d),i=i.slice(i.indexOf(l)+l.length),y+=l.length),_T[b]?(l?ba(r).empty=!1:ba(r).unusedTokens.push(b),sTt(b,l,r)):r._strict&&!l&&ba(r).unusedTokens.push(b);ba(r).charsLeftOver=w-y,i.length>0&&ba(r).unusedInput.push(i),r._a[of]<=12&&ba(r).bigHour===!0&&r._a[of]>0&&(ba(r).bigHour=void 0),ba(r).parsedDateParts=r._a.slice(0),ba(r).meridiem=r._meridiem,r._a[of]=pCt(r._locale,r._a[of],r._meridiem),k=ba(r).era,k!==null&&(r._a[id]=r._locale.erasConvertYear(k,r._a[id])),Fie(r),Pie(r)}function pCt(r,i,o){var l;return o==null?i:r.meridiemHour!=null?r.meridiemHour(i,o):(r.isPM!=null&&(l=r.isPM(o),l&&i<12&&(i+=12),!l&&i===12&&(i=0)),i)}function bCt(r){var i,o,l,f,b,d,w=!1,y=r._f.length;if(y===0){ba(r).invalidFormat=!0,r._d=new Date(NaN);return}for(f=0;fthis?this:r:sz()});function T9e(r,i){var o,l;if(i.length===1&&mv(i[0])&&(i=i[0]),!i.length)return mu();for(o=i[0],l=1;lthis.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function FCt(){if(!V0(this._isDSTShifted))return this._isDSTShifted;var r={},i;return 
kie(r,this),r=E9e(r),r._a?(i=r._isUTC?Tm(r._a):mu(r._a),this._isDSTShifted=this.isValid()&&ACt(r._a,i.toArray())>0):this._isDSTShifted=!1,this._isDSTShifted}function RCt(){return this.isValid()?!this._isUTC:!1}function jCt(){return this.isValid()?this._isUTC:!1}function S9e(){return this.isValid()?this._isUTC&&this._offset===0:!1}var $Ct=/^(-|\+)?(?:(\d*)[. ])?(\d+):(\d+)(?::(\d+)(\.\d*)?)?$/,HCt=/^(-|\+)?P(?:([-+]?[0-9,.]*)Y)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)W)?(?:([-+]?[0-9,.]*)D)?(?:T(?:([-+]?[0-9,.]*)H)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)S)?)?$/;function xv(r,i){var o=r,l=null,f,b,d;return kz(r)?o={ms:r._milliseconds,d:r._days,M:r._months}:L3(r)||!isNaN(+r)?(o={},i?o[i]=+r:o.milliseconds=+r):(l=$Ct.exec(r))?(f=l[1]==="-"?-1:1,o={y:0,d:Fa(l[Am])*f,h:Fa(l[of])*f,m:Fa(l[kv])*f,s:Fa(l[O3])*f,ms:Fa(jie(l[D8]*1e3))*f}):(l=HCt.exec(r))?(f=l[1]==="-"?-1:1,o={y:L8(l[2],f),M:L8(l[3],f),w:L8(l[4],f),d:L8(l[5],f),h:L8(l[6],f),m:L8(l[7],f),s:L8(l[8],f)}):o==null?o={}:typeof o=="object"&&("from"in o||"to"in o)&&(d=zCt(mu(o.from),mu(o.to)),o={},o.ms=d.milliseconds,o.M=d.months),b=new yz(o),kz(r)&&Mo(r,"_locale")&&(b._locale=r._locale),kz(r)&&Mo(r,"_isValid")&&(b._isValid=r._isValid),b}xv.fn=yz.prototype,xv.invalid=SCt;function L8(r,i){var o=r&&parseFloat(r.replace(",","."));return(isNaN(o)?0:o)*i}function A9e(r,i){var o={};return o.months=i.month()-r.month()+(i.year()-r.year())*12,r.clone().add(o.months,"M").isAfter(i)&&--o.months,o.milliseconds=+i-+r.clone().add(o.months,"M"),o}function zCt(r,i){var o;return r.isValid()&&i.isValid()?(i=Hie(i,r),r.isBefore(i)?o=A9e(r,i):(o=A9e(i,r),o.milliseconds=-o.milliseconds,o.months=-o.months),o):{milliseconds:0,months:0}}function M9e(r,i){return function(o,l){var f,b;return l!==null&&!isNaN(+l)&&(t9e(i,"moment()."+i+"(period, number) is deprecated. Please use moment()."+i+"(number, period). 
See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info."),b=o,o=l,l=b),f=xv(o,l),D9e(this,f,r),this}}function D9e(r,i,o,l){var f=i._milliseconds,b=jie(i._days),d=jie(i._months);!r.isValid()||(l=l==null?!0:l,d&&h9e(r,uz(r,"Month")+d*o),b&&i9e(r,"Date",uz(r,"Date")+b*o),f&&r._d.setTime(r._d.valueOf()+f*o),l&&Ar.updateOffset(r,b||d))}var GCt=M9e(1,"add"),VCt=M9e(-1,"subtract");function L9e(r){return typeof r=="string"||r instanceof String}function UCt(r){return yv(r)||SL(r)||L9e(r)||L3(r)||YCt(r)||qCt(r)||r===null||r===void 0}function qCt(r){var i=M8(r)&&!vie(r),o=!1,l=["years","year","y","months","month","M","days","day","d","dates","date","D","hours","hour","h","minutes","minute","m","seconds","second","s","milliseconds","millisecond","ms"],f,b,d=l.length;for(f=0;fo.valueOf():o.valueOf()9999?oz(o,i?"YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]":"YYYYYY-MM-DD[T]HH:mm:ss.SSSZ"):Cm(Date.prototype.toISOString)?i?this.toDate().toISOString():new Date(this.valueOf()+this.utcOffset()*60*1e3).toISOString().replace("Z",oz(o,"Z")):oz(o,i?"YYYY-MM-DD[T]HH:mm:ss.SSS[Z]":"YYYY-MM-DD[T]HH:mm:ss.SSSZ")}function oSt(){if(!this.isValid())return"moment.invalid(/* "+this._i+" */)";var r="moment",i="",o,l,f,b;return this.isLocal()||(r=this.utcOffset()===0?"moment.utc":"moment.parseZone",i="Z"),o="["+r+'("]',l=0<=this.year()&&this.year()<=9999?"YYYY":"YYYYYY",f="-MM-DD[T]HH:mm:ss.SSS",b=i+'[")]',this.format(o+l+f+b)}function cSt(r){r||(r=this.isUtc()?Ar.defaultFormatUtc:Ar.defaultFormat);var i=oz(this,r);return this.localeData().postformat(i)}function uSt(r,i){return this.isValid()&&(yv(r)&&r.isValid()||mu(r).isValid())?xv({to:this,from:r}).locale(this.locale()).humanize(!i):this.localeData().invalidDate()}function lSt(r){return this.from(mu(),r)}function hSt(r,i){return this.isValid()&&(yv(r)&&r.isValid()||mu(r).isValid())?xv({from:this,to:r}).locale(this.locale()).humanize(!i):this.localeData().invalidDate()}function fSt(r){return this.to(mu(),r)}function I9e(r){var i;return r===void 0?this._locale._abbr:(i=P3(r),i!=null&&(this._locale=i),this)}var O9e=C2("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. 
Use moment().locale() to change languages.",function(r){return r===void 0?this.localeData():this.locale(r)});function N9e(){return this._locale}var Ez=1e3,AT=60*Ez,_z=60*AT,P9e=(365*400+97)*24*_z;function MT(r,i){return(r%i+i)%i}function B9e(r,i,o){return r<100&&r>=0?new Date(r+400,i,o)-P9e:new Date(r,i,o).valueOf()}function F9e(r,i,o){return r<100&&r>=0?Date.UTC(r+400,i,o)-P9e:Date.UTC(r,i,o)}function dSt(r){var i,o;if(r=S2(r),r===void 0||r==="millisecond"||!this.isValid())return this;switch(o=this._isUTC?F9e:B9e,r){case"year":i=o(this.year(),0,1);break;case"quarter":i=o(this.year(),this.month()-this.month()%3,1);break;case"month":i=o(this.year(),this.month(),1);break;case"week":i=o(this.year(),this.month(),this.date()-this.weekday());break;case"isoWeek":i=o(this.year(),this.month(),this.date()-(this.isoWeekday()-1));break;case"day":case"date":i=o(this.year(),this.month(),this.date());break;case"hour":i=this._d.valueOf(),i-=MT(i+(this._isUTC?0:this.utcOffset()*AT),_z);break;case"minute":i=this._d.valueOf(),i-=MT(i,AT);break;case"second":i=this._d.valueOf(),i-=MT(i,Ez);break}return this._d.setTime(i),Ar.updateOffset(this,!0),this}function gSt(r){var i,o;if(r=S2(r),r===void 0||r==="millisecond"||!this.isValid())return this;switch(o=this._isUTC?F9e:B9e,r){case"year":i=o(this.year()+1,0,1)-1;break;case"quarter":i=o(this.year(),this.month()-this.month()%3+3,1)-1;break;case"month":i=o(this.year(),this.month()+1,1)-1;break;case"week":i=o(this.year(),this.month(),this.date()-this.weekday()+7)-1;break;case"isoWeek":i=o(this.year(),this.month(),this.date()-(this.isoWeekday()-1)+7)-1;break;case"day":case"date":i=o(this.year(),this.month(),this.date()+1)-1;break;case"hour":i=this._d.valueOf(),i+=_z-MT(i+(this._isUTC?0:this.utcOffset()*AT),_z)-1;break;case"minute":i=this._d.valueOf(),i+=AT-MT(i,AT)-1;break;case"second":i=this._d.valueOf(),i+=Ez-MT(i,Ez)-1;break}return this._d.setTime(i),Ar.updateOffset(this,!0),this}function pSt(){return this._d.valueOf()-(this._offset||0)*6e4}function bSt(){return Math.floor(this.valueOf()/1e3)}function vSt(){return new Date(this.valueOf())}function wSt(){var r=this;return[r.year(),r.month(),r.date(),r.hour(),r.minute(),r.second(),r.millisecond()]}function mSt(){var r=this;return{years:r.year(),months:r.month(),date:r.date(),hours:r.hours(),minutes:r.minutes(),seconds:r.seconds(),milliseconds:r.milliseconds()}}function ySt(){return this.isValid()?this.toISOString():null}function kSt(){return mie(this)}function xSt(){return n6({},ba(this))}function ESt(){return ba(this).overflow}function _St(){return{input:this._i,format:this._f,locale:this._locale,isUTC:this._isUTC,strict:this._strict}}Ki("N",0,0,"eraAbbr"),Ki("NN",0,0,"eraAbbr"),Ki("NNN",0,0,"eraAbbr"),Ki("NNNN",0,0,"eraName"),Ki("NNNNN",0,0,"eraNarrow"),Ki("y",["y",1],"yo","eraYear"),Ki("y",["yy",2],0,"eraYear"),Ki("y",["yyy",3],0,"eraYear"),Ki("y",["yyyy",4],0,"eraYear"),ci("N",Gie),ci("NN",Gie),ci("NNN",Gie),ci("NNNN",PSt),ci("NNNNN",BSt),Mc(["N","NN","NNN","NNNN","NNNNN"],function(r,i,o,l){var f=o._locale.erasParse(r,l,o._strict);f?ba(o).era=f:ba(o).invalidEra=r}),ci("y",CT),ci("yy",CT),ci("yyy",CT),ci("yyyy",CT),ci("yo",FSt),Mc(["y","yy","yyy","yyyy"],id),Mc(["yo"],function(r,i,o,l){var f;o._locale._eraYearOrdinalRegex&&(f=r.match(o._locale._eraYearOrdinalRegex)),o._locale.eraYearOrdinalParse?i[id]=o._locale.eraYearOrdinalParse(r,f):i[id]=parseInt(r,10)});function TSt(r,i){var o,l,f,b=this._eras||P3("en")._eras;for(o=0,l=b.length;o=0)return b[l]}function SSt(r,i){var o=r.since<=r.until?1:-1;return i===void 
0?Ar(r.since).year():Ar(r.since).year()+(i-r.offset)*o}function ASt(){var r,i,o,l=this.localeData().eras();for(r=0,i=l.length;rb&&(i=b),VSt.call(this,r,i,o,l,f))}function VSt(r,i,o,l,f){var b=p9e(r,i,o,l,f),d=OL(b.year,0,b.dayOfYear);return this.year(d.getUTCFullYear()),this.month(d.getUTCMonth()),this.date(d.getUTCDate()),this}Ki("Q",0,"Qo","quarter"),nd("quarter","Q"),rd("quarter",7),ci("Q",s9e),Mc("Q",function(r,i){i[I3]=(Fa(r)-1)*3});function USt(r){return r==null?Math.ceil((this.month()+1)/3):this.month((r-1)*3+this.month()%3)}Ki("D",["DD",2],"Do","date"),nd("date","D"),rd("date",9),ci("D",wu),ci("DD",wu,op),ci("Do",function(r,i){return r?i._dayOfMonthOrdinalParse||i._ordinalParse:i._dayOfMonthOrdinalParseLenient}),Mc(["D","DD"],Am),Mc("Do",function(r,i){i[Am]=Fa(r.match(wu)[0])});var j9e=TT("Date",!0);Ki("DDD",["DDDD",3],"DDDo","dayOfYear"),nd("dayOfYear","DDD"),rd("dayOfYear",4),ci("DDD",hz),ci("DDDD",a9e),Mc(["DDD","DDDD"],function(r,i,o){o._dayOfYear=Fa(r)});function qSt(r){var i=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return r==null?i:this.add(r-i,"d")}Ki("m",["mm",2],0,"minute"),nd("minute","m"),rd("minute",14),ci("m",wu),ci("mm",wu,op),Mc(["m","mm"],kv);var YSt=TT("Minutes",!1);Ki("s",["ss",2],0,"second"),nd("second","s"),rd("second",15),ci("s",wu),ci("ss",wu,op),Mc(["s","ss"],O3);var WSt=TT("Seconds",!1);Ki("S",0,0,function(){return~~(this.millisecond()/100)}),Ki(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),Ki(0,["SSS",3],0,"millisecond"),Ki(0,["SSSS",4],0,function(){return this.millisecond()*10}),Ki(0,["SSSSS",5],0,function(){return this.millisecond()*100}),Ki(0,["SSSSSS",6],0,function(){return this.millisecond()*1e3}),Ki(0,["SSSSSSS",7],0,function(){return this.millisecond()*1e4}),Ki(0,["SSSSSSSS",8],0,function(){return this.millisecond()*1e5}),Ki(0,["SSSSSSSSS",9],0,function(){return this.millisecond()*1e6}),nd("millisecond","ms"),rd("millisecond",16),ci("S",hz,s9e),ci("SS",hz,op),ci("SSS",hz,a9e);var i6,$9e;for(i6="SSSS";i6.length<=9;i6+="S")ci(i6,CT);function KSt(r,i){i[D8]=Fa(("0."+r)*1e3)}for(i6="S";i6.length<=9;i6+="S")Mc(i6,KSt);$9e=TT("Milliseconds",!1),Ki("z",0,0,"zoneAbbr"),Ki("zz",0,0,"zoneName");function XSt(){return this._isUTC?"UTC":""}function QSt(){return this._isUTC?"Coordinated Universal Time":""}var xr=AL.prototype;xr.add=GCt,xr.calendar=XCt,xr.clone=QCt,xr.diff=iSt,xr.endOf=gSt,xr.format=cSt,xr.from=uSt,xr.fromNow=lSt,xr.to=hSt,xr.toNow=fSt,xr.get=J_t,xr.invalidAt=ESt,xr.isAfter=ZCt,xr.isBefore=JCt,xr.isBetween=eSt,xr.isSame=tSt,xr.isSameOrAfter=nSt,xr.isSameOrBefore=rSt,xr.isValid=kSt,xr.lang=O9e,xr.locale=I9e,xr.localeData=N9e,xr.max=kCt,xr.min=yCt,xr.parsingFlags=xSt,xr.set=eTt,xr.startOf=dSt,xr.subtract=VCt,xr.toArray=wSt,xr.toObject=mSt,xr.toDate=vSt,xr.toISOString=aSt,xr.inspect=oSt,typeof 
Symbol<"u"&&Symbol.for!=null&&(xr[Symbol.for("nodejs.util.inspect.custom")]=function(){return"Moment<"+this.format()+">"}),xr.toJSON=ySt,xr.toString=sSt,xr.unix=bSt,xr.valueOf=pSt,xr.creationData=_St,xr.eraName=ASt,xr.eraNarrow=MSt,xr.eraAbbr=DSt,xr.eraYear=LSt,xr.year=g9e,xr.isLeapYear=mTt,xr.weekYear=RSt,xr.isoWeekYear=jSt,xr.quarter=xr.quarters=USt,xr.month=f9e,xr.daysInMonth=bTt,xr.week=xr.weeks=TTt,xr.isoWeek=xr.isoWeeks=CTt,xr.weeksInYear=zSt,xr.weeksInWeekYear=GSt,xr.isoWeeksInYear=$St,xr.isoWeeksInISOWeekYear=HSt,xr.date=j9e,xr.day=xr.days=jTt,xr.weekday=$Tt,xr.isoWeekday=HTt,xr.dayOfYear=qSt,xr.hour=xr.hours=WTt,xr.minute=xr.minutes=YSt,xr.second=xr.seconds=WSt,xr.millisecond=xr.milliseconds=$9e,xr.utcOffset=DCt,xr.utc=ICt,xr.local=OCt,xr.parseZone=NCt,xr.hasAlignedHourOffset=PCt,xr.isDST=BCt,xr.isLocal=RCt,xr.isUtcOffset=jCt,xr.isUtc=S9e,xr.isUTC=S9e,xr.zoneAbbr=XSt,xr.zoneName=QSt,xr.dates=C2("dates accessor is deprecated. Use date instead.",j9e),xr.months=C2("months accessor is deprecated. Use month instead",f9e),xr.years=C2("years accessor is deprecated. Use year instead",g9e),xr.zone=C2("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",LCt),xr.isDSTShifted=C2("isDSTShifted is deprecated. See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",FCt);function ZSt(r){return mu(r*1e3)}function JSt(){return mu.apply(null,arguments).parseZone()}function H9e(r){return r}var Do=Eie.prototype;Do.calendar=j_t,Do.longDateFormat=G_t,Do.invalidDate=U_t,Do.ordinal=W_t,Do.preparse=H9e,Do.postformat=H9e,Do.relativeTime=X_t,Do.pastFuture=Q_t,Do.set=F_t,Do.eras=TSt,Do.erasParse=CSt,Do.erasConvertYear=SSt,Do.erasAbbrRegex=OSt,Do.erasNameRegex=ISt,Do.erasNarrowRegex=NSt,Do.months=fTt,Do.monthsShort=dTt,Do.monthsParse=pTt,Do.monthsRegex=wTt,Do.monthsShortRegex=vTt,Do.week=kTt,Do.firstDayOfYear=_Tt,Do.firstDayOfWeek=ETt,Do.weekdays=NTt,Do.weekdaysMin=BTt,Do.weekdaysShort=PTt,Do.weekdaysParse=RTt,Do.weekdaysRegex=zTt,Do.weekdaysShortRegex=GTt,Do.weekdaysMinRegex=VTt,Do.isPM=qTt,Do.meridiem=KTt;function Cz(r,i,o,l){var f=P3(),b=Tm().set(l,i);return f[o](b,r)}function z9e(r,i,o){if(L3(r)&&(i=r,r=void 0),r=r||"",i!=null)return Cz(r,i,o,"month");var l,f=[];for(l=0;l<12;l++)f[l]=Cz(r,l,o,"month");return f}function Uie(r,i,o,l){typeof r=="boolean"?(L3(i)&&(o=i,i=void 0),i=i||""):(i=r,o=i,r=!1,L3(i)&&(o=i,i=void 0),i=i||"");var f=P3(),b=r?f._week.dow:0,d,w=[];if(o!=null)return Cz(i,(o+b)%7,l,"day");for(d=0;d<7;d++)w[d]=Cz(i,(d+b)%7,l,"day");return w}function eAt(r,i){return z9e(r,i,"months")}function tAt(r,i){return z9e(r,i,"monthsShort")}function nAt(r,i,o){return Uie(r,i,o,"weekdays")}function rAt(r,i,o){return Uie(r,i,o,"weekdaysShort")}function iAt(r,i,o){return Uie(r,i,o,"weekdaysMin")}r6("en",{eras:[{since:"0001-01-01",until:1/0,offset:1,name:"Anno Domini",narrow:"AD",abbr:"AD"},{since:"0000-12-31",until:-1/0,offset:1,name:"Before Christ",narrow:"BC",abbr:"BC"}],dayOfMonthOrdinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(r){var i=r%10,o=Fa(r%100/10)===1?"th":i===1?"st":i===2?"nd":i===3?"rd":"th";return r+o}}),Ar.lang=C2("moment.lang is deprecated. Use moment.locale instead.",r6),Ar.langData=C2("moment.langData is deprecated. 
Use moment.localeData instead.",P3);var B3=Math.abs;function sAt(){var r=this._data;return this._milliseconds=B3(this._milliseconds),this._days=B3(this._days),this._months=B3(this._months),r.milliseconds=B3(r.milliseconds),r.seconds=B3(r.seconds),r.minutes=B3(r.minutes),r.hours=B3(r.hours),r.months=B3(r.months),r.years=B3(r.years),this}function G9e(r,i,o,l){var f=xv(i,o);return r._milliseconds+=l*f._milliseconds,r._days+=l*f._days,r._months+=l*f._months,r._bubble()}function aAt(r,i){return G9e(this,r,i,1)}function oAt(r,i){return G9e(this,r,i,-1)}function V9e(r){return r<0?Math.floor(r):Math.ceil(r)}function cAt(){var r=this._milliseconds,i=this._days,o=this._months,l=this._data,f,b,d,w,y;return r>=0&&i>=0&&o>=0||r<=0&&i<=0&&o<=0||(r+=V9e(qie(o)+i)*864e5,i=0,o=0),l.milliseconds=r%1e3,f=A2(r/1e3),l.seconds=f%60,b=A2(f/60),l.minutes=b%60,d=A2(b/60),l.hours=d%24,i+=A2(d/24),y=A2(U9e(i)),o+=y,i-=V9e(qie(y)),w=A2(o/12),o%=12,l.days=i,l.months=o,l.years=w,this}function U9e(r){return r*4800/146097}function qie(r){return r*146097/4800}function uAt(r){if(!this.isValid())return NaN;var i,o,l=this._milliseconds;if(r=S2(r),r==="month"||r==="quarter"||r==="year")switch(i=this._days+l/864e5,o=this._months+U9e(i),r){case"month":return o;case"quarter":return o/3;case"year":return o/12}else switch(i=this._days+Math.round(qie(this._months)),r){case"week":return i/7+l/6048e5;case"day":return i+l/864e5;case"hour":return i*24+l/36e5;case"minute":return i*1440+l/6e4;case"second":return i*86400+l/1e3;case"millisecond":return Math.floor(i*864e5)+l;default:throw new Error("Unknown unit "+r)}}function lAt(){return this.isValid()?this._milliseconds+this._days*864e5+this._months%12*2592e6+Fa(this._months/12)*31536e6:NaN}function F3(r){return function(){return this.as(r)}}var hAt=F3("ms"),fAt=F3("s"),dAt=F3("m"),gAt=F3("h"),pAt=F3("d"),bAt=F3("w"),vAt=F3("M"),wAt=F3("Q"),mAt=F3("y");function yAt(){return xv(this)}function kAt(r){return r=S2(r),this.isValid()?this[r+"s"]():NaN}function I8(r){return function(){return this.isValid()?this._data[r]:NaN}}var xAt=I8("milliseconds"),EAt=I8("seconds"),_At=I8("minutes"),TAt=I8("hours"),CAt=I8("days"),SAt=I8("months"),AAt=I8("years");function MAt(){return A2(this.days()/7)}var R3=Math.round,DT={ss:44,s:45,m:45,h:22,d:26,w:null,M:11};function DAt(r,i,o,l,f){return f.relativeTime(i||1,!!o,r,l)}function LAt(r,i,o,l){var f=xv(r).abs(),b=R3(f.as("s")),d=R3(f.as("m")),w=R3(f.as("h")),y=R3(f.as("d")),k=R3(f.as("M")),E=R3(f.as("w")),T=R3(f.as("y")),C=b<=o.ss&&["s",b]||b0,C[4]=l,DAt.apply(null,C)}function IAt(r){return r===void 0?R3:typeof r=="function"?(R3=r,!0):!1}function OAt(r,i){return DT[r]===void 0?!1:i===void 0?DT[r]:(DT[r]=i,r==="s"&&(DT.ss=i-1),!0)}function NAt(r,i){if(!this.isValid())return this.localeData().invalidDate();var o=!1,l=DT,f,b;return typeof r=="object"&&(i=r,r=!1),typeof r=="boolean"&&(o=r),typeof i=="object"&&(l=Object.assign({},DT,i),i.s!=null&&i.ss==null&&(l.ss=i.s-1)),f=this.localeData(),b=LAt(this,!o,l,f),o&&(b=f.pastFuture(+this,b)),f.postformat(b)}var Yie=Math.abs;function LT(r){return(r>0)-(r<0)||+r}function Sz(){if(!this.isValid())return this.localeData().invalidDate();var r=Yie(this._milliseconds)/1e3,i=Yie(this._days),o=Yie(this._months),l,f,b,d,w=this.asSeconds(),y,k,E,T;return 
w?(l=A2(r/60),f=A2(l/60),r%=60,l%=60,b=A2(o/12),o%=12,d=r?r.toFixed(3).replace(/\.?0+$/,""):"",y=w<0?"-":"",k=LT(this._months)!==LT(w)?"-":"",E=LT(this._days)!==LT(w)?"-":"",T=LT(this._milliseconds)!==LT(w)?"-":"",y+"P"+(b?k+b+"Y":"")+(o?k+o+"M":"")+(i?E+i+"D":"")+(f||l||r?"T":"")+(f?T+f+"H":"")+(l?T+l+"M":"")+(r?T+d+"S":"")):"P0D"}var vo=yz.prototype;vo.isValid=CCt,vo.abs=sAt,vo.add=aAt,vo.subtract=oAt,vo.as=uAt,vo.asMilliseconds=hAt,vo.asSeconds=fAt,vo.asMinutes=dAt,vo.asHours=gAt,vo.asDays=pAt,vo.asWeeks=bAt,vo.asMonths=vAt,vo.asQuarters=wAt,vo.asYears=mAt,vo.valueOf=lAt,vo._bubble=cAt,vo.clone=yAt,vo.get=kAt,vo.milliseconds=xAt,vo.seconds=EAt,vo.minutes=_At,vo.hours=TAt,vo.days=CAt,vo.weeks=MAt,vo.months=SAt,vo.years=AAt,vo.humanize=NAt,vo.toISOString=Sz,vo.toString=Sz,vo.toJSON=Sz,vo.locale=I9e,vo.localeData=N9e,vo.toIsoString=C2("toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)",Sz),vo.lang=O9e,Ki("X",0,0,"unix"),Ki("x",0,0,"valueOf"),ci("x",dz),ci("X",nTt),Mc("X",function(r,i,o){o._d=new Date(parseFloat(r)*1e3)}),Mc("x",function(r,i,o){o._d=new Date(Fa(r))});//! moment.js -Ar.version="2.29.4",P_t(mu),Ar.fn=xr,Ar.min=xCt,Ar.max=ECt,Ar.now=_Ct,Ar.utc=Tm,Ar.unix=ZSt,Ar.months=eAt,Ar.isDate=SL,Ar.locale=r6,Ar.invalid=sz,Ar.duration=xv,Ar.isMoment=yv,Ar.weekdays=nAt,Ar.parseZone=JSt,Ar.localeData=P3,Ar.isDuration=kz,Ar.monthsShort=tAt,Ar.weekdaysMin=iAt,Ar.defineLocale=Nie,Ar.updateLocale=JTt,Ar.locales=eCt,Ar.weekdaysShort=rAt,Ar.normalizeUnits=S2,Ar.relativeTimeRounding=IAt,Ar.relativeTimeThreshold=OAt,Ar.calendarFormat=KCt,Ar.prototype=xr,Ar.HTML5_FMT={DATETIME_LOCAL:"YYYY-MM-DDTHH:mm",DATETIME_LOCAL_SECONDS:"YYYY-MM-DDTHH:mm:ss",DATETIME_LOCAL_MS:"YYYY-MM-DDTHH:mm:ss.SSS",DATE:"YYYY-MM-DD",TIME:"HH:mm",TIME_SECONDS:"HH:mm:ss",TIME_MS:"HH:mm:ss.SSS",WEEK:"GGGG-[W]WW",MONTH:"YYYY-MM"};const j3={trace:0,debug:1,info:2,warn:3,error:4,fatal:5},Se={trace:(...r)=>{},debug:(...r)=>{},info:(...r)=>{},warn:(...r)=>{},error:(...r)=>{},fatal:(...r)=>{}},Wie=function(r="fatal"){let i=j3.fatal;typeof r=="string"?(r=r.toLowerCase(),r in j3&&(i=j3[r])):typeof r=="number"&&(i=r),Se.trace=()=>{},Se.debug=()=>{},Se.info=()=>{},Se.warn=()=>{},Se.error=()=>{},Se.fatal=()=>{},i<=j3.fatal&&(Se.fatal=console.error?console.error.bind(console,M2("FATAL"),"color: orange"):console.log.bind(console,"\x1B[35m",M2("FATAL"))),i<=j3.error&&(Se.error=console.error?console.error.bind(console,M2("ERROR"),"color: orange"):console.log.bind(console,"\x1B[31m",M2("ERROR"))),i<=j3.warn&&(Se.warn=console.warn?console.warn.bind(console,M2("WARN"),"color: orange"):console.log.bind(console,"\x1B[33m",M2("WARN"))),i<=j3.info&&(Se.info=console.info?console.info.bind(console,M2("INFO"),"color: lightblue"):console.log.bind(console,"\x1B[34m",M2("INFO"))),i<=j3.debug&&(Se.debug=console.debug?console.debug.bind(console,M2("DEBUG"),"color: lightgreen"):console.log.bind(console,"\x1B[32m",M2("DEBUG"))),i<=j3.trace&&(Se.trace=console.debug?console.debug.bind(console,M2("TRACE"),"color: lightgreen"):console.log.bind(console,"\x1B[32m",M2("TRACE")))},M2=r=>`%c${Ar().format("ss.SSS")} : ${r} : `;var Mm=typeof globalThis<"u"?globalThis:typeof window<"u"?window:typeof global<"u"?global:typeof self<"u"?self:{};function q9e(r){return r&&r.__esModule&&Object.prototype.hasOwnProperty.call(r,"default")?r.default:r}var Kie={};Object.defineProperty(Kie,"__esModule",{value:!0});var $3=Kie.sanitizeUrl=void 
0,PAt=/^([^\w]*)(javascript|data|vbscript)/im,BAt=/&#(\w+)(^\w|;)?/g,FAt=/[\u0000-\u001F\u007F-\u009F\u2000-\u200D\uFEFF]/gim,RAt=/^([^:]+):/gm,jAt=[".","/"];function $At(r){return jAt.indexOf(r[0])>-1}function HAt(r){return r.replace(BAt,function(i,o){return String.fromCharCode(o)})}function zAt(r){var i=HAt(r||"").replace(FAt,"").trim();if(!i)return"about:blank";if($At(i))return i;var o=i.match(RAt);if(!o)return i;var l=o[0];return PAt.test(l)?"about:blank":i}$3=Kie.sanitizeUrl=zAt;function Az(r,i){return r==null||i==null?NaN:ri?1:r>=i?0:NaN}function GAt(r,i){return r==null||i==null?NaN:ir?1:i>=r?0:NaN}function Xie(r){let i,o,l;r.length!==2?(i=Az,o=(w,y)=>Az(r(w),y),l=(w,y)=>r(w)-y):(i=r===Az||r===GAt?r:VAt,o=r,l=r);function f(w,y,k=0,E=w.length){if(k>>1;o(w[T],y)<0?k=T+1:E=T}while(k>>1;o(w[T],y)<=0?k=T+1:E=T}while(kk&&l(w[T-1],y)>-l(w[T],y)?T-1:T}return{left:f,center:d,right:b}}function VAt(){return 0}function UAt(r){return r===null?NaN:+r}const qAt=Xie(Az).right;Xie(UAt).center;const YAt=qAt;class Y9e extends Map{constructor(i,o=XAt){if(super(),Object.defineProperties(this,{_intern:{value:new Map},_key:{value:o}}),i!=null)for(const[l,f]of i)this.set(l,f)}get(i){return super.get(W9e(this,i))}has(i){return super.has(W9e(this,i))}set(i,o){return super.set(WAt(this,i),o)}delete(i){return super.delete(KAt(this,i))}}function W9e({_intern:r,_key:i},o){const l=i(o);return r.has(l)?r.get(l):o}function WAt({_intern:r,_key:i},o){const l=i(o);return r.has(l)?r.get(l):(r.set(l,o),o)}function KAt({_intern:r,_key:i},o){const l=i(o);return r.has(l)&&(o=r.get(l),r.delete(l)),o}function XAt(r){return r!==null&&typeof r=="object"?r.valueOf():r}var Qie=Math.sqrt(50),Zie=Math.sqrt(10),Jie=Math.sqrt(2);function QAt(r,i,o){var l,f=-1,b,d,w;if(i=+i,r=+r,o=+o,r===i&&o>0)return[r];if((l=i0){let y=Math.round(r/w),k=Math.round(i/w);for(y*wi&&--k,d=new Array(b=k-y+1);++fi&&--k,d=new Array(b=k-y+1);++f=0?(b>=Qie?10:b>=Zie?5:b>=Jie?2:1)*Math.pow(10,f):-Math.pow(10,-f)/(b>=Qie?10:b>=Zie?5:b>=Jie?2:1)}function ese(r,i,o){var l=Math.abs(i-r)/Math.max(0,o),f=Math.pow(10,Math.floor(Math.log(l)/Math.LN10)),b=l/f;return b>=Qie?f*=10:b>=Zie?f*=5:b>=Jie&&(f*=2),i=l)&&(o=l);else{let l=-1;for(let f of r)(f=i(f,++l,r))!=null&&(o=f)&&(o=f)}return o}function JAt(r,i){let o;if(i===void 0)for(const l of r)l!=null&&(o>l||o===void 0&&l>=l)&&(o=l);else{let l=-1;for(let f of r)(f=i(f,++l,r))!=null&&(o>f||o===void 0&&f>=f)&&(o=f)}return o}function eMt(r){return r}var Mz=1,tse=2,nse=3,Dz=4,X9e=1e-6;function tMt(r){return"translate("+r+",0)"}function nMt(r){return"translate(0,"+r+")"}function rMt(r){return i=>+r(i)}function iMt(r,i){return i=Math.max(0,r.bandwidth()-i*2)/2,r.round()&&(i=Math.round(i)),o=>+r(o)+i}function sMt(){return!this.__axis}function Q9e(r,i){var o=[],l=null,f=null,b=6,d=6,w=3,y=typeof window<"u"&&window.devicePixelRatio>1?0:.5,k=r===Mz||r===Dz?-1:1,E=r===Dz||r===tse?"x":"y",T=r===Mz||r===nse?tMt:nMt;function C(S){var 
L=l==null?i.ticks?i.ticks.apply(i,o):i.domain():l,O=f==null?i.tickFormat?i.tickFormat.apply(i,o):eMt:f,B=Math.max(b,0)+w,N=i.range(),F=+N[0]+y,R=+N[N.length-1]+y,q=(i.bandwidth?iMt:rMt)(i.copy(),y),X=S.selection?S.selection():S,te=X.selectAll(".domain").data([null]),H=X.selectAll(".tick").data(L,i).order(),Y=H.exit(),z=H.enter().append("g").attr("class","tick"),W=H.select("line"),Z=H.select("text");te=te.merge(te.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),H=H.merge(z),W=W.merge(z.append("line").attr("stroke","currentColor").attr(E+"2",k*b)),Z=Z.merge(z.append("text").attr("fill","currentColor").attr(E,k*B).attr("dy",r===Mz?"0em":r===nse?"0.71em":"0.32em")),S!==X&&(te=te.transition(S),H=H.transition(S),W=W.transition(S),Z=Z.transition(S),Y=Y.transition(S).attr("opacity",X9e).attr("transform",function(G){return isFinite(G=q(G))?T(G+y):this.getAttribute("transform")}),z.attr("opacity",X9e).attr("transform",function(G){var ae=this.parentNode.__axis;return T((ae&&isFinite(ae=ae(G))?ae:q(G))+y)})),Y.remove(),te.attr("d",r===Dz||r===tse?d?"M"+k*d+","+F+"H"+y+"V"+R+"H"+k*d:"M"+y+","+F+"V"+R:d?"M"+F+","+k*d+"V"+y+"H"+R+"V"+k*d:"M"+F+","+y+"H"+R),H.attr("opacity",1).attr("transform",function(G){return T(q(G)+y)}),W.attr(E+"2",k*b),Z.attr(E,k*B).text(O),X.filter(sMt).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",r===tse?"start":r===Dz?"end":"middle"),X.each(function(){this.__axis=q})}return C.scale=function(S){return arguments.length?(i=S,C):i},C.ticks=function(){return o=Array.from(arguments),C},C.tickArguments=function(S){return arguments.length?(o=S==null?[]:Array.from(S),C):o.slice()},C.tickValues=function(S){return arguments.length?(l=S==null?null:Array.from(S),C):l&&l.slice()},C.tickFormat=function(S){return arguments.length?(f=S,C):f},C.tickSize=function(S){return arguments.length?(b=d=+S,C):b},C.tickSizeInner=function(S){return arguments.length?(b=+S,C):b},C.tickSizeOuter=function(S){return arguments.length?(d=+S,C):d},C.tickPadding=function(S){return arguments.length?(w=+S,C):w},C.offset=function(S){return arguments.length?(y=+S,C):y},C}function aMt(r){return Q9e(Mz,r)}function oMt(r){return Q9e(nse,r)}var cMt={value:()=>{}};function Z9e(){for(var r=0,i=arguments.length,o={},l;r=0&&(l=o.slice(f+1),o=o.slice(0,f)),o&&!i.hasOwnProperty(o))throw new Error("unknown type: "+o);return{type:o,name:l}})}Lz.prototype=Z9e.prototype={constructor:Lz,on:function(r,i){var o=this._,l=uMt(r+"",o),f,b=-1,d=l.length;if(arguments.length<2){for(;++b0)for(var o=new Array(f),l=0,f,b;l=0&&(i=r.slice(0,o))!=="xmlns"&&(r=r.slice(o+1)),e_e.hasOwnProperty(i)?{space:e_e[i],local:r}:r}function hMt(r){return function(){var i=this.ownerDocument,o=this.namespaceURI;return o===rse&&i.documentElement.namespaceURI===rse?i.createElement(r):i.createElementNS(o,r)}}function fMt(r){return function(){return this.ownerDocument.createElementNS(r.space,r.local)}}function t_e(r){var i=Iz(r);return(i.local?fMt:hMt)(i)}function dMt(){}function ise(r){return r==null?dMt:function(){return this.querySelector(r)}}function gMt(r){typeof r!="function"&&(r=ise(r));for(var i=this._groups,o=i.length,l=new Array(o),f=0;f=R&&(R=F+1);!(X=B[R])&&++R=0;)(d=l[f])&&(b&&d.compareDocumentPosition(b)^4&&b.parentNode.insertBefore(d,b),b=d);return this}function RMt(r){r||(r=jMt);function i(T,C){return T&&C?r(T.__data__,C.__data__):!T-!C}for(var o=this._groups,l=o.length,f=new Array(l),b=0;bi?1:r>=i?0:NaN}function $Mt(){var r=arguments[0];return 
arguments[0]=this,r.apply(null,arguments),this}function HMt(){return Array.from(this)}function zMt(){for(var r=this._groups,i=0,o=r.length;i1?this.each((i==null?JMt:typeof i=="function"?tDt:eDt)(r,i,o==null?"":o)):IT(this.node(),r)}function IT(r,i){return r.style.getPropertyValue(i)||o_e(r).getComputedStyle(r,null).getPropertyValue(i)}function rDt(r){return function(){delete this[r]}}function iDt(r,i){return function(){this[r]=i}}function sDt(r,i){return function(){var o=i.apply(this,arguments);o==null?delete this[r]:this[r]=o}}function aDt(r,i){return arguments.length>1?this.each((i==null?rDt:typeof i=="function"?sDt:iDt)(r,i)):this.node()[r]}function c_e(r){return r.trim().split(/^|\s+/)}function sse(r){return r.classList||new u_e(r)}function u_e(r){this._node=r,this._names=c_e(r.getAttribute("class")||"")}u_e.prototype={add:function(r){var i=this._names.indexOf(r);i<0&&(this._names.push(r),this._node.setAttribute("class",this._names.join(" ")))},remove:function(r){var i=this._names.indexOf(r);i>=0&&(this._names.splice(i,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(r){return this._names.indexOf(r)>=0}};function l_e(r,i){for(var o=sse(r),l=-1,f=i.length;++l=0&&(o=i.slice(l+1),i=i.slice(0,l)),{type:i,name:o}})}function NDt(r){return function(){var i=this.__on;if(!!i){for(var o=0,l=-1,f=i.length,b;o>8&15|i>>4&240,i>>4&15|i&240,(i&15)<<4|i&15,1):o===8?Bz(i>>24&255,i>>16&255,i>>8&255,(i&255)/255):o===4?Bz(i>>12&15|i>>8&240,i>>8&15|i>>4&240,i>>4&15|i&240,((i&15)<<4|i&15)/255):null):(i=GDt.exec(r))?new sd(i[1],i[2],i[3],1):(i=VDt.exec(r))?new sd(i[1]*255/100,i[2]*255/100,i[3]*255/100,1):(i=UDt.exec(r))?Bz(i[1],i[2],i[3],i[4]):(i=qDt.exec(r))?Bz(i[1]*255/100,i[2]*255/100,i[3]*255/100,i[4]):(i=YDt.exec(r))?y_e(i[1],i[2]/100,i[3]/100,1):(i=WDt.exec(r))?y_e(i[1],i[2]/100,i[3]/100,i[4]):d_e.hasOwnProperty(r)?b_e(d_e[r]):r==="transparent"?new sd(NaN,NaN,NaN,0):null}function b_e(r){return new sd(r>>16&255,r>>8&255,r&255,1)}function Bz(r,i,o,l){return l<=0&&(r=i=o=NaN),new sd(r,i,o,l)}function v_e(r){return r instanceof O8||(r=N8(r)),r?(r=r.rgb(),new sd(r.r,r.g,r.b,r.opacity)):new sd}function cse(r,i,o,l){return arguments.length===1?v_e(r):new sd(r,i,o,l==null?1:l)}function sd(r,i,o,l){this.r=+r,this.g=+i,this.b=+o,this.opacity=+l}jL(sd,cse,Nz(O8,{brighter(r){return r=r==null?Pz:Math.pow(Pz,r),new sd(this.r*r,this.g*r,this.b*r,this.opacity)},darker(r){return r=r==null?$L:Math.pow($L,r),new sd(this.r*r,this.g*r,this.b*r,this.opacity)},rgb(){return this},clamp(){return new sd(P8(this.r),P8(this.g),P8(this.b),Fz(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:w_e,formatHex:w_e,formatHex8:QDt,formatRgb:m_e,toString:m_e}));function w_e(){return`#${B8(this.r)}${B8(this.g)}${B8(this.b)}`}function QDt(){return`#${B8(this.r)}${B8(this.g)}${B8(this.b)}${B8((isNaN(this.opacity)?1:this.opacity)*255)}`}function m_e(){const r=Fz(this.opacity);return`${r===1?"rgb(":"rgba("}${P8(this.r)}, ${P8(this.g)}, ${P8(this.b)}${r===1?")":`, ${r})`}`}function Fz(r){return isNaN(r)?1:Math.max(0,Math.min(1,r))}function P8(r){return Math.max(0,Math.min(255,Math.round(r)||0))}function B8(r){return r=P8(r),(r<16?"0":"")+r.toString(16)}function y_e(r,i,o,l){return l<=0?r=i=o=NaN:o<=0||o>=1?r=i=NaN:i<=0&&(r=NaN),new Ev(r,i,o,l)}function k_e(r){if(r instanceof Ev)return new Ev(r.h,r.s,r.l,r.opacity);if(r instanceof O8||(r=N8(r)),!r)return new Ev;if(r instanceof Ev)return r;r=r.rgb();var 
i=r.r/255,o=r.g/255,l=r.b/255,f=Math.min(i,o,l),b=Math.max(i,o,l),d=NaN,w=b-f,y=(b+f)/2;return w?(i===b?d=(o-l)/w+(o0&&y<1?0:d,new Ev(d,w,y,r.opacity)}function ZDt(r,i,o,l){return arguments.length===1?k_e(r):new Ev(r,i,o,l==null?1:l)}function Ev(r,i,o,l){this.h=+r,this.s=+i,this.l=+o,this.opacity=+l}jL(Ev,ZDt,Nz(O8,{brighter(r){return r=r==null?Pz:Math.pow(Pz,r),new Ev(this.h,this.s,this.l*r,this.opacity)},darker(r){return r=r==null?$L:Math.pow($L,r),new Ev(this.h,this.s,this.l*r,this.opacity)},rgb(){var r=this.h%360+(this.h<0)*360,i=isNaN(r)||isNaN(this.s)?0:this.s,o=this.l,l=o+(o<.5?o:1-o)*i,f=2*o-l;return new sd(use(r>=240?r-240:r+120,f,l),use(r,f,l),use(r<120?r+240:r-120,f,l),this.opacity)},clamp(){return new Ev(x_e(this.h),Rz(this.s),Rz(this.l),Fz(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){const r=Fz(this.opacity);return`${r===1?"hsl(":"hsla("}${x_e(this.h)}, ${Rz(this.s)*100}%, ${Rz(this.l)*100}%${r===1?")":`, ${r})`}`}}));function x_e(r){return r=(r||0)%360,r<0?r+360:r}function Rz(r){return Math.max(0,Math.min(1,r||0))}function use(r,i,o){return(r<60?i+(o-i)*r/60:r<180?o:r<240?i+(o-i)*(240-r)/60:i)*255}const JDt=Math.PI/180,eLt=180/Math.PI,jz=18,E_e=.96422,__e=1,T_e=.82521,C_e=4/29,NT=6/29,S_e=3*NT*NT,tLt=NT*NT*NT;function A_e(r){if(r instanceof Lm)return new Lm(r.l,r.a,r.b,r.opacity);if(r instanceof H3)return M_e(r);r instanceof sd||(r=v_e(r));var i=dse(r.r),o=dse(r.g),l=dse(r.b),f=lse((.2225045*i+.7168786*o+.0606169*l)/__e),b,d;return i===o&&o===l?b=d=f:(b=lse((.4360747*i+.3850649*o+.1430804*l)/E_e),d=lse((.0139322*i+.0971045*o+.7141733*l)/T_e)),new Lm(116*f-16,500*(b-f),200*(f-d),r.opacity)}function nLt(r,i,o,l){return arguments.length===1?A_e(r):new Lm(r,i,o,l==null?1:l)}function Lm(r,i,o,l){this.l=+r,this.a=+i,this.b=+o,this.opacity=+l}jL(Lm,nLt,Nz(O8,{brighter(r){return new Lm(this.l+jz*(r==null?1:r),this.a,this.b,this.opacity)},darker(r){return new Lm(this.l-jz*(r==null?1:r),this.a,this.b,this.opacity)},rgb(){var r=(this.l+16)/116,i=isNaN(this.a)?r:r+this.a/500,o=isNaN(this.b)?r:r-this.b/200;return i=E_e*hse(i),r=__e*hse(r),o=T_e*hse(o),new sd(fse(3.1338561*i-1.6168667*r-.4906146*o),fse(-.9787684*i+1.9161415*r+.033454*o),fse(.0719453*i-.2289914*r+1.4052427*o),this.opacity)}}));function lse(r){return r>tLt?Math.pow(r,1/3):r/S_e+C_e}function hse(r){return r>NT?r*r*r:S_e*(r-C_e)}function fse(r){return 255*(r<=.0031308?12.92*r:1.055*Math.pow(r,1/2.4)-.055)}function dse(r){return(r/=255)<=.04045?r/12.92:Math.pow((r+.055)/1.055,2.4)}function rLt(r){if(r instanceof H3)return new H3(r.h,r.c,r.l,r.opacity);if(r instanceof Lm||(r=A_e(r)),r.a===0&&r.b===0)return new H3(NaN,0()=>r;function D_e(r,i){return function(o){return r+o*i}}function iLt(r,i,o){return r=Math.pow(r,o),i=Math.pow(i,o)-r,o=1/o,function(l){return Math.pow(r+l*i,o)}}function sLt(r,i){var o=i-r;return o?D_e(r,o>180||o<-180?o-360*Math.round(o/360):o):$z(isNaN(r)?i:r)}function aLt(r){return(r=+r)==1?zL:function(i,o){return o-i?iLt(i,o,r):$z(isNaN(i)?o:i)}}function zL(r,i){var o=i-r;return o?D_e(r,o):$z(isNaN(r)?i:r)}const Hz=function r(i){var o=aLt(i);function l(f,b){var d=o((f=cse(f)).r,(b=cse(b)).r),w=o(f.g,b.g),y=o(f.b,b.b),k=zL(f.opacity,b.opacity);return function(E){return f.r=d(E),f.g=w(E),f.b=y(E),f.opacity=k(E),f+""}}return l.gamma=r,l}(1);function oLt(r,i){i||(i=[]);var o=r?Math.min(i.length,r.length):0,l=i.slice(),f;return 
function(b){for(f=0;fo&&(b=i.slice(o,b),w[d]?w[d]+=b:w[++d]=b),(l=l[0])===(f=f[0])?w[d]?w[d]+=f:w[++d]=f:(w[++d]=null,y.push({i:d,x:_v(l,f)})),o=bse.lastIndex;return o180?E+=360:E-k>180&&(k+=360),C.push({i:T.push(f(T)+"rotate(",null,l)-2,x:_v(k,E)})):E&&T.push(f(T)+"rotate("+E+l)}function w(k,E,T,C){k!==E?C.push({i:T.push(f(T)+"skewX(",null,l)-2,x:_v(k,E)}):E&&T.push(f(T)+"skewX("+E+l)}function y(k,E,T,C,S,L){if(k!==T||E!==C){var O=S.push(f(S)+"scale(",null,",",null,")");L.push({i:O-4,x:_v(k,T)},{i:O-2,x:_v(E,C)})}else(T!==1||C!==1)&&S.push(f(S)+"scale("+T+","+C+")")}return function(k,E){var T=[],C=[];return k=r(k),E=r(E),b(k.translateX,k.translateY,E.translateX,E.translateY,T,C),d(k.rotate,E.rotate,T,C),w(k.skewX,E.skewX,T,C),y(k.scaleX,k.scaleY,E.scaleX,E.scaleY,T,C),k=E=null,function(S){for(var L=-1,O=C.length,B;++L=0&&r._call.call(void 0,i),r=r._next;--PT}function R_e(){F8=(Vz=qL.now())+Uz,PT=GL=0;try{xLt()}finally{PT=0,_Lt(),F8=0}}function ELt(){var r=qL.now(),i=r-Vz;i>P_e&&(Uz-=i,Vz=r)}function _Lt(){for(var r,i=Gz,o,l=1/0;i;)i._call?(l>i._time&&(l=i._time),r=i,i=i._next):(o=i._next,i._next=null,i=r?r._next=o:Gz=o);UL=r,yse(l)}function yse(r){if(!PT){GL&&(GL=clearTimeout(GL));var i=r-F8;i>24?(r<1/0&&(GL=setTimeout(R_e,r-qL.now()-Uz)),VL&&(VL=clearInterval(VL))):(VL||(Vz=qL.now(),VL=setInterval(ELt,P_e)),PT=1,B_e(R_e))}}function j_e(r,i,o){var l=new qz;return i=i==null?0:+i,l.restart(f=>{l.stop(),r(f+i)},i,o),l}var TLt=Z9e("start","end","cancel","interrupt"),CLt=[],$_e=0,H_e=1,kse=2,Yz=3,z_e=4,xse=5,Wz=6;function Kz(r,i,o,l,f,b){var d=r.__transition;if(!d)r.__transition={};else if(o in d)return;SLt(r,o,{name:i,index:l,group:f,on:TLt,tween:CLt,time:b.time,delay:b.delay,duration:b.duration,ease:b.ease,timer:null,state:$_e})}function Ese(r,i){var o=Tv(r,i);if(o.state>$_e)throw new Error("too late; already scheduled");return o}function Im(r,i){var o=Tv(r,i);if(o.state>Yz)throw new Error("too late; already running");return o}function Tv(r,i){var o=r.__transition;if(!o||!(o=o[i]))throw new Error("transition not found");return o}function SLt(r,i,o){var l=r.__transition,f;l[i]=o,o.timer=F_e(b,0,o.time);function b(k){o.state=H_e,o.timer.restart(d,o.delay,o.time),o.delay<=k&&d(k-o.delay)}function d(k){var E,T,C,S;if(o.state!==H_e)return y();for(E in l)if(S=l[E],S.name===o.name){if(S.state===Yz)return j_e(d);S.state===z_e?(S.state=Wz,S.timer.stop(),S.on.call("interrupt",r,r.__data__,S.index,S.group),delete l[E]):+Ekse&&l.state=0&&(i=i.slice(0,o)),!i||i==="start"})}function iIt(r,i,o){var l,f,b=rIt(i)?Ese:Im;return function(){var d=b(this,r),w=d.on;w!==l&&(f=(l=w).copy()).on(i,o),d.on=f}}function sIt(r,i){var o=this._id;return arguments.length<2?Tv(this.node(),o).on.on(r):this.each(iIt(o,r,i))}function aIt(r){return function(){var i=this.parentNode;for(var o in this.__transition)if(+o!==r)return;i&&i.removeChild(this)}}function oIt(){return this.on("end.remove",aIt(this._id))}function cIt(r){var i=this._name,o=this._id;typeof r!="function"&&(r=ise(r));for(var l=this._groups,f=l.length,b=new Array(f),d=0;dR8)if(!(Math.abs(E*w-y*k)>R8)||!f)this._+="L"+(this._x1=r)+","+(this._y1=i);else{var C=o-b,S=l-d,L=w*w+y*y,O=C*C+S*S,B=Math.sqrt(L),N=Math.sqrt(T),F=f*Math.tan((Tse-Math.acos((L+T-O)/(2*B*N)))/2),R=F/N,q=F/B;Math.abs(R-1)>R8&&(this._+="L"+(r+R*k)+","+(i+R*E)),this._+="A"+f+","+f+",0,0,"+ +(E*C>k*S)+","+(this._x1=r+q*w)+","+(this._y1=i+q*y)}},arc:function(r,i,o,l,f,b){r=+r,i=+i,o=+o,b=!!b;var d=o*Math.cos(l),w=o*Math.sin(l),y=r+d,k=i+w,E=1^b,T=b?l-f:f-l;if(o<0)throw new Error("negative radius: 
"+o);this._x1===null?this._+="M"+y+","+k:(Math.abs(this._x1-y)>R8||Math.abs(this._y1-k)>R8)&&(this._+="L"+y+","+k),o&&(T<0&&(T=T%Cse+Cse),T>OIt?this._+="A"+o+","+o+",0,1,"+E+","+(r-d)+","+(i-w)+"A"+o+","+o+",0,1,"+E+","+(this._x1=y)+","+(this._y1=k):T>R8&&(this._+="A"+o+","+o+",0,"+ +(T>=Tse)+","+E+","+(this._x1=r+o*Math.cos(f))+","+(this._y1=i+o*Math.sin(f))))},rect:function(r,i,o,l){this._+="M"+(this._x0=this._x1=+r)+","+(this._y0=this._y1=+i)+"h"+ +o+"v"+ +l+"h"+-o+"Z"},toString:function(){return this._}};function NIt(r){if(!r.ok)throw new Error(r.status+" "+r.statusText);return r.text()}function PIt(r,i){return fetch(r,i).then(NIt)}function BIt(r){return(i,o)=>PIt(i,o).then(l=>new DOMParser().parseFromString(l,r))}var FIt=BIt("image/svg+xml");function RIt(r){return Math.abs(r=Math.round(r))>=1e21?r.toLocaleString("en").replace(/,/g,""):r.toString(10)}function Xz(r,i){if((o=(r=i?r.toExponential(i-1):r.toExponential()).indexOf("e"))<0)return null;var o,l=r.slice(0,o);return[l.length>1?l[0]+l.slice(2):l,+r.slice(o+1)]}function BT(r){return r=Xz(Math.abs(r)),r?r[1]:NaN}function jIt(r,i){return function(o,l){for(var f=o.length,b=[],d=0,w=r[0],y=0;f>0&&w>0&&(y+w+1>l&&(w=Math.max(1,l-y)),b.push(o.substring(f-=w,f+w)),!((y+=w+1)>l));)w=r[d=(d+1)%r.length];return b.reverse().join(i)}}function $It(r){return function(i){return i.replace(/[0-9]/g,function(o){return r[+o]})}}var HIt=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Qz(r){if(!(i=HIt.exec(r)))throw new Error("invalid format: "+r);var i;return new Mse({fill:i[1],align:i[2],sign:i[3],symbol:i[4],zero:i[5],width:i[6],comma:i[7],precision:i[8]&&i[8].slice(1),trim:i[9],type:i[10]})}Qz.prototype=Mse.prototype;function Mse(r){this.fill=r.fill===void 0?" ":r.fill+"",this.align=r.align===void 0?">":r.align+"",this.sign=r.sign===void 0?"-":r.sign+"",this.symbol=r.symbol===void 0?"":r.symbol+"",this.zero=!!r.zero,this.width=r.width===void 0?void 0:+r.width,this.comma=!!r.comma,this.precision=r.precision===void 0?void 0:+r.precision,this.trim=!!r.trim,this.type=r.type===void 0?"":r.type+""}Mse.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(this.width===void 0?"":Math.max(1,this.width|0))+(this.comma?",":"")+(this.precision===void 0?"":"."+Math.max(0,this.precision|0))+(this.trim?"~":"")+this.type};function zIt(r){e:for(var i=r.length,o=1,l=-1,f;o0&&(l=0);break}return l>0?r.slice(0,l)+r.slice(f+1):r}var q_e;function GIt(r,i){var o=Xz(r,i);if(!o)return r+"";var l=o[0],f=o[1],b=f-(q_e=Math.max(-8,Math.min(8,Math.floor(f/3)))*3)+1,d=l.length;return b===d?l:b>d?l+new Array(b-d+1).join("0"):b>0?l.slice(0,b)+"."+l.slice(b):"0."+new Array(1-b).join("0")+Xz(r,Math.max(0,i+b-1))[0]}function Y_e(r,i){var o=Xz(r,i);if(!o)return r+"";var l=o[0],f=o[1];return f<0?"0."+new Array(-f).join("0")+l:l.length>f+1?l.slice(0,f+1)+"."+l.slice(f+1):l+new Array(f-l.length+2).join("0")}const W_e={"%":(r,i)=>(r*100).toFixed(i),b:r=>Math.round(r).toString(2),c:r=>r+"",d:RIt,e:(r,i)=>r.toExponential(i),f:(r,i)=>r.toFixed(i),g:(r,i)=>r.toPrecision(i),o:r=>Math.round(r).toString(8),p:(r,i)=>Y_e(r*100,i),r:Y_e,s:GIt,X:r=>Math.round(r).toString(16).toUpperCase(),x:r=>Math.round(r).toString(16)};function K_e(r){return r}var X_e=Array.prototype.map,Q_e=["y","z","a","f","p","n","\xB5","m","","k","M","G","T","P","E","Z","Y"];function VIt(r){var i=r.grouping===void 0||r.thousands===void 0?K_e:jIt(X_e.call(r.grouping,Number),r.thousands+""),o=r.currency===void 
0?"":r.currency[0]+"",l=r.currency===void 0?"":r.currency[1]+"",f=r.decimal===void 0?".":r.decimal+"",b=r.numerals===void 0?K_e:$It(X_e.call(r.numerals,String)),d=r.percent===void 0?"%":r.percent+"",w=r.minus===void 0?"\u2212":r.minus+"",y=r.nan===void 0?"NaN":r.nan+"";function k(T){T=Qz(T);var C=T.fill,S=T.align,L=T.sign,O=T.symbol,B=T.zero,N=T.width,F=T.comma,R=T.precision,q=T.trim,X=T.type;X==="n"?(F=!0,X="g"):W_e[X]||(R===void 0&&(R=12),q=!0,X="g"),(B||C==="0"&&S==="=")&&(B=!0,C="0",S="=");var te=O==="$"?o:O==="#"&&/[boxX]/.test(X)?"0"+X.toLowerCase():"",H=O==="$"?l:/[%p]/.test(X)?d:"",Y=W_e[X],z=/[defgprs%]/.test(X);R=R===void 0?6:/[gprs]/.test(X)?Math.max(1,Math.min(21,R)):Math.max(0,Math.min(20,R));function W(Z){var G=te,ae=H,$,ge,ee;if(X==="c")ae=Y(Z)+ae,Z="";else{Z=+Z;var de=Z<0||1/Z<0;if(Z=isNaN(Z)?y:Y(Math.abs(Z),R),q&&(Z=zIt(Z)),de&&+Z==0&&L!=="+"&&(de=!1),G=(de?L==="("?L:w:L==="-"||L==="("?"":L)+G,ae=(X==="s"?Q_e[8+q_e/3]:"")+ae+(de&&L==="("?")":""),z){for($=-1,ge=Z.length;++$ee||ee>57){ae=(ee===46?f+Z.slice($+1):Z.slice($))+ae,Z=Z.slice(0,$);break}}}F&&!B&&(Z=i(Z,1/0));var re=G.length+Z.length+ae.length,ke=re>1)+G+Z+ae+ke.slice(re);break;default:Z=ke+G+Z+ae;break}return b(Z)}return W.toString=function(){return T+""},W}function E(T,C){var S=k((T=Qz(T),T.type="f",T)),L=Math.max(-8,Math.min(8,Math.floor(BT(C)/3)))*3,O=Math.pow(10,-L),B=Q_e[8+L/3];return function(N){return S(O*N)+B}}return{format:k,formatPrefix:E}}var Zz,Z_e,J_e;UIt({thousands:",",grouping:[3],currency:["$",""]});function UIt(r){return Zz=VIt(r),Z_e=Zz.format,J_e=Zz.formatPrefix,Zz}function qIt(r){return Math.max(0,-BT(Math.abs(r)))}function YIt(r,i){return Math.max(0,Math.max(-8,Math.min(8,Math.floor(BT(i)/3)))*3-BT(Math.abs(r)))}function WIt(r,i){return r=Math.abs(r),i=Math.abs(i)-r,Math.max(0,BT(i)-BT(r))+1}function Dse(r,i){switch(arguments.length){case 0:break;case 1:this.range(r);break;default:this.range(i).domain(r);break}return this}const eTe=Symbol("implicit");function tTe(){var r=new Y9e,i=[],o=[],l=eTe;function f(b){let d=r.get(b);if(d===void 0){if(l!==eTe)return l;r.set(b,d=i.push(b)-1)}return o[d%o.length]}return f.domain=function(b){if(!arguments.length)return i.slice();i=[],r=new Y9e;for(const d of b)r.has(d)||r.set(d,i.push(d)-1);return f},f.range=function(b){return arguments.length?(o=Array.from(b),f):o.slice()},f.unknown=function(b){return arguments.length?(l=b,f):l},f.copy=function(){return tTe(i,o).unknown(l)},Dse.apply(f,arguments),f}function KIt(r){return function(){return r}}function XIt(r){return+r}var nTe=[0,1];function FT(r){return r}function Lse(r,i){return(i-=r=+r)?function(o){return(o-r)/i}:KIt(isNaN(i)?NaN:.5)}function QIt(r,i){var o;return r>i&&(o=r,r=i,i=o),function(l){return Math.max(r,Math.min(i,l))}}function ZIt(r,i,o){var l=r[0],f=r[1],b=i[0],d=i[1];return f2?JIt:ZIt,y=k=null,T}function T(C){return C==null||isNaN(C=+C)?b:(y||(y=w(r.map(l),i,o)))(l(d(C)))}return T.invert=function(C){return d(f((k||(k=w(i,r.map(l),_v)))(C)))},T.domain=function(C){return arguments.length?(r=Array.from(C,XIt),E()):r.slice()},T.range=function(C){return arguments.length?(i=Array.from(C),E()):i.slice()},T.rangeRound=function(C){return i=Array.from(C),o=gLt,E()},T.clamp=function(C){return arguments.length?(d=C?!0:FT,E()):d!==FT},T.interpolate=function(C){return arguments.length?(o=C,E()):o},T.unknown=function(C){return arguments.length?(b=C,T):b},function(C,S){return l=C,f=S,E()}}function iTe(){return eOt()(FT,FT)}function tOt(r,i,o,l){var f=ese(r,i,o),b;switch(l=Qz(l==null?",f":l),l.type){case"s":{var 
d=Math.max(Math.abs(r),Math.abs(i));return l.precision==null&&!isNaN(b=YIt(f,d))&&(l.precision=b),J_e(l,d)}case"":case"e":case"g":case"p":case"r":{l.precision==null&&!isNaN(b=WIt(f,Math.max(Math.abs(r),Math.abs(i))))&&(l.precision=b-(l.type==="e"));break}case"f":case"%":{l.precision==null&&!isNaN(b=qIt(f))&&(l.precision=b-(l.type==="%")*2);break}}return Z_e(l)}function nOt(r){var i=r.domain;return r.ticks=function(o){var l=i();return QAt(l[0],l[l.length-1],o==null?10:o)},r.tickFormat=function(o,l){var f=i();return tOt(f[0],f[f.length-1],o==null?10:o,l)},r.nice=function(o){o==null&&(o=10);var l=i(),f=0,b=l.length-1,d=l[f],w=l[b],y,k,E=10;for(w0;){if(k=K9e(d,w,o),k===y)return l[f]=d,l[b]=w,i(l);if(k>0)d=Math.floor(d/k)*k,w=Math.ceil(w/k)*k;else if(k<0)d=Math.ceil(d*k)/k,w=Math.floor(w*k)/k;else break;y=k}return r},r}function sTe(){var r=iTe();return r.copy=function(){return rTe(r,sTe())},Dse.apply(r,arguments),nOt(r)}function rOt(r,i){r=r.slice();var o=0,l=r.length-1,f=r[o],b=r[l],d;return b0))return y;do y.push(k=new Date(+b)),i(b,w),r(b);while(k=d)for(;r(d),!b(d);)d.setTime(d-1)},function(d,w){if(d>=d)if(w<0)for(;++w<=0;)for(;i(d,-1),!b(d););else for(;--w>=0;)for(;i(d,1),!b(d););})},o&&(f.count=function(b,d){return Ise.setTime(+b),Ose.setTime(+d),r(Ise),r(Ose),Math.floor(o(Ise,Ose))},f.every=function(b){return b=Math.floor(b),!isFinite(b)||!(b>0)?null:b>1?f.filter(l?function(d){return l(d)%b===0}:function(d){return f.count(0,d)%b===0}):f}),f}var Jz=Uf(function(){},function(r,i){r.setTime(+r+i)},function(r,i){return i-r});Jz.every=function(r){return r=Math.floor(r),!isFinite(r)||!(r>0)?null:r>1?Uf(function(i){i.setTime(Math.floor(i/r)*r)},function(i,o){i.setTime(+i+o*r)},function(i,o){return(o-i)/r}):Jz};const iOt=Jz;Jz.range;const V3=1e3,D2=V3*60,U3=D2*60,j8=U3*24,Nse=j8*7,aTe=j8*30,Pse=j8*365;var oTe=Uf(function(r){r.setTime(r-r.getMilliseconds())},function(r,i){r.setTime(+r+i*V3)},function(r,i){return(i-r)/V3},function(r){return r.getUTCSeconds()});const YL=oTe;oTe.range;var cTe=Uf(function(r){r.setTime(r-r.getMilliseconds()-r.getSeconds()*V3)},function(r,i){r.setTime(+r+i*D2)},function(r,i){return(i-r)/D2},function(r){return r.getMinutes()});const eG=cTe;cTe.range;var uTe=Uf(function(r){r.setTime(r-r.getMilliseconds()-r.getSeconds()*V3-r.getMinutes()*D2)},function(r,i){r.setTime(+r+i*U3)},function(r,i){return(i-r)/U3},function(r){return r.getHours()});const tG=uTe;uTe.range;var lTe=Uf(r=>r.setHours(0,0,0,0),(r,i)=>r.setDate(r.getDate()+i),(r,i)=>(i-r-(i.getTimezoneOffset()-r.getTimezoneOffset())*D2)/j8,r=>r.getDate()-1);const RT=lTe;lTe.range;function $8(r){return Uf(function(i){i.setDate(i.getDate()-(i.getDay()+7-r)%7),i.setHours(0,0,0,0)},function(i,o){i.setDate(i.getDate()+o*7)},function(i,o){return(o-i-(o.getTimezoneOffset()-i.getTimezoneOffset())*D2)/Nse})}var jT=$8(0),nG=$8(1),sOt=$8(2),aOt=$8(3),$T=$8(4),oOt=$8(5),cOt=$8(6);jT.range,nG.range,sOt.range,aOt.range,$T.range,oOt.range,cOt.range;var hTe=Uf(function(r){r.setDate(1),r.setHours(0,0,0,0)},function(r,i){r.setMonth(r.getMonth()+i)},function(r,i){return i.getMonth()-r.getMonth()+(i.getFullYear()-r.getFullYear())*12},function(r){return r.getMonth()});const rG=hTe;hTe.range;var Bse=Uf(function(r){r.setMonth(0,1),r.setHours(0,0,0,0)},function(r,i){r.setFullYear(r.getFullYear()+i)},function(r,i){return i.getFullYear()-r.getFullYear()},function(r){return 
r.getFullYear()});Bse.every=function(r){return!isFinite(r=Math.floor(r))||!(r>0)?null:Uf(function(i){i.setFullYear(Math.floor(i.getFullYear()/r)*r),i.setMonth(0,1),i.setHours(0,0,0,0)},function(i,o){i.setFullYear(i.getFullYear()+o*r)})};const H8=Bse;Bse.range;var fTe=Uf(function(r){r.setUTCSeconds(0,0)},function(r,i){r.setTime(+r+i*D2)},function(r,i){return(i-r)/D2},function(r){return r.getUTCMinutes()});const uOt=fTe;fTe.range;var dTe=Uf(function(r){r.setUTCMinutes(0,0,0)},function(r,i){r.setTime(+r+i*U3)},function(r,i){return(i-r)/U3},function(r){return r.getUTCHours()});const lOt=dTe;dTe.range;var gTe=Uf(function(r){r.setUTCHours(0,0,0,0)},function(r,i){r.setUTCDate(r.getUTCDate()+i)},function(r,i){return(i-r)/j8},function(r){return r.getUTCDate()-1});const Fse=gTe;gTe.range;function z8(r){return Uf(function(i){i.setUTCDate(i.getUTCDate()-(i.getUTCDay()+7-r)%7),i.setUTCHours(0,0,0,0)},function(i,o){i.setUTCDate(i.getUTCDate()+o*7)},function(i,o){return(o-i)/Nse})}var Rse=z8(0),iG=z8(1),hOt=z8(2),fOt=z8(3),HT=z8(4),dOt=z8(5),gOt=z8(6);Rse.range,iG.range,hOt.range,fOt.range,HT.range,dOt.range,gOt.range;var pTe=Uf(function(r){r.setUTCDate(1),r.setUTCHours(0,0,0,0)},function(r,i){r.setUTCMonth(r.getUTCMonth()+i)},function(r,i){return i.getUTCMonth()-r.getUTCMonth()+(i.getUTCFullYear()-r.getUTCFullYear())*12},function(r){return r.getUTCMonth()});const pOt=pTe;pTe.range;var jse=Uf(function(r){r.setUTCMonth(0,1),r.setUTCHours(0,0,0,0)},function(r,i){r.setUTCFullYear(r.getUTCFullYear()+i)},function(r,i){return i.getUTCFullYear()-r.getUTCFullYear()},function(r){return r.getUTCFullYear()});jse.every=function(r){return!isFinite(r=Math.floor(r))||!(r>0)?null:Uf(function(i){i.setUTCFullYear(Math.floor(i.getUTCFullYear()/r)*r),i.setUTCMonth(0,1),i.setUTCHours(0,0,0,0)},function(i,o){i.setUTCFullYear(i.getUTCFullYear()+o*r)})};const zT=jse;jse.range;function bTe(r,i,o,l,f,b){const d=[[YL,1,V3],[YL,5,5*V3],[YL,15,15*V3],[YL,30,30*V3],[b,1,D2],[b,5,5*D2],[b,15,15*D2],[b,30,30*D2],[f,1,U3],[f,3,3*U3],[f,6,6*U3],[f,12,12*U3],[l,1,j8],[l,2,2*j8],[o,1,Nse],[i,1,aTe],[i,3,3*aTe],[r,1,Pse]];function w(k,E,T){const C=EB).right(d,C);if(S===d.length)return r.every(ese(k/Pse,E/Pse,T));if(S===0)return iOt.every(Math.max(ese(k,E,T),1));const[L,O]=d[C/d[S-1][2]53)return null;"w"in xe||(xe.w=1),"Z"in xe?(je=Hse(WL(xe.y,0,1)),me=je.getUTCDay(),je=me>4||me===0?iG.ceil(je):iG(je),je=Fse.offset(je,(xe.V-1)*7),xe.y=je.getUTCFullYear(),xe.m=je.getUTCMonth(),xe.d=je.getUTCDate()+(xe.w+6)%7):(je=$se(WL(xe.y,0,1)),me=je.getDay(),je=me>4||me===0?nG.ceil(je):nG(je),je=RT.offset(je,(xe.V-1)*7),xe.y=je.getFullYear(),xe.m=je.getMonth(),xe.d=je.getDate()+(xe.w+6)%7)}else("W"in xe||"U"in xe)&&("w"in xe||(xe.w="u"in xe?xe.u%7:"W"in xe?1:0),me="Z"in xe?Hse(WL(xe.y,0,1)).getUTCDay():$se(WL(xe.y,0,1)).getDay(),xe.m=0,xe.d="W"in xe?(xe.w+6)%7+xe.W*7-(me+5)%7:xe.w+xe.U*7-(me+6)%7);return"Z"in xe?(xe.H+=xe.Z/100|0,xe.M+=xe.Z%100,Hse(xe)):$se(xe)}}function Y(Oe,Le,$e,xe){for(var Ae=0,je=Le.length,me=$e.length,vt,ve;Ae=me)return-1;if(vt=Le.charCodeAt(Ae++),vt===37){if(vt=Le.charAt(Ae++),ve=X[vt in vTe?Le.charAt(Ae++):vt],!ve||(xe=ve(Oe,$e,xe))<0)return-1}else if(vt!=$e.charCodeAt(xe++))return-1}return xe}function z(Oe,Le,$e){var xe=k.exec(Le.slice($e));return xe?(Oe.p=E.get(xe[0].toLowerCase()),$e+xe[0].length):-1}function W(Oe,Le,$e){var xe=S.exec(Le.slice($e));return xe?(Oe.w=L.get(xe[0].toLowerCase()),$e+xe[0].length):-1}function Z(Oe,Le,$e){var xe=T.exec(Le.slice($e));return 
[Minified vendored JavaScript bundle: machine-generated single-line code containing d3 helpers (time formatting/parsing, time scales, arc/pie/curve generators, zoom transforms) and DOMPurify 2.4.3 (Apache-2.0 / MPL-2.0, github.com/cure53/DOMPurify/blob/2.4.3/LICENSE); the extracted text is damaged and truncated mid-statement.]