Compare commits
version2.4 … version2.6 — 534 commits
.github/ISSUE_TEMPLATE/bug_report.md (vendored) — 12 changed lines
@@ -7,11 +7,17 @@ assignees: ''
---
**Describe the bug 简述**
- **(1) Describe the bug 简述**
**Screen Shot 截图**
**Terminal Traceback 终端traceback(如果有)**
- **(2) Screen Shot 截图**
- **(3) Terminal Traceback 终端traceback(如有)**
- **(4) Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)**
Before submitting an issue 提交issue之前:
.gitignore (vendored) — 5 changed lines
@@ -55,6 +55,7 @@ coverage.xml
*.pot
github
.github
.idea/
TEMP
TRASH
@@ -132,6 +133,7 @@ dmypy.json
.pyre/
.vscode
.idea
history
ssr_conf
@@ -140,4 +142,5 @@ gpt_log
private.md
private_upload
other_llms
cradle.py
cradle*
debug*
Dockerfile
@@ -4,10 +4,11 @@ RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

RUN pip3 install gradio requests[socks] mdtex2html

COPY . /gpt
WORKDIR /gpt
COPY requirements.txt .
RUN pip3 install -r requirements.txt

COPY . .

CMD ["python3", "main.py"]
CMD ["python3", "-u", "main.py"]
README.md — 73 changed lines
@@ -1,20 +1,16 @@

# ChatGPT Academic Optimization (ChatGPT 学术优化)

**If you like this project, please give it a Star; if you invent handier shortcut keys or function plugins, feel free to open an issue or a pull request (to the dev branch)**
**If you like this project, please give it a Star; if you invent handier shortcut keys or function plugins, feel free to open an issue or a pull request**

If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request (to `dev` branch).
If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a [README in English](img/README_EN.md) translated by this project itself.

> **Note**
>
> 1. Please note that only function plugins (buttons) marked in "red" support reading files. Plugin support for pdf/word files is still being improved step by step and needs help from more developers.
> 1. Please note that only function plugins (buttons) marked in **red** support reading files, and some plugins sit in the **drop-down menu** of the plugin area. In addition, we welcome and handle PRs for any new plugin with the **highest priority**!
>
> 2. The function of every file in this project is documented in detail in the project's self-translated analysis [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can also click the relevant function plugin at any time to call GPT and regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
>
> 3. If you are not comfortable with some functions, comments, or the interface being named in Chinese, you can click the relevant function plugin at any time to call ChatGPT and generate a pure-English copy of the project source code.
>
> 4. The project uses OpenAI's gpt-3.5-turbo model; here's hoping gpt-4 relaxes its entry barrier soon 😂

<div align="center">

@@ -26,58 +22,59 @@ If you like this project, please give it a Star. If you've come up with more use

[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports user-defined shortcut keys
[Proxy server configuration](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring a proxy server
Modular design | Supports custom, high-order experimental features and [function plugins]; plugins support [hot reload](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[Self program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click reading of this project's own source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java project trees
[Self program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click reading](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's own source code
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
Paper reading | [Function plugin] One-click interpretation of a full latex paper, with abstract generation
Full-text Latex translation and polishing | [Function plugin] One-click translation or polishing of a latex paper
Batch comment generation | [Function plugin] One-click batch generation of function comments
Chat analysis report generation | [Function plugin] Automatically generates a summary report after a run
[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv article url to translate the abstract and download the PDF in one click
[Full-text PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extracts a PDF paper's title & abstract and translates the full text (multi-threaded)
Formula display | Shows both the tex form and the rendered form of formulas
Image display | Can display images in markdown
[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search-page URL, let gpt pick the interesting articles for you
Formula/image/table display | Shows both the tex form and the rendered form of formulas, with formula and code highlighting
Multi-threaded function plugin support | Supports multi-threaded calls to chatgpt; one-click processing of huge amounts of text or code
Markdown tables in GPT output | Can render the markdown tables produced by GPT
Dark gradio [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Append ```/?__dark-theme=true``` to the browser url to switch to the dark theme
[Multi-LLM](https://www.bilibili.com/video/BV1EM411K7VH/) support ([v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0)) | Being served by ChatGPT and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time must feel wonderful, right?
Compatible with [TGUI](https://github.com/oobabooga/text-generation-webui) to plug in more diverse language models | Connect opt-1.3b, galactica-1.3b and other models (being tested on the [v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0))
Huggingface [online experience](https://huggingface.co/spaces/qingxu98/gpt-academic) without a proxy | Log in to huggingface and duplicate [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
… | …

</div>

<!-- - New interface (left: master branch; right: dev cutting edge) -->
- New interface
- New interface (edit the LAYOUT option in config.py to switch between the "left-right" and "top-down" layouts)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>


- All buttons are generated dynamically by reading functional.py; custom functions can be added freely, liberating the clipboard
<div align="center">
<img src="img/公式.gif" width="700" >
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Polishing/proofreading
<div align="center">
<img src="img/润色.gif" width="700" >
</div>


- Markdown tables in GPT output
<div align="center">
<img src="img/demo2.jpg" width="500" >
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are shown in both tex form and rendered form, for easy copying and reading
<div align="center">
<img src="img/demo.jpg" width="500" >
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>


- Too lazy to read the project code? Just feed the whole repo straight to chatgpt
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixed calls to multiple large language models (being tested on the [v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0))
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231222778-34776885-a7f0-4f2c-b5f4-7cc2ef3ecb58.png" width="700" >
</div>


## Run directly (Windows, Linux or MacOS)

### 1. Download the project
@@ -159,13 +156,13 @@ input区域 输入 ./crazy_functions/test_project/python/dqn , 然后点击 "[

## Other deployment options

- Deployment on a remote cloud server
Please see the [deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

- Using WSL2 (Windows Subsystem for Linux)
Please see the [deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

- Remote deployment with nginx
Please see the [deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E7%9A%84%E6%8C%87%E5%AF%BC)


## Defining new convenience buttons (custom academic shortcuts)
Open functional.py and add an entry as follows, then restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix support hot modification and take effect without restarting.)
For example (see the sketch below):
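A hedged sketch of such an entry, modeled on the "英译中" entry visible in the functional.py hunk further down this compare; the key name and prompt text here are illustrative, not part of the shipped config:

```python
# One entry inside the dict returned by get_core_functions() — a sketch only.
"学术润色": {                                         # illustrative button label
    "Prefix": r"请对下面的内容进行学术润色:" + "\n\n",   # prepended to the input-box content
    "Suffix": r"",                                     # appended after it
    "Color": "secondary",                              # optional button color, as in existing entries
},
```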
@@ -258,13 +255,10 @@ python check_proxy.py

## Todo & version planning:

- version 3 (Todo):
- - Support gpt4 and more llms
- version 2.3+ (Todo):
- - Fix the over-long text / token overflow problem when summarizing large project source code
- - Packaged deployment of the project
- - Optimize the function-plugin parameter interface
- - Self-update
- version 3.0 (Todo): Improve support for chatglm and other small llms
- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins
- version 2.5: Self-update; fixed the over-long text / token overflow problem when summarizing large project source code
- version 2.4: (1) Added full-text PDF translation; (2) added the option to reposition the input area; (3) added a vertical-layout option; (4) optimized multi-threaded function plugins.
- version 2.3: Improved multi-thread interactivity
- version 2.2: Function plugins support hot reload
- version 2.1: Collapsible layout

@@ -280,8 +274,7 @@ python check_proxy.py

# Reference project 1: borrowed ChuanhuChatGPT's way of reading the OpenAI json, of recording the query history, and its gradio queue tricks
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Reference project 2: borrowed the formula handling from mdtex2html
https://github.com/polarwinkel/mdtex2html

# Reference project 2:
https://github.com/THUDM/ChatGLM-6B
check_proxy.py — 131 changed lines
@@ -20,31 +20,118 @@ def check_proxy(proxies):
    return result


def auto_update():
def backup_and_download(current_version, remote_version):
    """
    One-click update protocol: back up, then download
    """
    from toolbox import get_conf
    import shutil
    import os
    import requests
    import time
    import json
    import zipfile
    os.makedirs(f'./history', exist_ok=True)
    backup_dir = f'./history/backup-{current_version}/'
    new_version_dir = f'./history/new-version-{remote_version}/'
    if os.path.exists(new_version_dir):
        return new_version_dir
    os.makedirs(new_version_dir)
    shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
    proxies, = get_conf('proxies')
    response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version",
                            proxies=proxies, timeout=1)
    remote_json_data = json.loads(response.text)
    remote_version = remote_json_data['version']
    if remote_json_data["show_feature"]:
        new_feature = "新功能:" + remote_json_data["new_feature"]
    else:
        new_feature = ""
    with open('./version', 'r', encoding='utf8') as f:
        current_version = f.read()
        current_version = json.loads(current_version)['version']
    if (remote_version - current_version) >= 0.05:
        print(
            f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
        print('Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
        time.sleep(3)
        return
    else:
        return
    r = requests.get(
        'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
    zip_file_path = backup_dir+'/master.zip'
    with open(zip_file_path, 'wb+') as f:
        f.write(r.content)
    dst_path = new_version_dir
    with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
        for zip_info in zip_ref.infolist():
            dst_file_path = os.path.join(dst_path, zip_info.filename)
            if os.path.exists(dst_file_path):
                os.remove(dst_file_path)
            zip_ref.extract(zip_info, dst_path)
    return new_version_dir


def patch_and_restart(path):
    """
    One-click update protocol: overwrite, then restart
    """
    import distutils
    import shutil
    import os
    import sys
    import time
    from colorful import print亮黄, print亮绿, print亮红
    # if not using config_private, move origin config.py as config_private.py
    if not os.path.exists('config_private.py'):
        print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
              '另外您可以随时在history子文件夹下找回旧版的程序。')
        shutil.copyfile('config.py', 'config_private.py')
    distutils.dir_util.copy_tree(path+'/chatgpt_academic-master', './')
    import subprocess
    print亮绿('代码已经更新,即将更新pip包依赖……')
    for i in reversed(range(5)): time.sleep(1); print(i)
    try:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
    except:
        print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
    print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
    print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后在用常规的`python main.py`的方式启动。')
    print(' ------------------------------ -----------------------------------')
    for i in reversed(range(8)): time.sleep(1); print(i)
    os.execl(sys.executable, sys.executable, *sys.argv)


def get_current_version():
    import json
    try:
        with open('./version', 'r', encoding='utf8') as f:
            current_version = json.loads(f.read())['version']
    except:
        current_version = ""
    return current_version


def auto_update():
    """
    One-click update protocol: query the version and ask the user
    """
    try:
        from toolbox import get_conf
        import requests
        import time
        import json
        proxies, = get_conf('proxies')
        response = requests.get(
            "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=1)
        remote_json_data = json.loads(response.text)
        remote_version = remote_json_data['version']
        if remote_json_data["show_feature"]:
            new_feature = "新功能:" + remote_json_data["new_feature"]
        else:
            new_feature = ""
        with open('./version', 'r', encoding='utf8') as f:
            current_version = f.read()
            current_version = json.loads(current_version)['version']
        if (remote_version - current_version) >= 0.01:
            from colorful import print亮黄
            print亮黄(
                f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
            print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
            user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
            if user_instruction in ['Y', 'y']:
                path = backup_and_download(current_version, remote_version)
                try:
                    patch_and_restart(path)
                except:
                    print('更新失败。')
            else:
                print('自动更新程序:已禁用')
                return
        else:
            return
    except:
        print('自动更新程序:已禁用')


if __name__ == '__main__':
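For orientation, a minimal usage sketch of the flow above (assumptions: run from the project root, where the `version` file and `config.py`/`config_private.py` live):

```python
# auto_update() checks the remote `version` file; on user confirmation it
# calls backup_and_download() and then patch_and_restart().
from check_proxy import auto_update

if __name__ == '__main__':
    auto_update()
```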
colorful.py — new file, 91 lines
@@ -0,0 +1,91 @@
import platform
from sys import stdout

if platform.system()=="Linux":
    pass
else:
    from colorama import init
    init()

# Do you like the elegance of Chinese characters?
def print红(*kw,**kargs):
    print("\033[0;31m",*kw,"\033[0m",**kargs)
def print绿(*kw,**kargs):
    print("\033[0;32m",*kw,"\033[0m",**kargs)
def print黄(*kw,**kargs):
    print("\033[0;33m",*kw,"\033[0m",**kargs)
def print蓝(*kw,**kargs):
    print("\033[0;34m",*kw,"\033[0m",**kargs)
def print紫(*kw,**kargs):
    print("\033[0;35m",*kw,"\033[0m",**kargs)
def print靛(*kw,**kargs):
    print("\033[0;36m",*kw,"\033[0m",**kargs)

def print亮红(*kw,**kargs):
    print("\033[1;31m",*kw,"\033[0m",**kargs)
def print亮绿(*kw,**kargs):
    print("\033[1;32m",*kw,"\033[0m",**kargs)
def print亮黄(*kw,**kargs):
    print("\033[1;33m",*kw,"\033[0m",**kargs)
def print亮蓝(*kw,**kargs):
    print("\033[1;34m",*kw,"\033[0m",**kargs)
def print亮紫(*kw,**kargs):
    print("\033[1;35m",*kw,"\033[0m",**kargs)
def print亮靛(*kw,**kargs):
    print("\033[1;36m",*kw,"\033[0m",**kargs)



def print亮红(*kw,**kargs):
    print("\033[1;31m",*kw,"\033[0m",**kargs)
def print亮绿(*kw,**kargs):
    print("\033[1;32m",*kw,"\033[0m",**kargs)
def print亮黄(*kw,**kargs):
    print("\033[1;33m",*kw,"\033[0m",**kargs)
def print亮蓝(*kw,**kargs):
    print("\033[1;34m",*kw,"\033[0m",**kargs)
def print亮紫(*kw,**kargs):
    print("\033[1;35m",*kw,"\033[0m",**kargs)
def print亮靛(*kw,**kargs):
    print("\033[1;36m",*kw,"\033[0m",**kargs)

print_red = print红
print_green = print绿
print_yellow = print黄
print_blue = print蓝
print_purple = print紫
print_indigo = print靛

print_bold_red = print亮红
print_bold_green = print亮绿
print_bold_yellow = print亮黄
print_bold_blue = print亮蓝
print_bold_purple = print亮紫
print_bold_indigo = print亮靛

if not stdout.isatty():
    # output is redirected: fall back to plain print to avoid a garbled log file
    print红 = print
    print绿 = print
    print黄 = print
    print蓝 = print
    print紫 = print
    print靛 = print
    print亮红 = print
    print亮绿 = print
    print亮黄 = print
    print亮蓝 = print
    print亮紫 = print
    print亮靛 = print
    print_red = print
    print_green = print
    print_yellow = print
    print_blue = print
    print_purple = print
    print_indigo = print
    print_bold_red = print
    print_bold_green = print
    print_bold_yellow = print
    print_bold_blue = print
    print_bold_purple = print
    print_bold_indigo = print
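A minimal usage sketch of the helpers above; the ASCII aliases map one-to-one onto the Chinese-named functions:

```python
from colorful import print亮黄, print_bold_green

print亮黄("新版本可用")        # bright yellow, as check_proxy.py uses it
print_bold_green("更新完成")   # alias of print亮绿
```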
config.py — 10 changed lines
@@ -19,16 +19,24 @@ if USE_PROXY:
else:
    proxies = None

# In multi-threaded function plugins, how many threads may access OpenAI at the same time by default.
# Free trial users are limited to 3 requests per minute, pay-as-you-go users to 3500 per minute. To raise the limit, see:
# https://platform.openai.com/docs/guides/rate-limits/overview
DEFAULT_WORKER_NUM = 3


# [step 3]>> The following settings can improve the experience, but in most cases they do not need to be changed
# Height of the chat window
CHATBOT_HEIGHT = 1115

# Code highlighting
CODE_HIGHLIGHT = True

# Window layout
LAYOUT = "LEFT-RIGHT"  # "LEFT-RIGHT" (left-right layout) # "TOP-DOWN" (top-down layout)

# How long to wait, after sending a request to OpenAI, before judging it timed out
TIMEOUT_SECONDS = 25
TIMEOUT_SECONDS = 30

# Web port; -1 means a random port
WEB_PORT = -1
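patch_and_restart() in check_proxy.py above copies config.py to config_private.py when the latter is missing, so local overrides belong in config_private.py rather than in config.py itself. A hedged sketch (the values are illustrative):

```python
# config_private.py — local overrides, kept out of version control
USE_PROXY = False          # the flag seen in the hunk header above
DEFAULT_WORKER_NUM = 8     # raise the thread cap on a pay-as-you-go account
LAYOUT = "TOP-DOWN"        # switch from the default "LEFT-RIGHT" layout
TIMEOUT_SECONDS = 30
```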
@@ -56,7 +56,7 @@ def get_core_functions():
        "Color": "secondary",
    },
    "英译中": {
        "Prefix": r"请翻译成中文:" + "\n\n",
        "Prefix": r"翻译成地道的中文:" + "\n\n",
        "Suffix": r"",
    },
    "找图片": {
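What the Prefix/Suffix fields amount to, as a one-line sketch (assumption: the UI wraps the input-box content between them before sending it to the model):

```python
user_input = "The quick brown fox jumps over the lazy dog."   # whatever is in the input box
entry = {"Prefix": r"翻译成地道的中文:" + "\n\n", "Suffix": r""}
final_prompt = entry["Prefix"] + user_input + entry["Suffix"]
print(final_prompt)
```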
crazy_functional.py
@@ -15,12 +15,10 @@ def get_crazy_functions():
    from crazy_functions.解析项目源代码 import 解析一个Rect项目
    from crazy_functions.高级功能函数模板 import 高阶功能模板函数
    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
    from crazy_functions.Latex全文润色 import Latex英文润色
    from crazy_functions.解析项目源代码 import 解析一个Lua项目
    function_plugins = {
        "请解析并解构此项目本身(源码自译解)": {
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(解析项目本身)
        },
        "解析整个Python项目": {
            "Color": "stop",  # button color
            "Function": HotReload(解析一个Python项目)
@@ -29,7 +27,7 @@ def get_crazy_functions():
            "Color": "stop",  # button color
            "Function": HotReload(解析一个C项目的头文件)
        },
        "解析整个C++项目(.cpp/.h)": {
        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
            "Color": "stop",  # button color
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(解析一个C项目)
@@ -49,6 +47,11 @@ def get_crazy_functions():
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(解析一个Rect项目)
        },
        "解析整个Lua项目": {
            "Color": "stop",  # button color
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(解析一个Lua项目)
        },
        "读Tex论文写摘要": {
            "Color": "stop",  # button color
            "Function": HotReload(读文章写摘要)
@@ -57,14 +60,19 @@ def get_crazy_functions():
            "Color": "stop",  # button color
            "Function": HotReload(批量生成函数注释)
        },
        "[多线程Demo] 解析此项目本身(源码自译解)": {
            "Function": HotReload(解析项目本身)
        },
        "[多线程demo] 把本项目源代码切换成全英文": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(全项目切换英文)
        },
        "[函数插件模板demo] 历史上的今天": {
        "[函数插件模板Demo] 历史上的今天": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Function": HotReload(高阶功能模板函数)
        },
    }
    ###################### Group 2 plugins ###########################
    # [Group 2 plugins]: thoroughly tested, but functionally still a hair short of perfect
@@ -72,6 +80,13 @@ def get_crazy_functions():
    from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
    from crazy_functions.总结word文档 import 总结word文档
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
    from crazy_functions.Latex全文润色 import Latex中文润色
    from crazy_functions.Latex全文翻译 import Latex中译英
    from crazy_functions.Latex全文翻译 import Latex英译中
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    from crazy_functions.批量Markdown翻译 import Markdown英译中

    function_plugins.update({
        "批量翻译PDF文档(多线程)": {
@@ -79,21 +94,69 @@ def get_crazy_functions():
            "AsButton": True,  # put in the drop-down menu
            "Function": HotReload(批量翻译PDF文档)
        },
        "[仅供开发调试] 批量总结PDF文档": {
        "[测试功能] 批量总结PDF文档": {
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Function": HotReload(批量总结PDF文档)
        },
        "[仅供开发调试] 批量总结PDF文档pdfminer": {
        "[测试功能] 批量总结PDF文档pdfminer": {
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(批量总结PDF文档pdfminer)
        },
        "谷歌学术检索助手(输入谷歌学术搜索页url)": {
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(谷歌检索小助手)
        },
        "批量总结Word文档": {
            "Color": "stop",
            "Function": HotReload(总结word文档)
        },
        "理解PDF文档内容 (模仿ChatPDF)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(理解PDF文档内容标准文件输入)
        },
        "[测试功能] 英文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Latex英文润色)
        },
        "[测试功能] 中文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Latex中文润色)
        },
        "[测试功能] Latex项目全文中译英(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Latex中译英)
        },
        "[测试功能] Latex项目全文英译中(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Latex英译中)
        },
        "[测试功能] 批量Markdown中译英(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Markdown中译英)
        },
        "[测试功能] 批量Markdown英译中(输入路径或上传压缩包)": {
            # HotReload means hot reload: edit the plugin code and it takes effect without restarting the program
            "Color": "stop",
            "AsButton": False,  # put in the drop-down menu
            "Function": HotReload(Markdown英译中)
        },

    })

    ###################### Group 3 plugins ###########################
@@ -110,6 +173,8 @@ def get_crazy_functions():

    except Exception as err:
        print(f'[下载arxiv论文并翻译摘要] 插件导入失败 {str(err)}')


    ###################### Group n plugins ###########################
    return function_plugins
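A hedged sketch of registering one more plugin inside get_crazy_functions(); the module 我的插件 and its function are hypothetical, while the Color/AsButton/Function keys mirror the real entries above:

```python
from crazy_functions.我的插件 import 我的插件函数   # hypothetical module and function

function_plugins.update({
    "我的新插件(输入路径)": {                    # illustrative button label
        "Color": "stop",                         # button color
        "AsButton": False,                       # place it in the drop-down menu
        "Function": HotReload(我的插件函数)       # hot reload: edits take effect without restart
    }
})
```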
crazy_functions/Latex全文润色.py — new file, 176 lines
@@ -0,0 +1,176 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        import tiktoken
        from toolbox import get_conf
        enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split long text apart
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

        print('Segmentation: done')

def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


    # <-------- read the Latex files and strip all comments from them ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # regular expression for comments
            comment_pattern = r'%.*'
            # find comments with the regex and replace them with the empty string
            clean_tex_content = re.sub(comment_pattern, '', file_content)
            # record the text with comments removed
            pfg.file_paths.append(fp)
            pfg.file_contents.append(clean_tex_content)

    # <-------- split latex files that are too long ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # <-------- extract the abstract ---------->
    # if language == 'en':
    #     abs_extract_inputs = f"Please write an abstract for this paper"

    #     # single thread: fetch the paper's meta information
    #     paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #         inputs=abs_extract_inputs,
    #         inputs_show_user=f"正在抽取摘要信息。",
    #         llm_kwargs=llm_kwargs,
    #         chatbot=chatbot, history=[],
    #         sys_prompt="Your job is to collect information from materials。",
    #     )

    # <-------- multi-threaded polishing starts ---------->
    if language == 'en':
        inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
    elif language == 'zh':
        inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array=["你是一位专业的中文学术论文作家。" for _ in range(n_split)]


    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # cap on parallel tasks: at most 5 run at once, the rest queue up
        scroller_max_len = 80
    )

    # <-------- collect the results and exit ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


@CatchException
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic information: function, contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')


@CatchException
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic information: function, contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
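A minimal usage sketch of the splitting step above (assumes tiktoken is installed, LLM_MODEL is configured, and a local main.tex exists):

```python
pfg = PaperFileGroup()
pfg.file_paths.append("main.tex")                                    # hypothetical input file
pfg.file_contents.append(open("main.tex", encoding="utf-8").read())
pfg.run_file_split(max_token_limit=1024)
# fragments land in sp_file_contents; over-long files get ".part-j.tex" tags
print(len(pfg.sp_file_contents), pfg.sp_file_tag)
```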
crazy_functions/Latex全文翻译.py — new file, 176 lines
@@ -0,0 +1,176 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        import tiktoken
        from toolbox import get_conf
        enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split long text apart
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

        print('Segmentation: done')

def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    # <-------- read the Latex files and strip all comments from them ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # regular expression for comments
            comment_pattern = r'%.*'
            # find comments with the regex and replace them with the empty string
            clean_tex_content = re.sub(comment_pattern, '', file_content)
            # record the text with comments removed
            pfg.file_paths.append(fp)
            pfg.file_contents.append(clean_tex_content)

    # <-------- split latex files that are too long ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # <-------- extract the abstract ---------->
    # if language == 'en':
    #     abs_extract_inputs = f"Please write an abstract for this paper"

    #     # single thread: fetch the paper's meta information
    #     paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #         inputs=abs_extract_inputs,
    #         inputs_show_user=f"正在抽取摘要信息。",
    #         llm_kwargs=llm_kwargs,
    #         chatbot=chatbot, history=[],
    #         sys_prompt="Your job is to collect information from materials。",
    #     )

    # <-------- multi-threaded translation starts ---------->
    if language == 'en->zh':
        inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    elif language == 'zh->en':
        inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # the maximum parallel load OpenAI allows
        scroller_max_len = 80
    )

    # <-------- collect the results and exit ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


@CatchException
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic information: function, contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')


@CatchException
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic information: function, contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
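A quick demo of the `%.*` comment-stripping regex both Latex plugins rely on; note that, as written, it is line-based and also consumes an escaped `\%` together with the real comment (a simplification relative to full TeX lexing):

```python
import re

tex = "50\\% of cases  % TODO: re-check\nkeep this line"
print(re.sub(r'%.*', '', tex))
# prints "50\" on the first line; "keep this line" survives because `.` stops at newlines
```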
crazy_functions/crazy_utils.py
@@ -1,19 +1,120 @@
import traceback
from toolbox import update_ui, get_conf

def input_clipping(inputs, history, max_token_limit):
    import tiktoken
    import numpy as np
    enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))

def request_gpt_model_in_new_thread_with_ui_alive(inputs, inputs_show_user, top_p, temperature, chatbot, history, sys_prompt, refresh_interval=0.2):
    mode = 'input-and-history'
    # when the input part accounts for less than half of the total tokens, clip only the history
    input_token_num = get_token_num(inputs)
    if input_token_num < max_token_limit//2:
        mode = 'only-history'
        max_token_limit = max_token_limit - input_token_num

    everything = [inputs] if mode == 'input-and-history' else ['']
    everything.extend(history)
    n_token = get_token_num('\n'.join(everything))
    everything_token = [get_token_num(e) for e in everything]
    delta = max(everything_token) // 16  # granularity of truncation

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = enc.encode(everything[where], disallowed_special=())
        clipped_encoded = encoded[:len(encoded)-delta]
        everything[where] = enc.decode(clipped_encoded)[:-1]  # -1 to remove the may-be illegal char
        everything_token[where] = get_token_num(everything[where])
        n_token = get_token_num('\n'.join(everything))

    if mode == 'input-and-history':
        inputs = everything[0]
    else:
        pass
    history = everything[1:]
    return inputs, history
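A hedged sketch of calling input_clipping (assumes tiktoken, numpy and a configured LLM_MODEL): the longest element among the input and the history entries is trimmed in `delta`-sized token steps until the whole prompt fits the budget:

```python
inputs, history = input_clipping(
    inputs="please summarize the following ...",              # illustrative
    history=["a very long previous answer " * 500, "a short question"],
    max_token_limit=3072,
)
# the over-long history entry comes back truncated; the short one is untouched
```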
def request_gpt_model_in_new_thread_with_ui_alive(
        inputs, inputs_show_user, llm_kwargs,
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
        ):
    """
    Request GPT model, keeping the user interface alive while waiting.

    Args:
        inputs (string): the input
        inputs_show_user (string): the input as shown to the user (use this to hide a verbose real input in the summary report and keep it readable)
        top_p (float): top-p value for sampling from the model distribution (GPT parameter)
        temperature (float): temperature value for sampling from the model distribution (GPT parameter)
        chatbot: handle of the UI dialog window, used for data-flow visualization
        history (list): the chat history
        sys_prompt (string): the system prompt fed to GPT as a premise (e.g. "you are a translator who ...")
        refresh_interval (float, optional): refresh interval for the UI (default: 0.2); keep it below 1 and never above 3, it is purely cosmetic
        handle_token_exceed: whether to handle token overflow automatically; if enabled, overflowing text is truncated brutally (default: on)
        retry_times_at_unknown_error: number of retries on failure

    Returns:
        future: the result returned by GPT
    """
    import time
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_chatgpt import predict_no_ui_long_connection
    # user feedback
    chatbot.append([inputs_show_user, ""])
    msg = '正常'
    yield chatbot, [], msg
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    executor = ThreadPoolExecutor(max_workers=16)
    mutable = ["", time.time()]
    future = executor.submit(lambda:
        predict_no_ui_long_connection(
            inputs=inputs, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt, observe_window=mutable)
    )
    def _req_gpt(inputs, history, sys_prompt):
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        while True:
            # watchdog error
            if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # case 1: completed normally
                result = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs,
                    history=history, sys_prompt=sys_prompt, observe_window=mutable)
                return result
            except ConnectionAbortedError as token_exceeded_error:
                # case 2: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # chosen handling: estimate the ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                    mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    continue  # retry
                else:
                    # chosen handling: give up
                    tb_str = '```\n' + traceback.format_exc() + '```'
                    mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    return mutable[0]  # give up
            except:
                # case 3: other errors — retry a few times
                tb_str = '```\n' + traceback.format_exc() + '```'
                print(tb_str)
                mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if retry_op > 0:
                    retry_op -= 1
                    mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
                    if "Rate limit reached" in tb_str:
                        time.sleep(30)
                    time.sleep(5)
                    continue  # retry
                else:
                    time.sleep(5)
                    return mutable[0]  # give up

    # submit the task
    future = executor.submit(_req_gpt, inputs, history, sys_prompt)
    while True:
        # yield once to refresh the front-end page
        time.sleep(refresh_interval)
@@ -22,32 +123,130 @@ def request_gpt_model_in_new_thread_with_ui_alive(inputs, inputs_show_user, top_
        if future.done():
            break
        chatbot[-1] = [chatbot[-1][0], mutable[0]]
        msg = "正常"
        yield chatbot, [], msg
    return future.result()
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI

    final_result = future.result()
    chatbot[-1] = [chatbot[-1][0], final_result]
    yield from update_ui(chatbot=chatbot, history=[])  # if it finally succeeded, remove the error message
    return final_result
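A hedged usage sketch from inside a function plugin (the Latex plugins above drive it the same way with `yield from`); the signature of the enclosing plugin mirrors theirs:

```python
def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # hypothetical plugin body: one single-threaded GPT request with a live UI
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=f"Polish this paragraph: {txt}",
        inputs_show_user="Polish paragraph 1",        # shorter text shown in the report
        llm_kwargs=llm_kwargs,
        chatbot=chatbot, history=[],
        sys_prompt="You are a professional academic paper writer.",
    )
```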
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(inputs_array, inputs_show_user_array, top_p, temperature, chatbot, history_array, sys_prompt_array, refresh_interval=0.2, max_workers=10, scroller_max_len=30):
    import time
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array, inputs_show_user_array, llm_kwargs,
        chatbot, history_array, sys_prompt_array,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
        ):
    """
    Request GPT model using multiple threads, with UI and high efficiency.
    The [multi-threaded] version of the GPT-model request. Features:
        streams remote data to the UI in real time;
        uses a thread pool whose size can be tuned to avoid openai rate-limit errors;
        handles mid-run aborts;
        if the network (or anything else) fails, the traceback and all data received so far are folded into the output.

    Args (input variables ending in _array are lists whose length equals the number of subtasks; at run time each list is unpacked and every item executes in its own worker thread):
        inputs_array (list): the input of each subtask
        inputs_show_user_array (list): the inputs as shown to the user (use these to hide verbose real inputs in the summary report and keep it readable)
        llm_kwargs: the llm_kwargs parameters
        chatbot: handle of the UI dialog window, used for data-flow visualization
        history_array (list): the chat histories (a doubly nested list: the outer level is subtasks, the inner level the conversation history)
        sys_prompt_array (list): the system prompts fed to GPT as a premise (e.g. "you are a translator who ...")
        refresh_interval (float, optional): refresh interval for the UI (default: 0.2); keep it below 1 and never above 3, it is purely cosmetic
        max_workers (int, optional): maximum number of threads (default: see config.py); with very many subtasks, use this to keep from hammering openai at high frequency and triggering errors
        scroller_max_len (int, optional): how many of the most recently received characters are shown (default: 30), purely cosmetic
        handle_token_exceed (bool, optional): whether to shrink the text automatically when the input is too long; if enabled, overflowing text is truncated brutally (default: on)
        show_user_at_complete (bool, optional): on completion, show the complete input/output in the chat window
        retry_times_at_unknown_error: number of retries when a subtask fails

    Returns:
        list: the collected outputs of all subtasks; if a subtask fails, its response carries the traceback, for easy debugging and problem localization
    """
    import time, random
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_chatgpt import predict_no_ui_long_connection
    assert len(inputs_array) == len(history_array)
    assert len(inputs_array) == len(sys_prompt_array)
    if max_workers == -1:  # read from the config file
        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
        except: max_workers = 8
        if max_workers <= 0 or max_workers >= 20: max_workers = 8
    executor = ThreadPoolExecutor(max_workers=max_workers)
    n_frag = len(inputs_array)
    # user feedback
    chatbot.append(["请开始多线程操作。", ""])
    msg = '正常'
    yield chatbot, [], msg
    # async atom
    mutable = [["", time.time()] for _ in range(n_frag)]
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    # cross-thread transfer
    mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]

    # worker-thread task
    def _req_gpt(index, inputs, history, sys_prompt):
        gpt_say = predict_no_ui_long_connection(
            inputs=inputs, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt, observe_window=mutable[index]
        )
        return gpt_say
        gpt_say = ""
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        mutable[index][2] = "执行中"
        while True:
            # watchdog error
            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # case 1: completed normally
                # time.sleep(10); raise RuntimeError("测试")
                gpt_say = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                    sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
                )
                mutable[index][2] = "已成功"
                return gpt_say
            except ConnectionAbortedError as token_exceeded_error:
                # case 2: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # chosen handling: estimate the ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                    gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    mutable[index][2] = f"截断重试"
                    continue  # retry
                else:
                    # chosen handling: give up
                    tb_str = '```\n' + traceback.format_exc() + '```'
                    gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                    mutable[index][2] = "输入过长已放弃"
                    return gpt_say  # give up
            except:
                # case 3: other errors
                tb_str = '```\n' + traceback.format_exc() + '```'
                print(tb_str)
                gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                if retry_op > 0:
                    retry_op -= 1
|
||||
wait = random.randint(5, 20)
|
||||
if "Rate limit reached" in tb_str:
|
||||
wait = wait * 3
|
||||
fail_info = "OpenAI请求速率限制 "
|
||||
else:
|
||||
fail_info = ""
|
||||
# 也许等待十几秒后,情况会好转
|
||||
for i in range(wait):
|
||||
mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
|
||||
# 开始重试
|
||||
mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
|
||||
continue # 返回重试
|
||||
else:
|
||||
mutable[index][2] = "已失败"
|
||||
wait = 5
|
||||
time.sleep(5)
|
||||
return gpt_say # 放弃
|
||||
|
||||
# 异步任务开始
|
||||
futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
|
||||
range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
|
||||
@@ -71,17 +270,27 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(inp
|
||||
replace('\n', '').replace('```', '...').replace(
|
||||
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
|
||||
observe_win.append(print_something_really_funny)
|
||||
stat_str = ''.join([f'执行中: {obs}\n\n' if not done else '已完成\n\n' for done, obs in zip(
|
||||
worker_done, observe_win)])
|
||||
chatbot[-1] = [chatbot[-1][0],
|
||||
f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
|
||||
msg = "正常"
|
||||
yield chatbot, [], msg
|
||||
# 在前端打印些好玩的东西
|
||||
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
||||
if not done else f'`{mutable[thread_index][2]}`\n\n'
|
||||
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
|
||||
# 在前端打印些好玩的东西
|
||||
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
|
||||
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
||||
|
||||
# 异步任务结束
|
||||
gpt_response_collection = []
|
||||
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
||||
gpt_res = f.result()
|
||||
gpt_response_collection.extend([inputs_show_user, gpt_res])
|
||||
|
||||
# 是否在结束时,在界面上显示结果
|
||||
if show_user_at_complete:
|
||||
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
||||
gpt_res = f.result()
|
||||
chatbot.append([inputs_show_user, gpt_res])
|
||||
yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
|
||||
time.sleep(0.3)
|
||||
return gpt_response_collection
|
||||
|
||||
|
||||
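A hypothetical driver for the multi-threaded API above (sketch only, not from the diff; `llm_kwargs` and `chatbot` are whatever handles the host application already holds). Every `*_array` argument needs one entry per sub-task, and the returned list alternates shown-input and answer:

```python
# Minimal usage sketch, assuming this generator is driven by the plugin
# framework the same way the document's plugins are.
def translate_fragments(fragments, llm_kwargs, chatbot):
    responses = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=[f"Translate into Chinese:\n\n{frag}" for frag in fragments],
        inputs_show_user_array=[f"翻译第{i+1}段" for i in range(len(fragments))],
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[] for _ in fragments],
        sys_prompt_array=["You are a translator." for _ in fragments],
    )
    # responses == [shown_input_0, answer_0, shown_input_1, answer_1, ...]
    return responses[1::2]   # keep only the model answers
```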
@@ -103,7 +312,6 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
            if get_token_fn(prev) < limit:
                break
        if cnt == 0:
            print('what the fuck ?')
            raise RuntimeError("存在一行极长的文本!")
        # print(len(post))
        # 列表递归接龙
@@ -116,8 +324,18 @@ def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
        return cut(txt, must_break_at_empty_line=False)


def force_breakdown(txt, limit, get_token_fn):
    """
    当无法用标点、空行分割时,我们用最暴力的方法切割
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"
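`force_breakdown` scans right-to-left one character at a time, which can cost one tokenizer call per character. A sketch of an equivalent split with O(log n) tokenizer calls, assuming token count is monotonic in prefix length (the name `force_breakdown_bisect` is hypothetical):

```python
def force_breakdown_bisect(txt, limit, get_token_fn):
    # invariant: txt[:lo] fits under the limit, txt[:hi] may not
    lo, hi = 0, len(txt)
    while lo + 1 < hi:
        mid = (lo + hi) // 2
        if get_token_fn(txt[:mid]) < limit: lo = mid
        else: hi = mid
    return txt[:lo], txt[lo:]   # same result as the linear scan above
```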
def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
    def cut(txt_tocut, must_break_at_empty_line): # 递归
    # 递归
    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
@@ -129,25 +347,215 @@ def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                print(cnt)
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                # print('what the fuck ? 存在一行极长的文本!')
                raise RuntimeError("存在一行极长的文本!")
                if break_anyway:
                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
                else:
                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
            # print(len(post))
            # 列表递归接龙
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line))
            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
            return result
    try:
        # 第1次尝试,将双空行(\n\n)作为切分点
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # 第2次尝试,将单空行(\n)作为切分点
            return cut(txt, must_break_at_empty_line=False)
        except RuntimeError:
            # 这个中文的句号是故意的,作为一个标识而存在
            res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)
            return [r.replace('。\n', '.') for r in res]
            try:
                # 第3次尝试,将英文句号(.)作为切分点
                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False) # 这个中文的句号是故意的,作为一个标识而存在
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # 第4次尝试,将中文句号(。)作为切分点
                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # 第5次尝试,没办法了,随便切一下敷衍吧
                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)
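A minimal sketch of driving the splitter, assuming the complete implementation from crazy_utils.py (the crude 4-characters-per-token counter is a stand-in; the real callers plug in a tiktoken encoder):

```python
long_text = "paragraph one ...\n\nparagraph two ...\n\n" * 200
fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
    txt=long_text,
    get_token_fn=lambda s: len(s) // 4,   # rough stand-in token counter
    limit=1600)
# every fragment respects the token budget
assert all(len(f) // 4 <= 1600 for f in fragments)
```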


def read_and_clean_pdf_text(fp):
    """
    这个函数用于分割pdf,用了很多trick,逻辑较乱,效果奇好

    **输入参数说明**
    - `fp`:需要读取和清理文本的pdf文件路径

    **输出参数说明**
    - `meta_txt`:清理后的文本内容字符串
    - `page_one_meta`:第一页清理后的文本内容列表

    **函数功能**
    读取pdf文件并清理其中的文本内容,清理规则包括:
    - 提取所有块元的文本信息,并合并为一个字符串
    - 去除短块(字符数小于100)并替换为回车符
    - 清理多余的空行
    - 合并小写字母开头的段落块并替换为空格
    - 清除重复的换行
    - 将每个换行符替换为两个换行符,使每个段落之间有两个换行符分隔
    """
    import fitz, copy
    import re
    import numpy as np
    from colorful import print亮黄, print亮绿
    fc = 0  # Index 0 文本
    fs = 1  # Index 1 字体
    fb = 2  # Index 2 框框
    REMOVE_FOOT_NOTE = True # 是否丢弃掉 不是正文的内容 (比正文字体小,如参考文献、脚注、图注等)
    REMOVE_FOOT_FFSIZE_PERCENT = 0.95 # 小于正文的?时,判定为不是正文(有些文章的正文部分字体大小不是100%统一的,有肉眼不可见的小变化)

    def primary_ffsize(l):
        """
        提取文本块主字体
        """
        fsize_statiscs = {}
        for wtf in l['spans']:
            if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
            fsize_statiscs[wtf['size']] += len(wtf['text'])
        return max(fsize_statiscs, key=fsize_statiscs.get)

    def ffsize_same(a, b):
        """
        提取字体大小是否近似相等
        """
        return abs((a-b)/max(a, b)) < 0.02
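A tiny check of the length-weighted vote these two helpers implement (the span data here is hypothetical, for illustration only):

```python
line = {'spans': [{'size': 12.0, 'text': 'Fi'},
                  {'size': 9.5,  'text': 'gure 1: a caption'}]}
# 17 characters at size 9.5 outweigh 2 characters at size 12.0
assert primary_ffsize(line) == 9.5
assert ffsize_same(9.5, 9.6) and not ffsize_same(9.5, 12.0)
```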
    with fitz.open(fp) as doc:
        meta_txt = []
        meta_font = []

        meta_line = []
        meta_span = []
        ############################## <第 1 步,搜集初始信息> ##################################
        for index, page in enumerate(doc):
            # file_content += page.get_text()
            text_areas = page.get_text("dict") # 获取页面上的文本信息
            for t in text_areas['blocks']:
                if 'lines' in t:
                    pf = 998
                    for l in t['lines']:
                        txt_line = "".join([wtf['text'] for wtf in l['spans']])
                        pf = primary_ffsize(l)
                        meta_line.append([txt_line, pf, l['bbox'], l])
                        for wtf in l['spans']: # for l in t['lines']:
                            meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
                    # meta_line.append(["NEW_BLOCK", pf])
            # 块元提取 for each word segment with in line for each line cross-line words for each block
            meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                '- ', '') for t in text_areas['blocks'] if 'lines' in t])
            meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
                             for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
            if index == 0:
                page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                    '- ', '') for t in text_areas['blocks'] if 'lines' in t]

        ############################## <第 2 步,获取正文主字体> ##################################
        fsize_statiscs = {}
        for span in meta_span:
            if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
            fsize_statiscs[span[1]] += span[2]
        main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
        if REMOVE_FOOT_NOTE:
            give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT

        ############################## <第 3 步,切分和重新整合> ##################################
        mega_sec = []
        sec = []
        for index, line in enumerate(meta_line):
            if index == 0:
                sec.append(line[fc])
                continue
            if REMOVE_FOOT_NOTE:
                if meta_line[index][fs] <= give_up_fize_threshold:
                    continue
            if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
                # 尝试识别段落
                if meta_line[index][fc].endswith('.') and\
                        (meta_line[index-1][fc] != 'NEW_BLOCK') and \
                        (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
                    sec[-1] += line[fc]
                    sec[-1] += "\n\n"
                else:
                    sec[-1] += " "
                    sec[-1] += line[fc]
            else:
                if (index+1 < len(meta_line)) and \
                        meta_line[index][fs] > main_fsize:
                    # 单行 + 字体大
                    mega_sec.append(copy.deepcopy(sec))
                    sec = []
                    sec.append("# " + line[fc])
                else:
                    # 尝试识别section
                    if meta_line[index-1][fs] > meta_line[index][fs]:
                        sec.append("\n" + line[fc])
                    else:
                        sec.append(line[fc])
        mega_sec.append(copy.deepcopy(sec))

        finals = []
        for ms in mega_sec:
            final = " ".join(ms)
            final = final.replace('- ', ' ')
            finals.append(final)
        meta_txt = finals

        ############################## <第 4 步,乱七八糟的后处理> ##################################
        def 把字符太少的块清除为回车(meta_txt):
            for index, block_txt in enumerate(meta_txt):
                if len(block_txt) < 100:
                    meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 把字符太少的块清除为回车(meta_txt)

        def 清理多余的空行(meta_txt):
            for index in reversed(range(1, len(meta_txt))):
                if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
                    meta_txt.pop(index)
            return meta_txt
        meta_txt = 清理多余的空行(meta_txt)

        def 合并小写开头的段落块(meta_txt):
            def starts_with_lowercase_word(s):
                pattern = r"^[a-z]+"
                match = re.match(pattern, s)
                if match:
                    return True
                else:
                    return False
            for _ in range(100):
                for index, block_txt in enumerate(meta_txt):
                    if starts_with_lowercase_word(block_txt):
                        if meta_txt[index-1] != '\n':
                            meta_txt[index-1] += ' '
                        else:
                            meta_txt[index-1] = ''
                        meta_txt[index-1] += meta_txt[index]
                        meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 合并小写开头的段落块(meta_txt)
        meta_txt = 清理多余的空行(meta_txt)

        meta_txt = '\n'.join(meta_txt)
        # 清除重复的换行
        for _ in range(5):
            meta_txt = meta_txt.replace('\n\n', '\n')

        # 换行 -> 双换行
        meta_txt = meta_txt.replace('\n', '\n\n')

        ############################## <第 5 步,展示分割效果> ##################################
        for f in finals:
            print亮黄(f)
            print亮绿('***************************')

    return meta_txt, page_one_meta
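A hedged usage sketch (assumes PyMuPDF is installed and that a file named `paper.pdf` exists; the file name is hypothetical):

```python
meta_txt, page_one_meta = read_and_clean_pdf_text('paper.pdf')
print(meta_txt[:500])     # cleaned full text, paragraphs separated by blank lines
print(page_one_meta[:3])  # first-page blocks, handy for title/author extraction
```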
@@ -1,7 +1,7 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down, get_conf
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file, get_conf
import re, requests, unicodedata, os

from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
def download_arxiv_(url_pdf):
    if 'arxiv.org' not in url_pdf:
        if ('.' in url_pdf) and ('/' not in url_pdf):
@@ -132,7 +132,7 @@ def get_name(_url_):


@CatchException
def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):

    CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
    import glob
@@ -140,7 +140,7 @@ def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history,

    # 基本信息:功能、贡献者
    chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO])
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
@@ -149,7 +149,7 @@ def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history,
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 清空历史,以免输入溢出
@@ -162,25 +162,33 @@ def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history,
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"下载pdf文件未成功")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 翻译摘要等
    i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}"
    i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}'
    chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    msg = '正常'
    # ** gpt request **
    gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
    # 单线,获取文章meta信息
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say,
        inputs_show_user=i_say_show_user,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot, history=[],
        sys_prompt="Your job is to collect information from materials and translate to Chinese。",
    )

    chatbot[-1] = (i_say_show_user, gpt_say)
    history.append(i_say_show_user); history.append(gpt_say)
    yield chatbot, history, msg
    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
    # 写入文件
    import shutil
    # 重置文件的创建时间
    shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
    yield chatbot, history, msg
    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
@@ -1,5 +1,6 @@
import threading
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
from toolbox import update_ui
from toolbox import CatchException, write_results_to_file, report_execption
from .crazy_utils import breakdown_txt_to_satisfy_token_limit

@@ -22,22 +23,22 @@ def break_txt_into_half_at_some_linebreak(txt):


@CatchException
def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt, WEB_PORT):
def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
    # 第1步:清空历史,以免输入溢出
    history = []

    # 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import openai, transformers
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade openai transformers```。")
        yield chatbot, history, '正常'
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 第3步:集合文件
    import time, glob, os, shutil, re, openai
    import time, glob, os, shutil, re
    os.makedirs('gpt_log/generated_english_version', exist_ok=True)
    os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
@@ -48,21 +49,20 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
    # 第4步:随便显示点什么防止卡顿的感觉
    for index, fp in enumerate(file_manifest):
        # if 'test_project' in fp: continue
        with open(fp, 'r', encoding='utf-8') as f:
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
        i_say_show_user_buffer.append(i_say_show_user)
        chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 第5步:Token限制下的截断与处理
    MAX_TOKEN = 3000
    from transformers import GPT2TokenizerFast
    print('加载tokenizer中')
    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
    get_token_fn = lambda txt: len(tokenizer(txt)["input_ids"])
    print('加载tokenizer结束')
    import tiktoken
    from toolbox import get_conf
    enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
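The same token counter can be tried standalone, without the project's `get_conf` (the model name `gpt-3.5-turbo` here is an assumption for illustration):

```python
import tiktoken
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
# Counts BPE tokens, not characters; CJK text often costs >1 token per char.
print(get_token_fn("接下来请将以下代码中包含的所有中文转化为英文"))
```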
    # 第6步:任务函数
@@ -72,7 +72,7 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
        if index > 10:
            time.sleep(60)
            print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
        with open(fp, 'r', encoding='utf-8') as f:
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
        try:
@@ -82,7 +82,7 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
            for file_content_partial in file_content_breakdown:
                i_say = i_say_template(fp, file_content_partial)
                # # ** gpt request **
                gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, top_p=top_p, temperature=temperature, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
                gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
                gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
                gpt_say += gpt_say_partial
                mutable_return[index] = gpt_say
@@ -97,7 +97,7 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
        h.daemon = True
        h.start()
    chatbot.append(('开始了吗?', f'多线程操作已经开始'))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 第8步:循环轮询各个线程是否执行完毕
    cnt = 0
@@ -113,7 +113,7 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
        stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
        stat_str = ''.join(stat)
        chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 第9步:把结果写入文件
    for index, h in enumerate(handles):
@@ -130,10 +130,10 @@ def 全项目切换英文(txt, top_p, temperature, chatbot, history, sys_prompt,
        shutil.copyfile(file_manifest[index], where_to_relocate)
        chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
        history.append(i_say_show_user); history.append(gpt_say)
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        time.sleep(1)

    # 第10步:备份一个文件
    res = write_results_to_file(history)
    chatbot.append(("生成一份任务执行报告", res))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -1,9 +1,10 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析docx(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, os
    # pip install python-docx 用于docx格式,跨平台
    # pip install pywin32 用于doc格式,仅支持Win平台
@@ -35,58 +36,69 @@ def 解析docx(file_manifest, project_folder, top_p, temperature, chatbot, histo
                f'文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature,
                                                                 history=[]) # 带超时倒计时
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            ) # 带超时倒计时
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user);
            history.append(i_say_show_user)
            history.append(gpt_say)
            yield chatbot, history, msg
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
            if not fast_debug: time.sleep(2)

    """
    # 可按需启用
    i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \
        f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \
        f'根据你之前的分析,提出建议'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
    """

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature,
                                                             history=history) # 带超时倒计时
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        ) # 带超时倒计时
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say)
        history.append(gpt_say)
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面


@CatchException
def 总结word文档(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "批量总结Word文档。函数插件贡献者: JasonGuo1"])
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
@@ -95,7 +107,7 @@ def 总结word文档(txt, top_p, temperature, chatbot, history, systemPromptTxt,
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 清空历史,以免输入溢出
@@ -107,7 +119,7 @@ def 总结word文档(txt, top_p, temperature, chatbot, history, systemPromptTxt,
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 搜索需要处理的文件清单
@@ -120,8 +132,8 @@ def 总结word文档(txt, top_p, temperature, chatbot, history, systemPromptTxt,
    # 如果没找到任何文件
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 开始正式执行任务
    yield from 解析docx(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
162
crazy_functions/批量Markdown翻译.py
Normal file
@@ -0,0 +1,162 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        import tiktoken
        from toolbox import get_conf
        enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        将长文本分离开来
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")

        print('Segmentation: done')

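A minimal sketch of the PaperFileGroup flow (file name hypothetical; this only runs inside the project, since `__init__` reads `LLM_MODEL` from the config):

```python
pfg = PaperFileGroup()
pfg.file_paths = ['README.md']
pfg.file_contents = [open('README.md', encoding='utf-8').read()]
pfg.run_file_split(max_token_limit=2048)
# Oversized files come back as several entries tagged "README.md.part-0.md", ...
for tag, frag in zip(pfg.sp_file_tag, pfg.sp_file_contents):
    print(tag, len(frag))
```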
def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    # <-------- 读取Markdown文件,删除其中的所有注释 ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # 记录删除注释后的文本
            pfg.file_paths.append(fp)
            pfg.file_contents.append(file_content)

    # <-------- 拆分过长的Markdown文件 ---------->
    pfg.run_file_split(max_token_limit=2048)
    n_split = len(pfg.sp_file_contents)

    # <-------- 多线程润色开始 ---------->
    if language == 'en->zh':
        inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    elif language == 'zh->en':
        inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # OpenAI所允许的最大并行过载
        scroller_max_len = 80
    )

    # <-------- 整理结果,退出 ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面


@CatchException
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    history = [] # 清空历史,以免输入溢出
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')


@CatchException
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    history = [] # 清空历史,以免输入溢出
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    if txt.endswith('.md'):
        file_manifest = [txt]
    else:
        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
@@ -1,8 +1,9 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
import re
import unicodedata
fast_debug = False
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def is_paragraph_break(match):
    """
@@ -57,7 +58,7 @@ def clean_text(raw_text):

    return final_text.strip()

def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os, fitz
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
@@ -72,49 +73,60 @@ def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, histor
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            ) # 带超时倒计时

            print('[2] end gpt req')
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
            print('[4] next')
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        ) # 带超时倒计时
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面


@CatchException
def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
@@ -123,7 +135,7 @@ def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromp
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 清空历史,以免输入溢出
@@ -135,7 +147,7 @@ def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromp
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 搜索需要处理的文件清单
@@ -147,8 +159,8 @@ def 批量总结PDF文档(txt, top_p, temperature, chatbot, history, systemPromp
    # 如果没找到任何文件
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 开始正式执行任务
    yield from 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -1,5 +1,6 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

fast_debug = False

@@ -61,13 +62,13 @@ def readPdf(pdfPath):
    return outTextList


def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os
    from bs4 import BeautifulSoup
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        if ".tex" in fp:
            with open(fp, 'r', encoding='utf-8') as f:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        if ".pdf" in fp.lower():
            file_content = readPdf(fp)
@@ -77,43 +78,51 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时

            print('[2] end gpt req')
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            ) # 带超时倒计时
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
            print('[4] next')
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时

        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        ) # 带超时倒计时
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面



@CatchException
def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # 清空历史,以免输入溢出
    import glob, os

@@ -121,7 +130,7 @@ def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, sys
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
@@ -130,14 +139,14 @@ def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, sys
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
@@ -145,7 +154,7 @@ def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, sys
                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return
    yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -1,102 +1,20 @@
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


def read_and_clean_pdf_text(fp):
    """
    **输入参数说明**
    - `fp`:需要读取和清理文本的pdf文件路径

    **输出参数说明**
    - `meta_txt`:清理后的文本内容字符串
    - `page_one_meta`:第一页清理后的文本内容列表

    **函数功能**
    读取pdf文件并清理其中的文本内容,清理规则包括:
    - 提取所有块元的文本信息,并合并为一个字符串
    - 去除短块(字符数小于100)并替换为回车符
    - 清理多余的空行
    - 合并小写字母开头的段落块并替换为空格
    - 清除重复的换行
    - 将每个换行符替换为两个换行符,使每个段落之间有两个换行符分隔
    """
    import fitz
    import re
    import numpy as np
    # file_content = ""
    with fitz.open(fp) as doc:
        meta_txt = []
        meta_font = []
        for index, page in enumerate(doc):
            # file_content += page.get_text()
            text_areas = page.get_text("dict") # 获取页面上的文本信息

            # 块元提取 for each word segment with in line for each line cross-line words for each block
            meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                '- ', '') for t in text_areas['blocks'] if 'lines' in t])
            meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
                             for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
            if index == 0:
                page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                    '- ', '') for t in text_areas['blocks'] if 'lines' in t]

        def 把字符太少的块清除为回车(meta_txt):
            for index, block_txt in enumerate(meta_txt):
                if len(block_txt) < 100:
                    meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 把字符太少的块清除为回车(meta_txt)

        def 清理多余的空行(meta_txt):
            for index in reversed(range(1, len(meta_txt))):
                if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
                    meta_txt.pop(index)
            return meta_txt
        meta_txt = 清理多余的空行(meta_txt)

        def 合并小写开头的段落块(meta_txt):
            def starts_with_lowercase_word(s):
                pattern = r"^[a-z]+"
                match = re.match(pattern, s)
                if match:
                    return True
                else:
                    return False
            for _ in range(100):
                for index, block_txt in enumerate(meta_txt):
                    if starts_with_lowercase_word(block_txt):
                        if meta_txt[index-1] != '\n':
                            meta_txt[index-1] += ' '
                        else:
                            meta_txt[index-1] = ''
                        meta_txt[index-1] += meta_txt[index]
                        meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 合并小写开头的段落块(meta_txt)
        meta_txt = 清理多余的空行(meta_txt)

        meta_txt = '\n'.join(meta_txt)
        # 清除重复的换行
        for _ in range(5):
            meta_txt = meta_txt.replace('\n\n', '\n')

        # 换行 -> 双换行
        meta_txt = meta_txt.replace('\n', '\n\n')

    return meta_txt, page_one_meta

from .crazy_utils import read_and_clean_pdf_text
from colorful import *

@CatchException
def 批量翻译PDF文档(txt, top_p, temperature, chatbot, history, sys_prompt, WEB_PORT):
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
    import glob
    import os

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档。函数插件贡献者: Binary-Husky(二进制哈士奇)"])
    yield chatbot, history, '正常'
        "批量总结PDF文档。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
@@ -106,7 +24,7 @@ def 批量翻译PDF文档(txt, top_p, temperature, chatbot, history, sys_prompt,
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken```。")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 清空历史,以免输入溢出
@@ -120,7 +38,7 @@ def 批量翻译PDF文档(txt, top_p, temperature, chatbot, history, sys_prompt,
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 搜索需要处理的文件清单
@@ -131,63 +49,73 @@ def 批量翻译PDF文档(txt, top_p, temperature, chatbot, history, sys_prompt,
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
        return

    # 开始正式执行任务
    yield from 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, sys_prompt)
    yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt)


def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, history, sys_prompt):
def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt):
    import os
    import tiktoken
    TOKEN_LIMIT_PER_FRAGMENT = 1600
    generated_conclusion_files = []
    for index, fp in enumerate(file_manifest):

        # 读取PDF文件
        file_content, page_one = read_and_clean_pdf_text(fp)

        # 递归地切割PDF文件
        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
        enc = tiktoken.get_encoding("gpt2")
        def get_token_num(txt): return len(enc.encode(txt))
        # 分解文本
        from toolbox import get_conf
        enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
        page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
        # 为了更好的效果,我们剥离Introduction之后的部分
        paper_meta = page_one_fragments[0].split('introduction')[0].split(
            'Introduction')[0].split('INTRODUCTION')[0]

        # 为了更好的效果,我们剥离Introduction之后的部分(如果有)
        paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
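What the triple split does, on a hypothetical first-page fragment: everything from the first "Introduction" heading onward is discarded, keeping only title, author, and abstract material for the meta-extraction prompt:

```python
page_one = "Title: Foo\nAbstract: ...\n1 Introduction\nDeep nets ..."
paper_meta = page_one.split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]
print(paper_meta)   # -> "Title: Foo\nAbstract: ...\n1 "
```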
# 单线,获取文章meta信息
|
||||
paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
||||
inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}",
|
||||
inputs_show_user=f"请从{fp}中提取出“标题”、“收录会议或期刊”等基本信息。",
|
||||
top_p=top_p, temperature=temperature,
|
||||
llm_kwargs=llm_kwargs,
|
||||
chatbot=chatbot, history=[],
|
||||
sys_prompt="Your job is to collect information from materials。",
|
||||
)
|
||||
|
||||
# 多线,翻译
|
||||
gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
||||
inputs_array=[
|
||||
f"以下是你需要翻译的文章段落:\n{frag}" for frag in paper_fragments],
|
||||
inputs_show_user_array=[f"" for _ in paper_fragments],
|
||||
top_p=top_p, temperature=temperature,
|
||||
f"以下是你需要翻译的论文片段:\n{frag}" for frag in paper_fragments],
|
||||
inputs_show_user_array=[f"\n---\n 原文: \n\n {frag.replace('#', '')} \n---\n 翻译:\n " for frag in paper_fragments],
|
||||
llm_kwargs=llm_kwargs,
|
||||
chatbot=chatbot,
|
||||
history_array=[[paper_meta] for _ in paper_fragments],
|
||||
sys_prompt_array=[
|
||||
"请你作为一个学术翻译,把整个段落翻译成中文,要求语言简洁,禁止重复输出原文。" for _ in paper_fragments],
|
||||
max_workers=16 # OpenAI所允许的最大并行过载
|
||||
"请你作为一个学术翻译,负责把学术论文的片段准确翻译成中文。" for _ in paper_fragments],
|
||||
# max_workers=5 # OpenAI所允许的最大并行过载
|
||||
)
|
||||
|
||||
final = ["", paper_meta_info + '\n\n---\n\n---\n\n---\n\n']
|
||||
# 整理报告的格式
|
||||
for i,k in enumerate(gpt_response_collection):
|
||||
if i%2==0:
|
||||
gpt_response_collection[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection)//2}]:\n "
|
||||
else:
|
||||
gpt_response_collection[i] = gpt_response_collection[i]
|
||||
final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
|
||||
final.extend(gpt_response_collection)
|
||||
create_report_file_name = f"{os.path.basename(fp)}.trans.md"
|
||||
res = write_results_to_file(final, file_name=create_report_file_name)
|
||||
generated_conclusion_files.append(
|
||||
f'./gpt_log/{create_report_file_name}')
|
||||
|
||||
# 更新UI
|
||||
generated_conclusion_files.append(f'./gpt_log/{create_report_file_name}')
|
||||
chatbot.append((f"{fp}完成了吗?", res))
|
||||
msg = "完成"
|
||||
yield chatbot, history, msg
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
# 准备文件的下载
|
||||
import shutil
|
||||
@@ -200,4 +128,4 @@ def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, histor
|
||||
if os.path.exists(pdf_path):
|
||||
os.remove(pdf_path)
|
||||
chatbot.append(("给出输出文件清单", str(generated_conclusion_files)))
|
||||
yield chatbot, history, msg
|
||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||
|
||||
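A note on the tokenizer change above: by default tiktoken's `encode` raises a `ValueError` when the input happens to contain a special-token string such as `<|endoftext|>`, which scraped PDF text occasionally does; passing `disallowed_special=()` disables that check. A quick standalone illustration (not project code):

```python
import tiktoken  # pip install tiktoken

enc = tiktoken.get_encoding("gpt2")
risky = "scraped text that happens to contain <|endoftext|> verbatim"
# enc.encode(risky) would raise ValueError: special tokens are not allowed by default
tokens = enc.encode(risky, disallowed_special=())  # treat special tokens as plain text
print(len(tokens))  # the token count, exactly what get_token_num above returns
```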
112  crazy_functions/理解PDF文档内容.py  Normal file
@@ -0,0 +1,112 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption
from .crazy_utils import read_and_clean_pdf_text
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import tiktoken
    print('begin analysis on:', file_name)

    ############################## <Step 0: split the PDF> ##################################
    # Recursively split the PDF; each chunk (preferably one complete section, e.g. introduction,
    # experiment, etc., subdivided further only when necessary) must stay under 2500 tokens
    file_content, page_one = read_and_clean_pdf_text(file_name) # (try to) split the PDF by section

    TOKEN_LIMIT_PER_FRAGMENT = 2500

    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from toolbox import get_conf
    enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
    # For better results, strip everything after the Introduction (if present)
    paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

    ############################## <Step 1: extract high-value information from the abstract into history> ##################################
    final_results = []
    final_results.append(paper_meta)

    ############################## <Step 2: iterate over the whole article, distilling it fragment by fragment> ##################################
    i_say_show_user = f'首先你在英文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。" # user-facing prompt
    chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[]) # update the UI

    iteration_results = []
    last_iteration_result = paper_meta # the initial value is the abstract
    MAX_WORD_TOTAL = 4096
    n_fragment = len(paper_fragments)
    if n_fragment >= 20: print('文章极长,不能达到预期效果')
    for i in range(n_fragment):
        NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say = the actual question sent to chatgpt; i_say_show_user = the question shown to the user
                                                                           llm_kwargs, chatbot,
                                                                           history=["The main idea of the previous section is?", last_iteration_result], # iterate on the previous result
                                                                           sys_prompt="Extract the main idea of this section." # prompt
                                                                           )
        iteration_results.append(gpt_say)
        last_iteration_result = gpt_say

    ############################## <Step 3: consolidate the history> ##################################
    final_results.extend(iteration_results)
    final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
    # The next two messages are only shown in the UI and have no real effect
    i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
    chatbot.append([i_say_show_user, gpt_say])

    ############################## <Step 4: set a token cap to prevent token overflow when answering> ##################################
    from .crazy_utils import input_clipping
    _, final_results = input_clipping("", final_results, max_token_limit=3200)
    yield from update_ui(chatbot=chatbot, history=final_results) # note that the history is replaced here


@CatchException
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # Basic info: feature, contributors
    chatbot.append([
        "函数插件功能?",
        "理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe, binary-husky"])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Try to import dependencies; if any is missing, suggest how to install it
    try:
        import fitz
    except:
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Check the input argument; if none was given, exit immediately
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "":
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # Build the manifest of files to process
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    # If no files were found
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    txt = file_manifest[0]
    # Start the task proper
    yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
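The Step 2 loop above is a rolling summary: each fragment is condensed under a per-fragment word budget, with the previous fragment's summary fed back in as conversation history. A minimal self-contained sketch of the same pattern (`ask_model` is a hypothetical stand-in for the project's threaded request helper):

```python
# A minimal sketch of the rolling-summary pattern in Step 2 above.
def ask_model(prompt: str, history: list) -> str:
    # hypothetical stand-in: plug in any chat-completion call here
    raise NotImplementedError

def rolling_summary(fragments: list, abstract: str, max_word_total: int = 4096) -> list:
    budget = max_word_total // max(len(fragments), 1)  # word budget per fragment
    last = abstract                                    # seed the loop with the abstract
    summaries = []
    for frag in fragments:
        prompt = f"Recapitulate this section in fewer than {budget} words: {frag}"
        # carrying the previous summary as history makes each step build on the last
        last = ask_model(prompt, ["The main idea of the previous section is?", last])
        summaries.append(last)
    return summaries
```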
@@ -1,43 +1,40 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
    import time, glob, os
def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, os
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8') as f:
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()

        i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
        i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # with countdown timeout
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # with countdown timeout

            print('[2] end gpt req')
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
            print('[4] next')
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
            if not fast_debug: time.sleep(2)

    if not fast_debug:
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI



@CatchException
def 批量生成函数注释(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -45,13 +42,13 @@ def 批量生成函数注释(txt, top_p, temperature, chatbot, history, systemPr
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]

    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 生成函数注释(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
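The same mechanical migration repeats throughout these files: loose `top_p`/`temperature` arguments are bundled into `llm_kwargs`, the old `predict_no_ui_but_counting_down` helper is replaced by `request_gpt_model_in_new_thread_with_ui_alive`, and raw `yield chatbot, history, '正常'` status updates become `update_ui` calls. A schematic side-by-side, using only the imports and argument shapes visible in the diffs (the absolute import path is an assumption based on the repository layout):

```python
# Schematic comparison of the two call shapes across version2.4 -> version2.6.
from toolbox import update_ui, predict_no_ui_but_counting_down
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def old_style(i_say, i_say_show_user, chatbot, history, top_p, temperature):
    # version2.4: loose sampling parameters, manual status yield
    gpt_say = yield from predict_no_ui_but_counting_down(
        i_say, i_say_show_user, chatbot, top_p, temperature, history=[])
    history.extend([i_say_show_user, gpt_say])
    yield chatbot, history, '正常'

def new_style(i_say, i_say_show_user, chatbot, history, llm_kwargs, system_prompt):
    # version2.6: parameters bundled into llm_kwargs, UI refreshed via a helper
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)
    history.extend([i_say_show_user, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)
```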
@@ -1,96 +1,103 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
fast_debug = False
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file

def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
    import time, glob, os
    print('begin analysis on:', file_manifest)
def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import os, copy
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
    from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
    msg = '正常'
    inputs_array = []
    inputs_show_user_array = []
    history_array = []
    sys_prompt_array = []
    report_part_1 = []

    assert len(file_manifest) <= 1024, "源文件太多(超过1024个), 请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。"
    ############################## <Step 1: analyze file by file, multi-threaded> ##################################
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8') as f:
        # read the file
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()

        prefix = "接下来请你逐文件分析下面的工程" if index==0 else ""
        i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield chatbot, history, '正常'
        # queue up the request
        inputs_array.append(i_say)
        inputs_show_user_array.append(i_say_show_user)
        history_array.append([])
        sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。")

    if not fast_debug:
        msg = '正常'
    # All files read; spawn one request thread per source file and send them to chatgpt for analysis
    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array = inputs_array,
        inputs_show_user_array = inputs_show_user_array,
        history_array = history_array,
        sys_prompt_array = sys_prompt_array,
        llm_kwargs = llm_kwargs,
        chatbot = chatbot,
        show_user_at_complete = True
    )

        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # with countdown timeout
    # Per-file analysis done; write the results to file and prepare the project-level consolidation
    report_part_1 = copy.deepcopy(gpt_response_collection)
    history_to_return = report_part_1
    res = write_results_to_file(report_part_1)
    chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
    yield from update_ui(chatbot=chatbot, history=history_to_return) # refresh the UI

        chatbot[-1] = (i_say_show_user, gpt_say)
        history.append(i_say_show_user); history.append(gpt_say)
        yield chatbot, history, msg
        if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # with countdown timeout

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
    ############################## <Step 2: consolidate, single-threaded, batched + iterative> ##################################
    batchsize = 16 # process the files 16 at a time
    report_part_2 = []
    previous_iteration_files = []
    last_iteration_result = ""
    while True:
        if len(file_manifest) == 0: break
        this_iteration_file_manifest = file_manifest[:batchsize]
        this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2]
        file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)]
        # Replace each verbose "please summarize this file" prompt with a terse "filename: {all_file[index]}"
        for index, content in enumerate(this_iteration_gpt_response_collection):
            if index%2==0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}" # keep only the file name to save tokens
        previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
        previous_iteration_files_string = ', '.join(previous_iteration_files)
        current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
        i_say = f'根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括{previous_iteration_files_string})。'
        inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
        this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
        this_iteration_history.append(last_iteration_result)
        result = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
            history=this_iteration_history, # the analyses from previous iterations
            sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
        report_part_2.extend([i_say, result])
        last_iteration_result = result

        file_manifest = file_manifest[batchsize:]
        gpt_response_collection = gpt_response_collection[batchsize*2:]

    ############################## <END> ##################################
    history_to_return.extend(report_part_2)
    res = write_results_to_file(history_to_return)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history_to_return) # refresh the UI

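Step 2 above deserves a note: `gpt_response_collection` interleaves prompts and replies as `[q0, a0, q1, a1, ...]`, so each batch slices `batchsize*2` entries and rewrites the even (prompt) slots down to bare file names before handing the batch, plus the running summary, back to the model. A compact sketch of that bookkeeping (`ask_model` again is a hypothetical stand-in for the project's request helper):

```python
# Sketch of the batched, iterative consolidation used by 解析源代码新 above.
def ask_model(prompt: str, history: list) -> str:
    raise NotImplementedError  # hypothetical stand-in for the request helper

def consolidate(file_manifest: list, collection: list, batchsize: int = 16) -> str:
    summary = ""
    while file_manifest:
        batch_files = file_manifest[:batchsize]
        batch_pairs = collection[:batchsize * 2]       # interleaved [q, a, q, a, ...]
        for i in range(0, len(batch_pairs), 2):
            batch_pairs[i] = batch_files[i // 2]       # keep only file names to save tokens
        # the previous iteration's summary rides along at the end of the history
        summary = ask_model("Summarize the overall design again.", batch_pairs + [summary])
        file_manifest = file_manifest[batchsize:]
        collection = collection[batchsize * 2:]
    return summary
```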
@CatchException
def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import time, glob, os
    import glob
    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
    for index, fp in enumerate(file_manifest):
        # if 'test_project' in fp: continue
        with open(fp, 'r', encoding='utf-8') as f:
            file_content = f.read()

        prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else ""
        i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield chatbot, history, '正常'

        if not fast_debug:
            # ** gpt request **
            # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature)
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], long_connection=True) # with countdown timeout

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield chatbot, history, '正常'
            time.sleep(2)

    i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'

    if not fast_debug:
        # ** gpt request **
        # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, long_connection=True) # with countdown timeout

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, '正常'
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, '正常'
                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]+ \
                    [f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
    project_folder = './'
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

@CatchException
def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -98,18 +105,18 @@ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPr
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -117,19 +124,19 @@ def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, s
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
                    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

@CatchException
def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -137,7 +144,7 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptT
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
@@ -145,13 +152,13 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptT
                    [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -159,7 +166,7 @@ def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemProm
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
@@ -167,13 +174,13 @@ def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemProm
                    [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -181,7 +188,7 @@ def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemProm
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
@@ -190,13 +197,13 @@ def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemProm
                    [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -204,11 +211,36 @@ def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPr
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)]
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.mod', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)

@@ -1,56 +1,53 @@
from request_llm.bridge_chatgpt import predict_no_ui
from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt):
def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8') as f:
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        print('[1] yield chatbot, history')
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # with countdown timeout
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt) # with countdown timeout

            print('[2] end gpt req')
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            print('[3] yield chatbot, history')
            yield chatbot, history, msg
            print('[4] next')
            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield chatbot, history, '正常'
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # with countdown timeout
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt) # with countdown timeout

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield chatbot, history, msg
        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI



@CatchException
def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = [] # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
@@ -58,13 +55,13 @@ def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTx
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return
    yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
106  crazy_functions/谷歌检索小助手.py  Normal file
@@ -0,0 +1,106 @@
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui

def get_meta_information(url, chatbot, history):
    import requests
    import arxiv
    import difflib
    from bs4 import BeautifulSoup
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
    }
    # send the GET request
    response = requests.get(url, proxies=proxies, headers=headers)

    # parse the page content
    soup = BeautifulSoup(response.text, "html.parser")

    def string_similar(s1, s2):
        return difflib.SequenceMatcher(None, s1, s2).quick_ratio()

    profile = []
    # collect every article's title and author
    for result in soup.select(".gs_ri"):
        title = result.a.text.replace('\n', ' ').replace('  ', ' ')
        author = result.select_one(".gs_a").text
        try:
            citation = result.select_one(".gs_fl > a[href*='cites']").text # the citation count is the text of the link; take it directly
        except:
            citation = 'cited by 0'
        abstract = result.select_one(".gs_rs").text.strip() # the abstract is the text inside .gs_rs; strip leading/trailing whitespace
        search = arxiv.Search(
            query = title,
            max_results = 1,
            sort_by = arxiv.SortCriterion.Relevance,
        )
        paper = next(search.results())
        if string_similar(title, paper.title) > 0.90: # same paper
            abstract = paper.summary.replace('\n', ' ')
            is_paper_in_arxiv = True
        else: # different paper
            abstract = abstract
            is_paper_in_arxiv = False
        paper = next(search.results())
        print(title)
        print(author)
        print(citation)
        profile.append({
            'title': title,
            'author': author,
            'citation': citation,
            'abstract': abstract,
            'is_paper_in_arxiv': is_paper_in_arxiv,
        })

        chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
        yield from update_ui(chatbot=chatbot, history=[]) # refresh the UI
    return profile

@CatchException
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: feature, contributors
    chatbot.append([
        "函数插件功能?",
        "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

    # Try to import dependencies; if any is missing, suggest how to install it
    try:
        import arxiv
        from bs4 import BeautifulSoup
    except:
        report_execption(chatbot, history,
                         a = f"解析项目: {txt}",
                         b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)

    if len(meta_paper_info_list[:10]) > 0:
        i_say = "下面是一些学术文献的数据,请从中提取出以下内容。" + \
            "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);4、引用数量(cite);5、中文摘要翻译。" + \
            f"以下是信息源:{str(meta_paper_info_list[:10])}"

        inputs_show_user = f"请分析此页面中出现的所有文章:{txt}"
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say, inputs_show_user=inputs_show_user,
            llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
            sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown格式。你必须逐个文献进行处理。"
        )

        history.extend([ "第一批", gpt_say ])
        meta_paper_info_list = meta_paper_info_list[10:]

    chatbot.append(["状态?", "已经全部完成"])
    msg = '正常'
    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
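The arXiv matching in `get_meta_information` is a fuzzy-title heuristic: take the top relevance hit for the Scholar title and accept its abstract only when the titles are more than 90% similar. Isolated as a standalone sketch (using only the `arxiv` and `difflib` calls already shown above):

```python
import difflib
import arxiv  # pip install arxiv

def fetch_arxiv_abstract(title: str, threshold: float = 0.90):
    """Return (abstract, found) for the closest arXiv match of `title`."""
    search = arxiv.Search(query=title, max_results=1,
                          sort_by=arxiv.SortCriterion.Relevance)
    paper = next(search.results())
    # quick_ratio() is a cheap upper-bound similarity; good enough for titles
    if difflib.SequenceMatcher(None, title, paper.title).quick_ratio() > threshold:
        return paper.summary.replace('\n', ' '), True
    return None, False
```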
@@ -1,20 +1,29 @@
from toolbox import CatchException
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
@CatchException
def 高阶功能模板函数(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             the text the user typed into the input field, e.g. a paragraph to translate, or a path containing files to process
    llm_kwargs      gpt model parameters such as temperature and top_p; usually passed through unchanged
    plugin_kwargs   plugin parameters; not used for anything yet
    chatbot         handle to the chat display box, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt given to gpt
    web_port        the port the software is currently running on
    """
    history = [] # clear the history to avoid input overflow
    chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
    yield chatbot, history, '正常' # requesting gpt takes a while, so show a status update promptly first
    chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # requesting gpt takes a while, so refresh the UI promptly first
    for i in range(5):
        currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
        currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
        i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say, inputs_show_user=i_say,
            top_p=top_p, temperature=temperature, chatbot=chatbot, history=[],
            llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
            sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。"
        )
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say);history.append(gpt_say)
        yield chatbot, history, '正常'
        yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
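The docstring above spells out the uniform plugin signature this release standardizes on. A minimal sketch of a new plugin built on that contract, placed as a file under crazy_functions/ (the name `示例插件` is a placeholder; the helpers are the project's own, as used throughout the diffs above):

```python
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

@CatchException
def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []  # clear the history to avoid input overflow
    chatbot.append((txt, "[Local Message] working..."))
    yield from update_ui(chatbot=chatbot, history=history)  # show status right away
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=txt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=system_prompt)
    history.extend([txt, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)
```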
294  img/README_EN.md  Normal file
@@ -0,0 +1,294 @@
# ChatGPT Academic Optimization

> **Note**
>
> This English readme is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
>

**If you like this project, please give it a star. If you have come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request (to the `dev` branch).**

> **Note**
>
> 1. Please note that only function plugins (buttons) marked in **red** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process PRs for any new plugins with the **highest priority**!
>
> 2. The functions of each file in this project are detailed in the self-translation report [self_analysis.md](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As versions iterate, you can click the relevant function plugin at any time to have GPT regenerate the project's self-analysis report. Frequently asked questions are collected in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
>
> 3. If you are not comfortable with functions, comments, or an interface partly named in Chinese, you can click the relevant function plugin at any time to have ChatGPT generate an English version of the project's source code.

<div align="center">

Function | Description
--- | ---
One-click polishing | Supports one-click polishing and one-click grammar checking of papers.
One-click translation between Chinese and English | One-click translation between Chinese and English.
One-click code interpretation | Displays and explains code correctly.
[Custom shortcuts](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcuts.
[Configure proxy server](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring a proxy server.
Modular design | Supports custom high-order experimental features and [function plug-ins]; plug-ins support [hot update](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
[Self-program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plug-in] [One-click understanding](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's source code.
[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plug-in] One-click analysis of another Python/C/C++/Java/Golang/Lua/Rect project tree.
Read papers | [Function Plug-in] One-click reading of a full latex paper, generating an abstract.
Latex full-text translation/polishing | [Function Plug-in] One-click translation or polishing of a latex paper.
Batch annotation generation | [Function Plug-in] One-click batch generation of function annotations.
Chat analysis report generation | [Function Plug-in] Automatically generates a summary report after a run.
[Arxiv assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function Plug-in] Enter an arxiv paper URL to translate the abstract and download the PDF with one click.
[PDF paper full-text translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function Plug-in] Extracts the title and abstract of a PDF paper and translates the full text (multi-threaded).
[Google Scholar integration assistant](https://www.bilibili.com/video/BV19L411U7ia) (Version>=2.45) | [Function Plug-in] Given any Google Scholar search page URL, let GPT help you pick interesting articles.
Formula display | Shows both the tex form and the rendered form of formulas.
Image display | Can display images in Markdown.
Multithreaded function plug-in support | Supports multi-threaded calls to chatgpt, one-click processing of massive texts or programs.
Markdown table support | Renders markdown tables produced by GPT.
Dark gradio [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Add ```/?__dark-theme=true``` to the browser URL to switch to the dark theme.
[Huggingface online experience (no proxy needed)](https://huggingface.co/spaces/qingxu98/gpt-academic) | After logging in to Huggingface, copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic).
[Mixed support for multiple LLM models](https://www.bilibili.com/video/BV1EM411K7VH/) ([v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0) in testing) | It must feel great to be served by both ChatGPT and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B)!
Compatible with [TGUI](https://github.com/oobabooga/text-generation-webui) for access to more language models | Access to opt-1.3b, galactica-1.3b and other models ([v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0) under testing).
… | ...

</div>

<!-- - New interface (left: master branch, right: dev development frontier) -->
- New interface (modify the `LAYOUT` option in `config.py` to switch between the "left-right layout" and the "top-down layout").
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>

- All buttons are dynamically generated by reading `functional.py`; custom functions can be added freely, freeing your clipboard.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Polishing/correction
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- Supports markdown tables output by GPT.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- If the output contains formulas, both the tex form and the rendered form are shown at once for easy copying and reading.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't want to read the project code? Let ChatGPT present the whole project for you.
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixed calling of multiple large language models ([v3.0 branch](https://github.com/binary-husky/chatgpt_academic/tree/v3.0) in testing).


## Running Directly (Windows, Linux or MacOS)

### 1. Download the Project
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
```

### 2. Configure API_KEY and Proxy Settings

In `config.py`, configure the overseas proxy and the OpenAI API KEY, as follows:
```
1. If you are in China, you need to set an overseas proxy to use the OpenAI API smoothly. Please read the instructions in config.py carefully (1. Modify USE_PROXY to True; 2. Modify the proxies according to the instructions).
2. Configure the OpenAI API KEY. You need to register on the OpenAI official website and obtain an API KEY, then configure it in the config.py file.
3. Issues related to proxy networks (network timeouts, proxy not working) are summarized at https://github.com/binary-husky/chatgpt_academic/issues/1
```
(Note: when the program runs, it first checks for a private configuration file named `config_private.py` and uses its entries to overwrite the same-named entries in `config.py`. If you understand this reading logic, we strongly recommend creating a configuration file named `config_private.py` next to `config.py` and copying the configuration from `config.py` into it. `config_private.py` is not tracked by Git, which keeps your private information safer.)
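In practice this means `config_private.py` only needs the entries you actually change. A rough sketch of the described precedence, for illustration only (not the project's literal loader):

```python
# Illustrative sketch of the config_private.py override described above.
import importlib

def read_config():
    config = {k: v for k, v in vars(importlib.import_module("config")).items()
              if not k.startswith("__")}
    try:
        private = importlib.import_module("config_private")
        # entries in config_private.py overwrite same-named entries in config.py
        config.update({k: v for k, v in vars(private).items() if not k.startswith("__")})
    except ModuleNotFoundError:
        pass  # no private config; fall back to config.py alone
    return config
```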
### 3. Install Dependencies
```sh
# (Option 1) Recommended
python -m pip install -r requirements.txt

# (Option 2) If you use anaconda, the steps are similar:
# (Option 2.1) conda create -n gptac_venv python=3.11
# (Option 2.2) conda activate gptac_venv
# (Option 2.3) python -m pip install -r requirements.txt

# Note: use the official pip source or the Aliyun pip source. Other pip sources (such as some university mirrors) may cause problems. Temporary workaround:
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
```

### 4. Run
```sh
python main.py
```

### 5. Test Experimental Features
```
- Test C++ project header analysis
    In the input area, enter `./crazy_functions/test_project/cpp/libJPG`, then click "[Experiment] Parse the entire C++ project (the input is the root path of the project)"
- Test writing abstracts for Latex projects
    In the input area, enter `./crazy_functions/test_project/latex/attention`, then click "[Experiment] Read the tex paper and write an abstract (the input is the root path of the project)"
- Test Python project analysis
    In the input area, enter `./crazy_functions/test_project/python/dqn`, then click "[Experiment] Parse the entire py project (the input is the root path of the project)"
- Test self-code interpretation
    Click "[Experiment] Please analyze and deconstruct this project itself"
- Test the experimental function template (asks GPT what happened in history today); you can implement more complex functions based on this template
    Click "[Experiment] Experimental function template"
```

## Use Docker (Linux)

``` sh
# Download the project
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
# Configure the overseas proxy and the OpenAI API KEY
# (edit config.py with any text editor)
# Install
docker build -t gpt-academic .
# Run
docker run --rm -it --net=host gpt-academic

# Test experimental features
## Test self-code interpretation
Click "[Experiment] Please analyze and deconstruct this project itself"
## Test the experimental function template (asks GPT what happened in history today); you can implement more complex functions based on this template
Click "[Experiment] Experimental function template"
## (Please note that when running in docker, you need to pay extra attention to the program's file access permissions.)
## Test C++ project header analysis
In the input area, enter ./crazy_functions/test_project/cpp/libJPG , then click "[Experiment] Parse the entire C++ project (the input is the root path of the project)"
## Test writing abstracts for Latex projects
In the input area, enter ./crazy_functions/test_project/latex/attention , then click "[Experiment] Read the tex paper and write an abstract (the input is the root path of the project)"
## Test Python project analysis
In the input area, enter ./crazy_functions/test_project/python/dqn , then click "[Experiment] Parse the entire py project (the input is the root path of the project)"

```

## Other Deployment Methods
- Use WSL2 (Windows Subsystem for Linux)
Please visit [Deploy Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

- nginx remote deployment
Please visit [Deploy Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E7%9A%84%E6%8C%87%E5%AF%BC)


## Customizing New Convenient Buttons (Academic Shortcut Key Customization)
Open functional.py, add an entry as shown below, and restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix support hot modification and take effect without restarting the program.)

For example,
```
"Super English-to-Chinese Translation": {

    # Prefix: added before your input. For example, used to describe your request, such as translation, code interpretation, polishing, etc.
    "Prefix": "Please translate the following content into Chinese, and then use a markdown table to explain each proprietary term in the text:\n\n",

    # Suffix: added after your input. Together with the prefix, it can, for example, wrap your input in quotes.
    "Suffix": "",

},
```
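Conceptually, each button just wraps whatever is in the input box; the effect of an entry amounts to the sketch below (illustrative, not the project's literal dispatch code):

```python
# Illustrative: how a functional.py entry transforms the input-box content.
entry = {
    "Prefix": "Please translate the following content into Chinese, and then "
              "use a markdown table to explain each proprietary term in the text:\n\n",
    "Suffix": "",
}

def apply_shortcut(user_input: str, entry: dict) -> str:
    # the prompt actually sent to the model is prefix + input + suffix
    return entry.get("Prefix", "") + user_input + entry.get("Suffix", "")
```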
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>


If you come up with a handier academic shortcut key, feel free to open an issue or pull request!

## Configure Proxy
### Method 1: General Method
Modify the proxy settings in ```config.py``` so that they match the port your proxy software uses.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226571294-37a47cd9-4d40-4c16-97a2-d360845406f7.png" width="500" >
<img src="https://user-images.githubusercontent.com/96192199/226838985-e5c95956-69c2-4c23-a4dd-cd7944eeb451.png" width="500" >
</div>


After configuring, you can use the following command to test whether the proxy works. If everything is normal, it prints the location of your proxy server:

```
python check_proxy.py
```
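For reference, the `proxies` value that `check_proxy.py` reads via `get_conf('proxies')` is a standard `requests`-style proxy dictionary. A sketch of the relevant `config.py` entries (the protocol and port are placeholders and must match your own proxy client):

```python
# In config.py — illustrative values only; adjust to your proxy client.
USE_PROXY = True
if USE_PROXY:
    proxies = {
        # "protocol://address:port", in the form the requests library expects
        "http":  "socks5h://localhost:11284",   # placeholder port
        "https": "socks5h://localhost:11284",   # placeholder port
    }
else:
    proxies = None
```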
|
||||
### Method Two: Pure Beginner Tutorial
|
||||
[Pure Beginner Tutorial](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
|
||||
|
||||
## Compatibility Testing

### Image Display:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
</div>

### Having the program read and analyze its own code:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
</div>

### Analysis of any other Python/C++ project:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
</div>

### One-click LaTeX paper reading comprehension and abstract generation

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
</div>

### Automatic Report Generation

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

### Modular Function Design

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

### Translating source code into English

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
</div>
## Todo and Version Planning:

- version 3 (Todo):
  - Support for GPT-4 and other LLMs
- version 2.4+ (Todo):
  - Summarize long texts and handle token overflow when reading large project source code
  - Project packaging and deployment
  - Optimize the function-plugin parameter interface
  - Self-updating
- version 2.4: (1) Added a PDF full-text translation function; (2) Added a function for switching the input area; (3) Added a vertical layout option; (4) Optimized multi-threaded function plugins.
- version 2.3: Enhanced multi-threaded interactivity
- version 2.2: Function plugins now support hot reloading
- version 2.1: Collapsible layout
- version 2.0: Introduced modular function plugins
- version 1.0: Basic functions
## References and Learning

```
The code references the designs of many other excellent projects, mainly including:

# Reference project 1: ChuanhuChatGPT, from which we borrowed the methods for reading the OpenAI JSON stream, recording conversation history, and using the gradio queue
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Reference project 2:
https://github.com/THUDM/ChatGLM-6B
```
BIN img/公式.gif: binary file not shown (before: 11 MiB)
BIN img/润色.gif: binary file not shown (before: 12 MiB)

main.py (30 changed lines):
```diff
@@ -4,15 +4,16 @@ from request_llm.bridge_chatgpt import predict
 from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith

 # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT = \
-    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT')
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')

 # 如果WEB_PORT是-1, 则随机选取WEB端口
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 if not AUTHENTICATION: AUTHENTICATION = None

+from check_proxy import get_current_version
 initial_prompt = "Serve me as a writing and programming assistant."
-title_html = "<h1 align=\"center\">ChatGPT 学术优化</h1>"
+title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
 description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""

 # 问询记录, python 版本建议3.9+(越新越好)
@@ -49,8 +50,9 @@ if LAYOUT == "TOP-DOWN":
     CHATBOT_HEIGHT /= 2

 cancel_handles = []
-with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
+with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
     gr.HTML(title_html)
+    cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
     with gr_L1():
         with gr_L2(scale=2):
             chatbot = gr.Chatbot()
@@ -116,16 +118,16 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
         return ret
     checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2] )
     # 整理反复出现的控件句柄组合
-    input_combo = [txt, txt2, top_p, temperature, chatbot, history, system_prompt]
-    output_combo = [chatbot, history, status]
+    input_combo = [cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
+    output_combo = [cookies, chatbot, history, status]
     predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
     # 提交按钮、重置按钮
     cancel_handles.append(txt.submit(**predict_args))
     cancel_handles.append(txt2.submit(**predict_args))
     cancel_handles.append(submitBtn.click(**predict_args))
     cancel_handles.append(submitBtn2.click(**predict_args))
-    resetBtn.click(lambda: ([], [], "已重置"), None, output_combo)
-    resetBtn2.click(lambda: ([], [], "已重置"), None, output_combo)
+    resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
+    resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
     # 基础功能区的回调函数注册
     for k in functional:
         click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
@@ -160,15 +162,13 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
 def auto_opentab_delay():
     import threading, webbrowser, time
     print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
-    print(f"\t(亮色主体): http://localhost:{PORT}")
-    print(f"\t(暗色主体): http://localhost:{PORT}/?__dark-theme=true")
+    print(f"\t(亮色主题): http://localhost:{PORT}")
+    print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
     def open():
-        time.sleep(2)
-        try: auto_update() # 检查新版本
-        except: pass
+        time.sleep(2)  # 打开浏览器
         webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
     threading.Thread(target=open, name="open-browser", daemon=True).start()
+    threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()

 auto_opentab_delay()
-demo.title = "ChatGPT 学术优化"
-demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
+demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
```
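The thread running through these `main.py` changes is session state: a `gr.State` dict (`cookies`) becomes the first element of both `input_combo` and `output_combo`, so a per-user API key and model choice travel with every callback instead of living in module globals. A minimal, self-contained sketch of the same pattern in plain gradio (the names here are illustrative, not this repository's code):

```python
import gradio as gr

def respond(cookies, text, chatbot):
    # `cookies` is per-session state: return the (possibly mutated) dict and
    # gradio hands it back to you on the next event from the same session
    if text.startswith('sk-'):
        cookies['api_key'] = text
        chatbot = chatbot + [(None, "api_key accepted")]
    else:
        chatbot = chatbot + [(text, f"(would call {cookies['llm_model']} here)")]
    return cookies, chatbot

with gr.Blocks() as demo:
    cookies = gr.State({'api_key': '', 'llm_model': 'gpt-3.5-turbo'})
    chatbot = gr.Chatbot()
    txt = gr.Textbox()
    txt.submit(respond, [cookies, txt, chatbot], [cookies, chatbot])

demo.launch()
```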
The guide for using other large language models (the file header was not captured in this view):

````diff
@@ -1,4 +1,4 @@
-# 如何使用其他大语言模型(dev分支测试中)
+# 如何使用其他大语言模型(v3.0分支测试中)

 ## 1. 先运行text-generation
 ``` sh
````
request_llm/bridge_chatgpt.py:

```diff
@@ -21,7 +21,7 @@ import importlib

 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf
+from toolbox import get_conf, update_ui
 proxies, API_URL, API_KEY, TIMEOUT_SECONDS, MAX_RETRY, LLM_MODEL = \
     get_conf('proxies', 'API_URL', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'LLM_MODEL')
@@ -39,47 +39,15 @@ def get_full_error(chunk, stream_response):
         break
     return chunk

-def predict_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
-    """
-    发送至chatGPT,等待回复,一次性完成,不显示中间过程。
-    predict函数的简化版。
-    用于payload比较大的情况,或者用于实现多线、带嵌套的复杂功能。
-
-    inputs 是本次问询的输入
-    top_p, temperature是chatGPT的内部调优参数
-    history 是之前的对话列表
-    (注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误,然后raise ConnectionAbortedError)
-    """
-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=False)
-
-    retry = 0
-    while True:
-        try:
-            # make a POST request to the API endpoint, stream=False
-            response = requests.post(API_URL, headers=headers, proxies=proxies,
-                                     json=payload, stream=False, timeout=TIMEOUT_SECONDS*2); break
-        except requests.exceptions.ReadTimeout as e:
-            retry += 1
-            traceback.print_exc()
-            if retry > MAX_RETRY: raise TimeoutError
-            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')
-
-    try:
-        result = json.loads(response.text)["choices"][0]["message"]["content"]
-        return result
-    except Exception as e:
-        if "choices" not in response.text: print(response.text)
-        raise ConnectionAbortedError("Json解析不合常规,可能是文本过长" + response.text)
-
-
-def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_prompt="", observe_window=None):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
     发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
     inputs:
         是本次问询的输入
     sys_prompt:
         系统静默prompt
-    top_p, temperature:
+    llm_kwargs:
         chatGPT的内部调优参数
     history:
         是之前的对话列表
@@ -87,7 +55,7 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr
         用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
     """
     watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt=sys_prompt, stream=True)
+    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
     retry = 0
     while True:
         try:
@@ -104,7 +72,10 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr
     result = ''
     while True:
         try: chunk = next(stream_response).decode()
-        except StopIteration: break
+        except StopIteration:
+            break
+        except requests.exceptions.ConnectionError:
+            chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。
         if len(chunk)==0: continue
         if not chunk.startswith('data:'):
             error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
@@ -118,22 +89,21 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr
             if "role" in delta: continue
             if "content" in delta:
                 result += delta["content"]
-                print(delta["content"], end='')
+                if not console_slience: print(delta["content"], end='')
                 if observe_window is not None:
                     # 观测窗,把已经获取的数据显示出去
                     if len(observe_window) >= 1: observe_window[0] += delta["content"]
                     # 看门狗,如果超过期限没有喂狗,则终止
                     if len(observe_window) >= 2:
                         if (time.time()-observe_window[1]) > watch_dog_patience:
-                            raise RuntimeError("程序终止。")
+                            raise RuntimeError("用户取消了程序。")
             else: raise RuntimeError("意外Json结构:"+delta)
     if json_data['finish_reason'] == 'length':
         raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
     return result


-def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
-            stream = True, additional_fn=None):
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
     发送至chatGPT,流式获取输出。
     用于基础的对话功能。
@@ -143,6 +113,16 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
     additional_fn代表点击的哪个按钮,按钮见functional.py
     """
+    if inputs.startswith('sk-') and len(inputs) == 51:
+        chatbot._cookies['api_key'] = inputs
+        chatbot.append(("输入已识别为openai的api_key", "api_key已导入"))
+        yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
+        return
+    elif len(chatbot._cookies['api_key']) != 51:
+        chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
+        yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
+        return
+
     if additional_fn is not None:
         import core_functional
         importlib.reload(core_functional) # 热更新prompt
@@ -154,9 +134,9 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
     raw_input = inputs
     logging.info(f'[raw_input] {raw_input}')
     chatbot.append((inputs, ""))
-    yield chatbot, history, "等待响应"
+    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

-    headers, payload = generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
+    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
     history.append(inputs); history.append(" ")

     retry = 0
@@ -169,7 +149,7 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
             retry += 1
             chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
             retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
-            yield chatbot, history, "请求超时"+retry_msg
+            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
             if retry > MAX_RETRY: raise TimeoutError

     gpt_replying_buffer = ""
@@ -197,11 +177,11 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
                     gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
-                    yield chatbot, history, status_text
+                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面

                 except Exception as e:
                     traceback.print_exc()
-                    yield chatbot, history, "Json解析不合常规"
+                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
                     chunk = get_full_error(chunk, stream_response)
                     error_msg = chunk.decode()
                     if "reduce the length" in error_msg:
@@ -215,16 +195,19 @@ def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt=''
         from toolbox import regular_txt_to_markdown
         tb_str = '```\n' + traceback.format_exc() + '```'
         chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
-        yield chatbot, history, "Json异常" + error_msg
+        yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
         return

-def generate_payload(inputs, top_p, temperature, history, system_prompt, stream):
+def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     """
     整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
     """
+    if len(llm_kwargs['api_key']) != 51:
+        raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
+
     headers = {
         "Content-Type": "application/json",
-        "Authorization": f"Bearer {API_KEY}"
+        "Authorization": f"Bearer {llm_kwargs['api_key']}"
     }

     conversation_cnt = len(history) // 2
@@ -252,17 +235,19 @@ def generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
     messages.append(what_i_ask_now)

     payload = {
-        "model": LLM_MODEL,
+        "model": llm_kwargs['llm_model'],
         "messages": messages,
-        "temperature": temperature,  # 1.0,
-        "top_p": top_p,  # 1.0,
+        "temperature": llm_kwargs['temperature'],  # 1.0,
+        "top_p": llm_kwargs['top_p'],  # 1.0,
         "n": 1,
         "stream": stream,
         "presence_penalty": 0,
         "frequency_penalty": 0,
     }
-    print(f" {LLM_MODEL} : {conversation_cnt} : {inputs}")
+    try:
+        print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
+    except:
+        print('输入中可能存在乱码。')
     return headers,payload
```
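The recurring edit in this file is mechanical: every bare `yield chatbot, history, msg` becomes `yield from update_ui(...)`. Since `update_ui` (added to `toolbox.py`, diffed further down) yields `(cookies, chatbot, history, msg)`, each streamed refresh stays aligned with the widened `output_combo = [cookies, chatbot, history, status]` in `main.py`. A runnable sketch of the convention, with a stripped-down stand-in for the real `ChatBotWithCookies`:

```python
class ChatBotWithCookies(list):
    # stand-in for the toolbox.py class: a chat list that carries a cookie dict
    def __init__(self, cookies):
        self._cookies = cookies
    def get_cookies(self):
        return self._cookies

def update_ui(chatbot, history, msg='正常'):
    # one generator for every UI refresh, so the cookie state is always output first
    yield chatbot.get_cookies(), chatbot, history, msg

chatbot = ChatBotWithCookies({'api_key': 'sk-...', 'llm_model': 'gpt-3.5-turbo'})
chatbot.append(("hello", ""))
for cookies, bot, hist, msg in update_ui(chatbot, history=[], msg="等待响应"):
    print(cookies['llm_model'], list(bot), hist, msg)
```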
The text-generation-webui (TGUI) bridge:

```diff
@@ -12,7 +12,7 @@ import logging
 import time
 import threading
 import importlib
-from toolbox import get_conf
+from toolbox import get_conf, update_ui
 LLM_MODEL, = get_conf('LLM_MODEL')

 # "TGUI:galactica-1.3b@localhost:7860"
@@ -90,7 +90,7 @@ async def run(context, max_token=512):



-def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream = True, additional_fn=None):
+def predict_tgui(inputs, top_p, temperature, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
     """
     发送至chatGPT,流式获取输出。
     用于基础的对话功能。
@@ -111,7 +111,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
     logging.info(f'[raw_input] {raw_input}')
     history.extend([inputs, ""])
     chatbot.append([inputs, ""])
-    yield chatbot, history, "等待响应"
+    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

     prompt = inputs
     tgui_say = ""
@@ -138,7 +138,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
         tgui_say = mutable[0]
         history[-1] = tgui_say
         chatbot[-1] = (history[-2], history[-1])
-        yield chatbot, history, "status_text"
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

     logging.info(f'[response] {tgui_say}')
```
requirements.txt:

```diff
@@ -1,8 +1,16 @@
-gradio>=3.23
+gradio==3.25.0
+tiktoken>=0.3.3
 requests[socks]
-mdtex2html
-Markdown
-latex2mathml
-openai
 transformers
-numpy
+python-markdown-math
+beautifulsoup4
+latex2mathml
+python-docx
+mdtex2html
+colorama
+Markdown
+pygments
+pymupdf
+openai
+numpy
+arxiv
```
show_math.py (80 lines, file deleted):

```diff
@@ -1,80 +0,0 @@
-# This program is written by: https://github.com/polarwinkel/mdtex2html
-
-from latex2mathml.converter import convert as tex2mathml
-import re
-
-incomplete = '<font style="color:orange;" class="tooltip">⚠<span class="tooltiptext">formula incomplete</span></font>'
-convError = '<font style="color:red" class="tooltip">⚠<span class="tooltiptext">LaTeX-convert-error</span></font>'
-
-def convert(mdtex, extensions=[], splitParagraphs=True):
-    ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML '''
-    found = False
-    # handle all paragraphs separately (prevents aftereffects)
-    if splitParagraphs:
-        parts = re.split("\n\n", mdtex)
-        result = ''
-        for part in parts:
-            result += convert(part, extensions, splitParagraphs=False)
-        return result
-    # find first $$-formula:
-    parts = re.split('\${2}', mdtex, 2)
-    if len(parts)>1:
-        found = True
-        result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
-        try:
-            result += '<div class="blockformula">'+tex2mathml(parts[1])+'</div>\n'
-        except:
-            result += '<div class="blockformula">'+convError+'</div>'
-        if len(parts)==3:
-            result += convert(parts[2], extensions, splitParagraphs=False)
-        else:
-            result += '<div class="blockformula">'+incomplete+'</div>'
-    # else find first $-formulas:
-    else:
-        parts = re.split('\${1}', mdtex, 2)
-        if len(parts)>1 and not found:
-            found = True
-            try:
-                mathml = tex2mathml(parts[1])
-            except:
-                mathml = convError
-            if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
-                parts[0]=parts[0]+'&#x200B;'
-            if len(parts)==3:
-                result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False)
-            else:
-                result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
-        # else find first \[..\]-equation:
-        else:
-            parts = re.split(r'\\\[', mdtex, 1)
-            if len(parts)>1 and not found:
-                found = True
-                result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
-                parts = re.split(r'\\\]', parts[1], 1)
-                try:
-                    result += '<div class="blockformula">'+tex2mathml(parts[0])+'</div>\n'
-                except:
-                    result += '<div class="blockformula">'+convError+'</div>'
-                if len(parts)==2:
-                    result += convert(parts[1], extensions, splitParagraphs=False)
-                else:
-                    result += '<div class="blockformula">'+incomplete+'</div>'
-            # else find first \(..\)-equation:
-            else:
-                parts = re.split(r'\\\(', mdtex, 1)
-                if len(parts)>1 and not found:
-                    found = True
-                    subp = re.split(r'\\\)', parts[1], 1)
-                    try:
-                        mathml = tex2mathml(subp[0])
-                    except:
-                        mathml = convError
-                    if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
-                        parts[0]=parts[0]+'&#x200B;'
-                    if len(subp)==2:
-                        result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False)
-                    else:
-                        result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
-    if not found:
-        result = mdtex
-    return result
```
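`show_math.py` (a copy of polarwinkel/mdtex2html's converter) is deleted because its hand-rolled `$`-splitting is superseded by the `python-markdown-math` (`mdx_math`) extension wired into `toolbox.markdown_convertion` further down. A minimal sketch of the replacement path, assuming `markdown` and `python-markdown-math` are installed:

```python
import markdown

configs = {'mdx_math': {'enable_dollar_delimiter': True}}
html = markdown.markdown(
    r"Euler: $e^{i\pi}+1=0$",
    extensions=['mdx_math', 'fenced_code', 'tables'],
    extension_configs=configs,
)
# inline math comes back wrapped as <script type="math/tex">...</script>,
# which markdown_convertion then post-processes with latex2mathml
print(html)
```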
theme.py (77 changed lines):

```diff
@@ -1,5 +1,6 @@
 import gradio as gr
-
+from toolbox import get_conf
+CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT')
 # gradio可用颜色列表
 # gr.themes.utils.colors.slate (石板色)
 # gr.themes.utils.colors.gray (灰色)
@@ -153,4 +154,78 @@ advanced_css = """
 padding: 1em;
 margin: 1em 2em 1em 0.5em;
 }
 """
+
+if CODE_HIGHLIGHT:
+    advanced_css += """
+.hll { background-color: #ffffcc }
+.c { color: #3D7B7B; font-style: italic } /* Comment */
+.err { border: 1px solid #FF0000 } /* Error */
+.k { color: hsl(197, 94%, 51%); font-weight: bold } /* Keyword */
+.o { color: #666666 } /* Operator */
+.ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.cp { color: #9C6500 } /* Comment.Preproc */
+.cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.gd { color: #A00000 } /* Generic.Deleted */
+.ge { font-style: italic } /* Generic.Emph */
+.gr { color: #E40000 } /* Generic.Error */
+.gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.gi { color: #008400 } /* Generic.Inserted */
+.go { color: #717171 } /* Generic.Output */
+.gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.gs { font-weight: bold } /* Generic.Strong */
+.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.gt { color: #a9dd00 } /* Generic.Traceback */
+.kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.kp { color: #008000 } /* Keyword.Pseudo */
+.kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.kt { color: #B00040 } /* Keyword.Type */
+.m { color: #666666 } /* Literal.Number */
+.s { color: #BA2121 } /* Literal.String */
+.na { color: #687822 } /* Name.Attribute */
+.nb { color: #e5f8c3 } /* Name.Builtin */
+.nc { color: #ffad65; font-weight: bold } /* Name.Class */
+.no { color: #880000 } /* Name.Constant */
+.nd { color: #AA22FF } /* Name.Decorator */
+.ni { color: #717171; font-weight: bold } /* Name.Entity */
+.ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.nf { color: #f9f978 } /* Name.Function */
+.nl { color: #767600 } /* Name.Label */
+.nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.nt { color: #008000; font-weight: bold } /* Name.Tag */
+.nv { color: #19177C } /* Name.Variable */
+.ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.w { color: #bbbbbb } /* Text.Whitespace */
+.mb { color: #666666 } /* Literal.Number.Bin */
+.mf { color: #666666 } /* Literal.Number.Float */
+.mh { color: #666666 } /* Literal.Number.Hex */
+.mi { color: #666666 } /* Literal.Number.Integer */
+.mo { color: #666666 } /* Literal.Number.Oct */
+.sa { color: #BA2121 } /* Literal.String.Affix */
+.sb { color: #BA2121 } /* Literal.String.Backtick */
+.sc { color: #BA2121 } /* Literal.String.Char */
+.dl { color: #BA2121 } /* Literal.String.Delimiter */
+.sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.s2 { color: #2bf840 } /* Literal.String.Double */
+.se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.sh { color: #BA2121 } /* Literal.String.Heredoc */
+.si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.sx { color: #008000 } /* Literal.String.Other */
+.sr { color: #A45A77 } /* Literal.String.Regex */
+.s1 { color: #BA2121 } /* Literal.String.Single */
+.ss { color: #19177C } /* Literal.String.Symbol */
+.bp { color: #008000 } /* Name.Builtin.Pseudo */
+.fm { color: #0000FF } /* Name.Function.Magic */
+.vc { color: #19177C } /* Name.Variable.Class */
+.vg { color: #19177C } /* Name.Variable.Global */
+.vi { color: #19177C } /* Name.Variable.Instance */
+.vm { color: #19177C } /* Name.Variable.Magic */
+.il { color: #666666 } /* Literal.Number.Integer.Long */
+"""
```
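These `.hll`/`.k`/`.nf` rules are standard Pygments token classes; they only take effect because `markdown_convertion` (in the `toolbox.py` diff below) now renders fenced code with the `codehilite` extension. If you ever need to regenerate such a stylesheet, Pygments can emit one directly (the colors will differ from the hand-tuned values above):

```python
from pygments.formatters import HtmlFormatter

# prints CSS rules for the same token classes (.k, .s, .nf, ...) used in theme.py
print(HtmlFormatter(style='default').get_style_defs(''))
```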
toolbox.py (185 changed lines):

```diff
@@ -3,26 +3,65 @@ import mdtex2html
 import threading
 import importlib
 import traceback
-import importlib
 import inspect
 import re
-from show_math import convert as convert_math
+from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
+
+############################### 插件输入输出接驳区 #######################################
+class ChatBotWithCookies(list):
+    def __init__(self, cookie):
+        self._cookies = cookie
+
+    def write_list(self, list):
+        for t in list:
+            self.append(t)
+
+    def get_list(self):
+        return [t for t in self]
+
+    def get_cookies(self):
+        return self._cookies
+
 def ArgsGeneralWrapper(f):
     """
     装饰器函数,用于重组输入参数,改变输入参数的顺序与结构。
     """
-    def decorated(txt, txt2, *args, **kwargs):
+    def decorated(cookies, txt, txt2, top_p, temperature, chatbot, history, system_prompt, *args):
         txt_passon = txt
-        if txt == "" and txt2 != "":
-            txt_passon = txt2
-        yield from f(txt_passon, *args, **kwargs)
+        if txt == "" and txt2 != "": txt_passon = txt2
+        # 引入一个有cookie的chatbot
+        cookies.update({
+            'top_p':top_p,
+            'temperature':temperature,
+        })
+        llm_kwargs = {
+            'api_key': cookies['api_key'],
+            'llm_model': cookies['llm_model'],
+            'top_p':top_p,
+            'temperature':temperature,
+        }
+        plugin_kwargs = {
+            # 目前还没有
+        }
+        chatbot_with_cookie = ChatBotWithCookies(cookies)
+        chatbot_with_cookie.write_list(chatbot)
+        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
     return decorated
+
+def update_ui(chatbot, history, msg='正常', **kwargs): # 刷新界面
+    """
+    刷新用户界面
+    """
+    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
+    yield chatbot.get_cookies(), chatbot, history, msg
+############################### ################## #######################################
+##########################################################################################

 def get_reduce_token_percent(text):
+    """
+    * 此函数未来将被弃用
+    """
     try:
         # text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
         pattern = r"(\d+)\s+tokens\b"
@@ -36,9 +75,10 @@ def get_reduce_token_percent(text):
     except:
         return 0.5, '不详'

-def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt='', long_connection=True):
+def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, llm_kwargs, history=[], sys_prompt='', long_connection=True):
     """
+    * 此函数未来将被弃用(替代函数 request_gpt_model_in_new_thread_with_ui_alive 文件 chatgpt_academic/crazy_functions/crazy_utils)
+
     调用简单的predict_no_ui接口,但是依然保留了些许界面心跳功能,当对话太长时,会自动采用二分法截断
     i_say: 当前输入
     i_say_show_user: 显示到对话界面上的当前输入,例如,输入整个文件时,你绝对不想把文件的内容都糊到对话界面上
@@ -46,10 +86,10 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     top_p, temperature: gpt参数
     history: gpt参数 对话历史
     sys_prompt: gpt参数 sys_prompt
-    long_connection: 是否采用更稳定的连接方式(推荐)
+    long_connection: 是否采用更稳定的连接方式(推荐)(已弃用)
     """
     import time
-    from request_llm.bridge_chatgpt import predict_no_ui, predict_no_ui_long_connection
+    from request_llm.bridge_chatgpt import predict_no_ui_long_connection
     from toolbox import get_conf
     TIMEOUT_SECONDS, MAX_RETRY = get_conf('TIMEOUT_SECONDS', 'MAX_RETRY')
     # 多线程的时候,需要一个mutable结构在不同线程之间传递信息
@@ -60,13 +100,9 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     def mt(i_say, history):
         while True:
             try:
-                if long_connection:
-                    mutable[0] = predict_no_ui_long_connection(
-                        inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
-                else:
-                    mutable[0] = predict_no_ui(
-                        inputs=i_say, top_p=top_p, temperature=temperature, history=history, sys_prompt=sys_prompt)
-                break
+                mutable[0] = predict_no_ui_long_connection(
+                    inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt)
+
             except ConnectionAbortedError as token_exceeded_error:
                 # 尝试计算比例,尽可能多地保留文本
                 p_ratio, n_exceed = get_reduce_token_percent(
@@ -92,7 +128,7 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
             cnt += 1
             chatbot[-1] = (i_say_show_user,
                            f"[Local Message] {mutable[1]}waiting gpt response {cnt}/{TIMEOUT_SECONDS*2*(MAX_RETRY+1)}"+''.join(['.']*(cnt % 4)))
-            yield chatbot, history, '正常'
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
             time.sleep(1)
     # 把gpt的输出从mutable中取出来
     gpt_say = mutable[0]
@@ -156,13 +192,19 @@ def CatchException(f):
             chatbot = [["插件调度异常", "异常原因"]]
             chatbot[-1] = (chatbot[-1][0],
                            f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
-            yield chatbot, history, f'异常 {e}'
+            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}') # 刷新界面
     return decorated


 def HotReload(f):
     """
-    装饰器函数,实现函数插件热更新
+    HotReload的装饰器函数,用于实现Python函数插件的热更新。
+    函数热更新是指在不停止程序运行的情况下,更新函数代码,从而达到实时更新功能。
+    在装饰器内部,使用wraps(f)来保留函数的元信息,并定义了一个名为decorated的内部函数。
+    内部函数通过使用importlib模块的reload函数和inspect模块的getmodule函数来重新加载并获取函数模块,
+    然后通过getattr函数获取函数名,并在新模块中重新加载函数。
+    最后,使用yield from语句返回重新加载过的函数,并在被装饰的函数上执行。
+    最终,装饰器函数返回内部函数。这个内部函数可以将函数的原始定义更新为最新版本,并执行函数的新版本。
     """
     @wraps(f)
     def decorated(*args, **kwargs):
@@ -203,15 +245,76 @@ def markdown_convertion(txt):
     """
     pre = '<div class="markdown-body">'
     suf = '</div>'
-    if ('$' in txt) and ('```' not in txt):
-        return pre + markdown.markdown(txt, extensions=['fenced_code', 'tables']) + '<br><br>' + markdown.markdown(convert_math(txt, splitParagraphs=False), extensions=['fenced_code', 'tables']) + suf
+    markdown_extension_configs = {
+        'mdx_math': {
+            'enable_dollar_delimiter': True,
+            'use_gitlab_delimiters': False,
+        },
+    }
+    find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
+
+    def tex2mathml_catch_exception(content, *args, **kwargs):
+        try:
+            content = tex2mathml(content, *args, **kwargs)
+        except:
+            content = content
+        return content
+
+    def replace_math_no_render(match):
+        content = match.group(1)
+        if 'mode=display' in match.group(0):
+            content = content.replace('\n', '</br>')
+            return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
+        else:
+            return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
+
+    def replace_math_render(match):
+        content = match.group(1)
+        if 'mode=display' in match.group(0):
+            if '\\begin{aligned}' in content:
+                content = content.replace('\\begin{aligned}', '\\begin{array}')
+                content = content.replace('\\end{aligned}', '\\end{array}')
+                content = content.replace('&', ' ')
+            content = tex2mathml_catch_exception(content, display="block")
+            return content
+        else:
+            return tex2mathml_catch_exception(content)
+
+    def markdown_bug_hunt(content):
+        """
+        解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
+        """
+        content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
+        content = content.replace('</script>\n</script>', '</script>')
+        return content
+
+    if ('$' in txt) and ('```' not in txt):  # 有$标识的公式符号,且没有代码段```的标识
+        # convert everything to html format
+        split = markdown.markdown(text='---')
+        convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
+        convert_stage_1 = markdown_bug_hunt(convert_stage_1)
+        # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
+        # 1. convert to easy-to-copy tex (do not render math)
+        convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
+        # 2. convert to rendered equation
+        convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
+        # cat them together
+        return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
     else:
-        return pre + markdown.markdown(txt, extensions=['fenced_code', 'tables']) + suf
+        return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf


 def close_up_code_segment_during_stream(gpt_reply):
     """
     在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的```
+
+    Args:
+        gpt_reply (str): GPT模型返回的回复字符串。
+
+    Returns:
+        str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
+
     """
     if '```' not in gpt_reply:
         return gpt_reply
@@ -236,11 +339,9 @@ def format_io(self, y):
         return []
     i_ask, gpt_reply = y[-1]
     i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
-    gpt_reply = close_up_code_segment_during_stream(
-        gpt_reply) # 当代码输出半截的时候,试着补上后个```
+    gpt_reply = close_up_code_segment_during_stream(gpt_reply) # 当代码输出半截的时候,试着补上后个```
     y[-1] = (
-        None if i_ask is None else markdown.markdown(
-            i_ask, extensions=['fenced_code', 'tables']),
+        None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
         None if gpt_reply is None else markdown_convertion(gpt_reply)
     )
     return y
@@ -367,27 +468,30 @@ def on_report_generated(files, chatbot):
     chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
     return report_files, chatbot

+def is_openai_api_key(key):
+    # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合
+    API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
+    return API_MATCH
+
 @lru_cache(maxsize=128)
 def read_single_conf_with_lru_cache(arg):
+    from colorful import print亮红, print亮绿
     try:
         r = getattr(importlib.import_module('config_private'), arg)
     except:
         r = getattr(importlib.import_module('config'), arg)
     # 在读取API_KEY时,检查一下是不是忘了改config
     if arg == 'API_KEY':
-        # 正确的 API_KEY 是 "sk-" + 48 位大小写字母数字的组合
-        API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", r)
-        if API_MATCH:
-            print(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
+        if is_openai_api_key(r):
+            print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
         else:
-            assert False, "正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
-                "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)"
+            print亮红( "[API_KEY] 正确的 API_KEY 是 'sk-' + '48 位大小写字母数字' 的组合,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \
+                "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)")
     if arg == 'proxies':
         if r is None:
-            print('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
+            print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问。建议:检查USE_PROXY选项是否修改。')
         else:
-            print('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
+            print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
         assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
     return r
@@ -409,6 +513,15 @@ def clear_line_break(txt):


 class DummyWith():
+    """
+    这段代码定义了一个名为DummyWith的空上下文管理器,
+    它的作用是……额……没用,即在代码结构不变得情况下取代其他的上下文管理器。
+    上下文管理器是一种Python对象,用于与with语句一起使用,
+    以确保一些资源在代码块执行期间得到正确的初始化和清理。
+    上下文管理器必须实现两个方法,分别为 __enter__()和 __exit__()。
+    在上下文执行开始的情况下,__enter__()方法会在代码块被执行前被调用,
+    而在上下文执行结束时,__exit__()方法则会被调用。
+    """
     def __enter__(self):
         return self
```
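`DummyWith` exists so call sites can keep a single `with` statement whatever the configuration. A self-contained sketch of a hypothetical call site (`Timer` and `ENABLE_TIMING` are illustrative, not from this repository):

```python
import time

class DummyWith():
    # no-op stand-in: keeps the `with` structure intact when a feature is off
    def __enter__(self): return self
    def __exit__(self, exc_type, exc_value, traceback): return

class Timer():
    # hypothetical "real" context manager
    def __enter__(self):
        self.t0 = time.time()
        return self
    def __exit__(self, *exc):
        print(f"took {time.time() - self.t0:.3f}s")

ENABLE_TIMING = False  # illustrative flag
with (Timer() if ENABLE_TIMING else DummyWith()):
    sum(range(10**6))  # the guarded work runs either way
```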