Compare commits

572 commits · version3.4 …
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 6 changes)

```yaml
@@ -69,9 +69,3 @@ body:
    attributes:
      label: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
      description: Terminal Traceback & Material to Help Reproduce Bugs | 终端traceback(如有) + 帮助我们复现的测试材料样本(如有)
```
.github/ISSUE_TEMPLATE/feature_request.yml (vendored, 5 changes)

```yaml
@@ -21,8 +21,3 @@ body:
    attributes:
      label: Feature Request | 功能请求
      description: Feature Request | 功能请求
```
.github/workflows/build-with-all-capacity-beta.yml (vendored, new file, 44 lines)

```yaml
@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity-beta

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity_beta

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacityBeta
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
.github/workflows/build-with-all-capacity.yml (vendored, new file, 44 lines)

```yaml
@@ -0,0 +1,44 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-all-capacity

on:
  push:
    branches:
      - 'master'

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_all_capacity

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Log in to the Container registry
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      - name: Build and push Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          file: docs/GithubAction+AllCapacity
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
```
.github/workflows/stale.yml (vendored, new file, 25 lines)

```yaml
@@ -0,0 +1,25 @@
# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
#
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale

name: 'Close stale issues and PRs'
on:
  schedule:
    - cron: '*/5 * * * *'

jobs:
  stale:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: read

    steps:
      - uses: actions/stale@v8
        with:
          stale-issue-message: 'This issue is stale because it has been open 100 days with no activity. Remove stale label or comment or this will be closed in 1 days.'
          days-before-stale: 100
          days-before-close: 1
          debug-only: true
```
.gitignore (vendored, 7 changes)

```
@@ -146,9 +146,10 @@ debug*
private*
crazy_functions/test_project/pdf_and_word
crazy_functions/test_samples
request_llm/jittorllms
request_llms/jittorllms
multi-language
request_llm/moss
request_llms/moss
media
flagged
request_llm/ChatGLM-6b-onnx-u8s8
request_llms/ChatGLM-6b-onnx-u8s8
.pre-commit-config.yaml
```
Dockerfile (22 changes)

```dockerfile
@@ -1,34 +1,34 @@
# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型或者latex运行依赖,请参考 docker-compose.yml
# 如何构建: 先修改 `config.py`, 然后 `docker build -t gpt-academic . `
# 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic `
# 如何运行(其他操作系统,选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
# 此Dockerfile适用于“无本地模型”的迷你运行环境构建
# 如果需要使用chatglm等本地模型或者latex运行依赖,请参考 docker-compose.yml
# - 如何构建: 先修改 `config.py`, 然后 `docker build -t gpt-academic . `
# - 如何运行(Linux下): `docker run --rm -it --net=host gpt-academic `
# - 如何运行(其他操作系统,选择任意一个固定端口50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
FROM python:3.11


# 非必要步骤,更换pip源
# 非必要步骤,更换pip源 (以下三行,可以删除)
RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf


# 进入工作路径
# 进入工作路径(必要)
WORKDIR /gpt


# 安装大部分依赖,利用Docker缓存加速以后的构建
# 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除)
COPY requirements.txt ./
COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
RUN pip3 install -r requirements.txt


# 装载项目文件,安装剩余依赖
# 装载项目文件,安装剩余依赖(必要)
COPY . .
RUN pip3 install -r requirements.txt


# 非必要步骤,用于预热模块
# 非必要步骤,用于预热模块(可以删除)
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'


# 启动
# 启动(必要)
CMD ["python3", "-u", "main.py"]
```
README.md (372 changes)

@@ -1,70 +1,94 @@
> **Note**
> [!IMPORTANT]
> 2024.1.18: 更新3.70版本,支持Mermaid绘图库(让大模型绘制脑图)
> 2024.1.17: 恭迎GLM4,全力支持Qwen、GLM、DeepseekCoder等国内中文大语言基座模型!
> 2024.1.17: 某些依赖包尚不兼容python 3.12,推荐python 3.11。
> 2024.1.17: 安装依赖时,请选择`requirements.txt`中**指定的版本**。 安装命令:`pip install -r requirements.txt`。本项目完全开源免费,您可通过订阅[在线服务](https://github.com/binary-husky/gpt_academic/wiki/online)的方式鼓励本项目的发展。

<br>

<div align=center>
<h1 aligh="center">
<img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)
</h1>

[![Github][Github-image]][Github-url]
[![License][License-image]][License-url]
[![Releases][Releases-image]][Releases-url]
[![Installation][Installation-image]][Installation-url]
[![Wiki][Wiki-image]][Wiki-url]
[![PR][PRs-image]][PRs-url]

[Github-image]: https://img.shields.io/badge/github-12100E.svg?style=flat-square
[License-image]: https://img.shields.io/github/license/binary-husky/gpt_academic?label=License&style=flat-square&color=orange
[Releases-image]: https://img.shields.io/github/release/binary-husky/gpt_academic?label=Release&style=flat-square&color=blue
[Installation-image]: https://img.shields.io/badge/dynamic/json?color=blue&url=https://raw.githubusercontent.com/binary-husky/gpt_academic/master/version&query=$.version&label=Installation&style=flat-square
[Wiki-image]: https://img.shields.io/badge/wiki-项目文档-black?style=flat-square
[PRs-image]: https://img.shields.io/badge/PRs-welcome-pink?style=flat-square

[Github-url]: https://github.com/binary-husky/gpt_academic
[License-url]: https://github.com/binary-husky/gpt_academic/blob/master/LICENSE
[Releases-url]: https://github.com/binary-husky/gpt_academic/releases
[Installation-url]: https://github.com/binary-husky/gpt_academic#installation
[Wiki-url]: https://github.com/binary-husky/gpt_academic/wiki
[PRs-url]: https://github.com/binary-husky/gpt_academic/pulls

</div>
<br>

**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或插件,欢迎发pull requests!**

If you like this project, please give it a Star.
Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
<br>

> [!NOTE]
> 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
> [](#installation) [](https://github.com/binary-husky/gpt_academic/releases) [](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) []([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
>
> 2023.7.8: Gradio, Pydantic依赖调整,已修改 `requirements.txt`。请及时**更新代码**,安装依赖时,请严格选择`requirements.txt`中**指定的版本**
>
> `pip install -r requirements.txt`

# <div align=center><img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)</div>

**如果喜欢这个项目,请给它一个Star;如果您发明了好用的快捷键或函数插件,欢迎发pull requests!**

If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
To translate this project to arbitary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).

> **Note**
>
> 1.请注意只有 **高亮(如红色)** 标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR。
>
> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/gpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。[安装方法](#installation)。
>
> 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM和Moss等等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。

> 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。

<br><br>

<div align="center">

功能(⭐= 近期新增功能) | 描述
--- | ---
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | ⭐阿里达摩院[通义千问](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/)
一键润色 | 支持一键润色、一键查找论文语法错误
一键中英互译 | 一键中英互译
一键代码解释 | 显示代码、解释代码、生成代码、给代码加注释
⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本)
⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
模块化设计 | 支持自定义强大的[函数插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[自我程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] [一键读懂](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)本项目的源代码
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] 一键可以剖析其他Python/C/C++/Java/Lua/...项目树
读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [函数插件] 一键解读latex/pdf论文全文并生成摘要
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [函数插件] 一键翻译或润色latex论文
批量注释生成 | [函数插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?
chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [函数插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
互联网信息聚合+GPT | [函数插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [函数插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [函数插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [插件] 一键剖析Python/C/C++/Java/Lua/...项目树 或 [自我剖析](https://www.bilibili.com/video/BV1cj411A7VW)
读论文、[翻译](https://www.bilibili.com/video/BV1KT411x7Wn)论文 | [插件] 一键解读latex/pdf论文全文并生成摘要
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
批量注释生成 | [插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
多线程函数插件支持 | 支持多线调用chatgpt,一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序
启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)同时伺候的感觉一定会很不错吧?
⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
⭐[虚空终端](https://github.com/binary-husky/void-terminal)pip包 | 脱离GUI,在Python中直接调用本项目的函数插件(开发中)
⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中)
更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……
</div>

- 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
<img src="https://user-images.githubusercontent.com/96192199/279702205-d81137c3-affd-4cd1-bb5e-b15610389762.gif" width="700" >
</div>

- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板
- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放剪贴板
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

@@ -74,66 +98,99 @@ Latex论文一键校对 | [函数插件] 仿Grammarly对Latex文章进行语法
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读
- 如果输出包含公式,会以tex形式和渲染形式同时显示,方便复制和阅读
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- 懒得看项目代码?整个工程直接给chatgpt炫嘴里
- 懒得看项目代码?直接把整个工程炫ChatGPT嘴里
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

<br><br>

# Installation

```mermaid
flowchart TD
    A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)")
    W1 --> W11["1. Python pip包管理依赖"]
    W1 --> W12["2. Anaconda包管理依赖(推荐⭐)"]

    A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"]

    W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"]
    W2 --> k2["2. 仅在线模型(GPT, GLM4等)镜像"]
    W2 --> k3["3. 在线模型 + Latex的大镜像"]

    A --> W4["IV. 🚀其他部署方法"]
    W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"]
    W4 --> C2["2. Huggingface, Sealos远程部署"]
    W4 --> C4["3. ... 其他 ..."]
```

### 安装方法I:直接运行 (Windows, Linux or MacOS)

1. 下载项目
```sh
git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

2. 配置API_KEY
```sh
git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
cd gpt_academic
```

在`config.py`中,配置API KEY等设置,[点击查看特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1) 。
2. 配置API_KEY等变量

(P.S. 程序运行时会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。因此,如果您能理解我们的配置读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中。`config_private.py`不受git管控,可以让您的隐私信息更加安全。P.S.项目同样支持通过`环境变量`配置大多数选项,环境变量的书写格式参考`docker-compose`文件。读取优先级: `环境变量` > `config_private.py` > `config.py`)
在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。

「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,从而确保自动更新时不会丢失配置 」。

「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。
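The read-priority rule quoted above (`环境变量` > `config_private.py` > `config.py`) can be illustrated with a minimal sketch. This is not the project's actual implementation; the helper name `read_single_conf` is assumed for illustration only:

```python
import importlib
import os

def read_single_conf(key):
    # Illustrative sketch of the documented priority:
    # environment variable > config_private.py > config.py
    if key in os.environ:
        return os.environ[key]  # note: real code would also need type conversion
    try:
        # a private config file, when present, overrides config.py
        return getattr(importlib.import_module("config_private"), key)
    except (ImportError, AttributeError):
        return getattr(importlib.import_module("config"), key)
```

Under this rule, a value exported in the shell wins over both files, which is why the docker-compose deployment path can configure everything through environment variables.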
3. 安装依赖
```sh
# (选择I: 如熟悉python)(python版本3.9以上,越新越好),备注:使用官方pip源或者阿里pip源,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt
```sh
# (选择I: 如熟悉python, python推荐版本 3.9 ~ 3.11)备注:使用官方pip源或者阿里pip源, 临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
python -m pip install -r requirements.txt

# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # 创建anaconda环境
conda activate gptac_venv                 # 激活anaconda环境
python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
```
# (选择II: 使用Anaconda)步骤也是类似的 (https://www.bilibili.com/video/BV1rc411W7Dr):
conda create -n gptac_venv python=3.11    # 创建anaconda环境
conda activate gptac_venv                 # 激活anaconda环境
python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步骤
```

<details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
<p>

【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):

```sh
# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llm/requirements_chatglm.txt
# 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt

# 【可选步骤II】支持复旦MOSS
python -m pip install -r request_llm/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss  # 注意执行此行代码时,必须处于项目根路径
python -m pip install -r request_llms/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # 注意执行此行代码时,必须处于项目根路径

# 【可选步骤III】支持RWKV Runner
参考wiki:https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner

# 【可选步骤IV】确保config.py配置文件的AVAIL_LLM_MODELS包含了期望的模型,目前支持的全部模型如下(jittorllms系列目前仅支持docker方案):
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

# 【可选步骤V】支持本地模型INT8,INT4量化(这里所指的模型本身不是量化版本,目前deepseek-coder支持,后面测试后会加入更多模型量化选择)
pip install bitsandbyte
# windows用户安装bitsandbytes需要使用下面bitsandbytes-windows-webui
python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
pip install -U git+https://github.com/huggingface/transformers.git
pip install -U git+https://github.com/huggingface/accelerate.git
pip install peft
```

</p>
@@ -142,78 +199,64 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-

4. 运行
```sh
python main.py
```
```sh
python main.py
```

### 安装方法II:使用Docker

1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1)
0. 部署项目的全部能力(这个是包含cuda和latex的大型镜像。但如果您网速慢、硬盘小,则不推荐该方法部署完整项目)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)

``` sh
# 修改docker-compose.yml,保留方案0并删除其他方案。然后运行:
docker-compose up
```

1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)

``` sh
git clone --depth=1 https://github.com/binary-husky/gpt_academic.git  # 下载项目
cd gpt_academic                                 # 进入路径
nano config.py                                  # 用任意文本编辑器编辑config.py, 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等
docker build -t gpt-academic .                  # 安装
``` sh
# 修改docker-compose.yml,保留方案1并删除其他方案。然后运行:
docker-compose up
```

#(最后一步-Linux操作系统)用`--net=host`更方便快捷
docker run --rm -it --net=host gpt-academic
#(最后一步-MacOS/Windows操作系统)只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口
docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
```
P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用docker-compose获取Latex功能(修改docker-compose.yml,保留方案4并删除其他方案)。
P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。

2. ChatGPT + ChatGLM2 + MOSS(需要熟悉Docker)
2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)

``` sh
# 修改docker-compose.yml,保留方案2并删除其他方案。修改docker-compose.yml中方案2的配置,参考其中注释即可
docker-compose up
```

3. ChatGPT + LLAMA + 盘古 + RWKV(需要熟悉Docker)
[](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml)

``` sh
# 修改docker-compose.yml,保留方案3并删除其他方案。修改docker-compose.yml中方案3的配置,参考其中注释即可
docker-compose up
```
``` sh
# 修改docker-compose.yml,保留方案2并删除其他方案。然后运行:
docker-compose up
```

### 安装方法III:其他部署姿势
1. 一键运行脚本。
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。
脚本的贡献来源是[oobabooga](https://github.com/oobabooga/one-click-installers)。
### 安装方法III:其他部署方法
1. **Windows一键运行脚本**。
完全不熟悉python环境的Windows用户可以下载[Release](https://github.com/binary-husky/gpt_academic/releases)中发布的一键运行脚本安装无本地模型的版本。脚本贡献来源:[oobabooga](https://github.com/oobabooga/one-click-installers)。

2. 使用docker-compose运行。
请阅读docker-compose.yml后,按照其中的提示操作即可
2. 使用第三方API、Azure等、文心一言、星火等,见[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)

3. 如何使用反代URL
按照`config.py`中的说明配置API_URL_REDIRECT即可。
3. 云服务器远程部署避坑指南。
请访问[云服务器远程部署wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

4. 微软云AzureAPI
按照`config.py`中的说明配置即可(AZURE_ENDPOINT等四个配置)

5. 远程云服务器部署(需要云服务器知识与经验)。
请访问[部署wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

6. 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。

7. 使用WSL2(Windows Subsystem for Linux 子系统)。
请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

8. 如何在二级网址(如`http://localhost/subpath`)下运行。
请访问[FastAPI运行说明](docs/WithFastapi.md)
4. 在其他平台部署&二级网址部署
- 使用Sealos[一键部署](https://github.com/binary-husky/gpt_academic/issues/993)。
- 使用WSL2(Windows Subsystem for Linux 子系统)。请访问[部署wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
- 如何在二级网址(如`http://localhost/subpath`)下运行。请访问[FastAPI运行说明](docs/WithFastapi.md)

<br><br>

# Advanced Usage
### I:自定义新的便捷按钮(学术快捷键)
任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。)

任意文本编辑器打开`core_functional.py`,添加如下条目,然后重启程序。(如果按钮已存在,那么可以直接修改(前缀、后缀都已支持热修改),无需重启程序即可生效。)
例如
```

```python
"超级英译中": {
    # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
    "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
@@ -222,19 +265,20 @@ docker-compose up
    "Suffix": "",
},
```

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

### II:自定义函数插件

编写强大的函数插件来执行任何你想得到的和想不到的任务。
本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。
详情请参考[函数插件指南](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。
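To make the plugin guide above concrete, here is a minimal, illustrative plugin skeleton. `CatchException` and `update_ui` are helpers from this project's `toolbox` module; the exact parameter list is modeled on the project's plugin template and should be checked against the guide rather than treated as authoritative:

```python
from toolbox import CatchException, update_ui

@CatchException
def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # txt: user input; chatbot: the dialog widget; history: chat history
    chatbot.append((txt, "Plugin received the request, working on it ..."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
```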
<br><br>

# Latest Update
### I:新功能动态
# Updates
### I:动态

1. 对话保存功能。在函数插件区调用 `保存当前的对话` 即可将当前对话保存为可读+可复原的html文件,
另外在函数插件区(下拉菜单)调用 `载入对话历史存档` ,即可还原之前的会话。
@@ -249,10 +293,13 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/9fdcc391-f823-464f-9322-f8719677043b" height="250" >
</div>

3. 生成报告。大部分插件都会在执行结束后,生成工作报告
3. 虚空终端(从自然语言输入中,理解用户意图+自动调用其他插件)

- 步骤一:输入 “ 请调用插件翻译PDF论文,地址为https://openreview.net/pdf?id=rJl0r3R9KX ”
- 步骤二:点击“虚空终端”

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="250" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="250" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/66f1b044-e9ff-4eed-9126-5d4f3668f1ed" width="500" >
</div>

4. 模块化功能设计,简单的接口却能支持强大的功能
@@ -272,35 +319,41 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
<img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
</div>

7. 新增MOSS大语言模型支持
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
</div>

8. OpenAI图像生成
7. OpenAI图像生成
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI音频解析与总结
8. 基于mermaid的流图、脑图绘制
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
</div>

10. Latex全文校对纠错
9. Latex全文校对纠错
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" height="200" > ===>
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/476f66d9-7716-4537-b5c1-735372c25adb" height="200">
</div>

11. 语言、主题切换
10. 语言、主题切换
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/b6799499-b6fb-4f0c-9c8e-1b441872f4e8" width="500" >
</div>


### II:版本:
- version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)
- version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件
- version 3.70: 引入Mermaid绘图,实现GPT画脑图等功能
- version 3.60: 引入AutoGen作为新一代插件的基石
- version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
- version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
- version 3.55: 重构前端界面,引入悬浮窗口与菜单栏
- version 3.54: 新增动态代码解释器(Code Interpreter)(待完善)
- version 3.53: 支持动态选择不同界面主题,提高稳定性&解决多用户冲突问题
- version 3.50: 使用自然语言调用本项目的所有函数插件(虚空终端),支持插件分类,改进UI,设计新主题
- version 3.49: 支持百度千帆平台和文心一言
- version 3.48: 支持阿里达摩院通义千问,上海AI-Lab书生,讯飞星火
- version 3.46: 支持完全脱手操作的实时语音对话
- version 3.45: 支持自定义ChatGLM2微调模型
- version 3.44: 正式支持Azure,优化界面易用性
@@ -311,25 +364,58 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
- version 3.0: 对chatglm和其他小型llm的支持
- version 2.6: 重构了插件结构,提高了交互性,加入更多插件
- version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题
- version 2.4: (1)新增PDF全文翻译功能; (2)新增输入区切换位置的功能; (3)新增垂直布局选项; (4)多线程函数插件优化。
- version 2.4: 新增PDF全文翻译功能; 新增输入区切换位置的功能
- version 2.3: 增强多线程交互性
- version 2.2: 函数插件支持热重载
- version 2.1: 可折叠式布局
- version 2.0: 引入模块化函数插件
- version 1.0: 基础功能

gpt_academic开发者QQ群-2:610599535
GPT Academic开发者QQ群:`610599535`

- 已知问题
    - 某些浏览器翻译插件干扰此软件前端的运行
    - 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio
    - 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio**

```mermaid
timeline LR
    title GPT-Academic项目发展历程
    section 2.x
        1.0~2.2: 基础功能: 引入模块化函数插件: 可折叠式布局: 函数插件支持热重载
        2.3~2.5: 增强多线程交互性: 新增PDF全文翻译功能: 新增输入区切换位置的功能: 自更新
        2.6: 重构了插件结构: 提高了交互性: 加入更多插件
    section 3.x
        3.0~3.1: 对chatglm支持: 对其他小型llm支持: 支持同时问询多个gpt模型: 支持多个apikey负载均衡
        3.2~3.3: 函数插件支持更多参数接口: 保存对话功能: 解读任意语言代码: 同时询问任意的LLM组合: 互联网信息综合功能
        3.4: 加入arxiv论文翻译: 加入latex论文批改功能
        3.44: 正式支持Azure: 优化界面易用性
        3.46: 自定义ChatGLM2微调模型: 实时语音对话
        3.49: 支持阿里达摩院通义千问: 上海AI-Lab书生: 讯飞星火: 支持百度千帆平台 & 文心一言
        3.50: 虚空终端: 支持插件分类: 改进UI: 设计新主题
        3.53: 动态选择不同界面主题: 提高稳定性: 解决多用户冲突问题
        3.55: 动态代码解释器: 重构前端界面: 引入悬浮窗口与菜单栏
        3.56: 动态追加基础功能按钮: 新汇报PDF汇总页面
        3.57: GLM3, 星火v3: 支持文心一言v4: 修复本地模型的并发BUG
        3.60: 引入AutoGen
        3.70: 引入Mermaid绘图: 实现GPT画脑图等功能
        3.80(TODO): 优化AutoGen插件主题: 设计衍生插件
```

### III:主题
可以通过修改`THEME`选项(config.py)变更主题
1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)

### IV:参考与学习
### IV:本项目的开发分支

1. `master` 分支: 主分支,稳定版
2. `frontier` 分支: 开发分支,测试版
3. 如何[接入其他大模型](request_llms/README.md)
4. 访问GPT-Academic的[在线服务并支持我们](https://github.com/binary-husky/gpt_academic/wiki/online)

### V:参考与学习

```
代码中参考了很多其他优秀项目中的设计,顺序不分先后:
```
check_proxy.py

```python
@@ -5,7 +5,6 @@ def check_proxy(proxies):
    try:
        response = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
        data = response.json()
        print(f'查询代理的地理位置,返回的结果是{data}')
        if 'country_name' in data:
            country = data['country_name']
            result = f"代理配置 {proxies_https}, 代理所在地:{country}"
@@ -46,9 +45,9 @@ def backup_and_download(current_version, remote_version):
        return new_version_dir
    os.makedirs(new_version_dir)
    shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
    proxies, = get_conf('proxies')
    r = requests.get(
        'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
    proxies = get_conf('proxies')
    try: r = requests.get('https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
    except: r = requests.get('https://public.gpt-academic.top/publish/master.zip', proxies=proxies, stream=True)
    zip_file_path = backup_dir+'/master.zip'
    with open(zip_file_path, 'wb+') as f:
        f.write(r.content)
@@ -111,11 +110,10 @@ def auto_update(raise_error=False):
    try:
        from toolbox import get_conf
        import requests
        import time
        import json
        proxies, = get_conf('proxies')
        response = requests.get(
            "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
        proxies = get_conf('proxies')
        try: response = requests.get("https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
        except: response = requests.get("https://public.gpt-academic.top/publish/version", proxies=proxies, timeout=5)
        remote_json_data = json.loads(response.text)
        remote_version = remote_json_data['version']
        if remote_json_data["show_feature"]:
@@ -127,8 +125,7 @@ def auto_update(raise_error=False):
        current_version = json.loads(current_version)['version']
        if (remote_version - current_version) >= 0.01-1e-5:
            from colorful import print亮黄
            print亮黄(
                f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
            print亮黄(f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
            print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
            user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
            if user_instruction in ['Y', 'y']:
@@ -154,16 +151,26 @@ def auto_update(raise_error=False):
        print(msg)

def warm_up_modules():
    print('正在执行一些模块的预热...')
    from request_llm.bridge_all import model_info
    print('正在执行一些模块的预热 ...')
    from toolbox import ProxyNetworkActivate
    from request_llms.bridge_all import model_info
    with ProxyNetworkActivate("Warmup_Modules"):
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        enc.encode("模块预热", disallowed_special=())
        enc = model_info["gpt-4"]['tokenizer']
        enc.encode("模块预热", disallowed_special=())

def warm_up_vectordb():
    print('正在执行一些模块的预热 ...')
    from toolbox import ProxyNetworkActivate
    with ProxyNetworkActivate("Warmup_Modules"):
        import nltk
        with ProxyNetworkActivate("Warmup_Modules"): nltk.download("punkt")


if __name__ == '__main__':
    import os
    os.environ['no_proxy'] = '*'  # 避免代理网络产生意外污染
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    proxies = get_conf('proxies')
    check_proxy(proxies)
```
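The hunks above repeatedly change `proxies, = get_conf('proxies')` (tuple unpacking) into `proxies = get_conf('proxies')`, which suggests `get_conf` now returns the bare value when a single key is requested. A sketch of that calling convention, with an assumed implementation (the real one lives elsewhere in the project, e.g. `toolbox.py`):

```python
import os

def read_single_conf(key):
    # placeholder reader, for illustration only
    return os.environ.get(key)

def get_conf(*keys):
    # assumed behavior: one key -> bare value, several keys -> tuple
    values = [read_single_conf(k) for k in keys]
    return values[0] if len(values) == 1 else tuple(values)

proxies = get_conf('proxies')           # new style: no trailing comma needed
host, port = get_conf('HOST', 'PORT')   # multiple keys still unpack as a tuple
```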
config.py (240 changes)

```python
@@ -11,17 +11,17 @@
API_KEY = "此处填API密钥"    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"


# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改
# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改
USE_PROXY = False
if USE_PROXY:
    """
    代理网络的地址,打开你的代理软件查看代理协议(socks5h / http)、地址(localhost)和端口(11284)
    填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
    <配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1>
    [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
    [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
    [地址] 填localhost或者127.0.0.1(localhost意思是代理软件安装在本机上)
    [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上
    """
    # 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284)
    proxies = {
        # [协议]:// [地址] :[端口]
        "http": "socks5h://localhost:11284",  # 再例如  "http":  "http://127.0.0.1:7890",
@@ -43,7 +43,17 @@ API_URL_REDIRECT = {}
DEFAULT_WORKER_NUM = 3


# 对话窗的高度
# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"]
# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...]
THEME = "Default"
AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]


# 默认的系统提示词(system prompt)
INIT_SYS_PROMPT = "Serve me as a writing and programming assistant."


# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效)
CHATBOT_HEIGHT = 1115


@@ -53,7 +63,10 @@ CODE_HIGHLIGHT = True

# 窗口布局
LAYOUT = "LEFT-RIGHT"   # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)
DARK_MODE = True        # 暗色模式 / 亮色模式


# 暗色模式 / 亮色模式
DARK_MODE = True


# 发送请求到OpenAI后,等待多久判定为超时
@@ -68,21 +81,52 @@ WEB_PORT = -1
MAX_RETRY = 2


# 插件分类默认选项
DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']


# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
LLM_MODEL = "gpt-3.5-turbo"  # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. 其他可用的模型还包括 ["qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "spark", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
LLM_MODEL = "gpt-3.5-turbo-16k"  # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
                    "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
                    "gemini-pro", "chatglm3", "claude-2", "zhipuai"]
# P.S. 其他可用的模型还包括 [
#   "moss", "qwen-turbo", "qwen-plus", "qwen-max"
#   "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
#   "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
#   "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
# ]


# ChatGLM(2) Finetune Model Path (如果使用ChatGLM2微调模型,需要把"chatglmft"加入AVAIL_LLM_MODELS中)
ChatGLM_PTUNING_CHECKPOINT = ""  # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
# 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"


# 选择本地模型变体(只有当AVAIL_LLM_MODELS包含了对应本地模型时,才会起作用)
# 如果你选择Qwen系列的模型,那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
# 也可以是具体的模型路径
QWEN_LOCAL_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"


# 接入通义千问在线大模型 https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY = ""  # 阿里灵积云API_KEY


# 百度千帆(LLM_MODEL="qianfan")
BAIDU_CLOUD_API_KEY = ''
BAIDU_CLOUD_SECRET_KEY = ''
BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot'  # 可选 "ERNIE-Bot-4"(文心大模型4.0), "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat"


# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径
CHATGLM_PTUNING_CHECKPOINT = ""  # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"


# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu"  # 可选 "cuda"
LOCAL_MODEL_QUANT = "FP16"  # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本


# 设置gradio的并行线程数(不需要修改)
CONCURRENT_COUNT = 100

@@ -91,10 +135,6 @@ CONCURRENT_COUNT = 100
AUTO_CLEAR_TXT = False


# 色彩主体,可选 ["Default", "Chuanhu-Small-and-Beautiful"]
THEME = "Default"


# 加一个live2d装饰
ADD_WAIFU = False

@@ -108,22 +148,31 @@ AUTHENTICATION = []
CUSTOM_PATH = "/"


# HTTPS 秘钥和证书(不需要修改)
SSL_KEYFILE = ""
SSL_CERTFILE = ""


# 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用
API_ORG = ""


# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md
# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md
SLACK_CLAUDE_BOT_ID = ''
SLACK_CLAUDE_USER_TOKEN = ''


# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md
# 如果需要使用AZURE(方法一:单个azure模型部署)详情请见额外文档 docs\use_azure.md
AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
AZURE_API_KEY = "填入azure openai api的密钥"  # 建议直接在API_KEY处填写,该选项即将被弃用
AZURE_ENGINE = "填入你亲手写的部署名"  # 读 docs\use_azure.md


# 使用Newbing
# 如果需要使用AZURE(方法二:多个azure模型部署+动态切换)详情请见额外文档 docs\use_azure.md
AZURE_CFG_ARRAY = {}
```
# 使用Newbing (不推荐使用,未来将删除)
|
||||
NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
|
||||
NEWBING_COOKIES = """
|
||||
put your new bing cookies here
|
||||
@@ -144,9 +193,164 @@ XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||
XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
||||
|
||||
|
||||
# 接入智谱大模型
|
||||
ZHIPUAI_API_KEY = ""
|
||||
ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
|
||||
|
||||
|
||||
# # 火山引擎YUNQUE大模型
|
||||
# YUNQUE_SECRET_KEY = ""
|
||||
# YUNQUE_ACCESS_KEY = ""
|
||||
# YUNQUE_MODEL = ""
|
||||
|
||||
|
||||
# Claude API KEY
|
||||
ANTHROPIC_API_KEY = ""
|
||||
|
||||
|
||||
# 自定义API KEY格式
|
||||
CUSTOM_API_KEY_PATTERN = ""
|
||||
|
||||
|
||||
# Google Gemini API-Key
|
||||
GEMINI_API_KEY = ''
|
||||
|
||||
|
||||
# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
|
||||
HUGGINGFACE_ACCESS_TOKEN = "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"
|
||||
|
||||
|
||||
# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档
|
||||
# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
|
||||
GROBID_URLS = [
|
||||
"https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
|
||||
"https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
|
||||
"https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
|
||||
]
|
||||
|
||||
|
||||
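Since several GROBID endpoints may be configured for load balancing, a caller needs some way to pick a live one. A minimal sketch (illustrative; the project's own selection logic may differ) that probes endpoints in random order using GROBID's `/api/isalive` health check:

```python
import random
import requests  # assumed available; used only for the reachability probe

def pick_grobid_url(urls):
    """Randomly probe the configured GROBID endpoints and return a live one."""
    for url in random.sample(urls, k=len(urls)):
        try:
            # GROBID exposes GET /api/isalive as a health check
            if requests.get(url.rstrip('/') + "/api/isalive", timeout=3).ok:
                return url
        except requests.RequestException:
            continue  # endpoint down, try the next one
    raise RuntimeError("no GROBID endpoint reachable")
```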
# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
ALLOW_RESET_CONFIG = False


# 在使用AutoGen插件时,是否使用Docker容器运行代码
AUTOGEN_USE_DOCKER = False


# 临时的上传文件夹位置,请勿修改
PATH_PRIVATE_UPLOAD = "private_upload"


# 日志文件夹的位置,请勿修改
PATH_LOGGING = "gpt_log"


# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
                     "Warmup_Modules", "Nougat_Download", "AutoGen"]


# *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用
BLOCK_INVALID_APIKEY = False


# 启用插件热加载
PLUGIN_HOT_RELOAD = False


# 自定义按钮的最大数量限制
NUM_CUSTOM_BASIC_BTN = 4

"""
在线大模型配置关联关系示意图
│
├── "gpt-3.5-turbo" 等openai模型
│   ├── API_KEY
│   ├── CUSTOM_API_KEY_PATTERN(不常用)
│   ├── API_ORG(不常用)
│   └── API_URL_REDIRECT(不常用)
│
├── "azure-gpt-3.5" 等azure模型(单个azure模型,不需要动态切换)
│   ├── API_KEY
│   ├── AZURE_ENDPOINT
│   ├── AZURE_API_KEY
│   ├── AZURE_ENGINE
│   └── API_URL_REDIRECT
│
├── "azure-gpt-3.5" 等azure模型(多个azure模型,需要动态切换,高优先级)
│   └── AZURE_CFG_ARRAY
│
├── "spark" 星火认知大模型 spark & sparkv2
│   ├── XFYUN_APPID
│   ├── XFYUN_API_SECRET
│   └── XFYUN_API_KEY
│
├── "claude-1-100k" 等claude模型
│   └── ANTHROPIC_API_KEY
│
├── "stack-claude"
│   ├── SLACK_CLAUDE_BOT_ID
│   └── SLACK_CLAUDE_USER_TOKEN
│
├── "qianfan" 百度千帆大模型库
│   ├── BAIDU_CLOUD_QIANFAN_MODEL
│   ├── BAIDU_CLOUD_API_KEY
│   └── BAIDU_CLOUD_SECRET_KEY
│
├── "zhipuai" 智谱AI大模型chatglm_turbo
│   ├── ZHIPUAI_API_KEY
│   └── ZHIPUAI_MODEL
│
├── "qwen-turbo" 等通义千问大模型
│   └── DASHSCOPE_API_KEY
│
├── "Gemini"
│   └── GEMINI_API_KEY
│
└── "newbing" Newbing接口不再稳定,不推荐使用
    ├── NEWBING_STYLE
    └── NEWBING_COOKIES


本地大模型示意图
│
├── "chatglm3"
├── "chatglm"
├── "chatglm_onnx"
├── "chatglmft"
├── "internlm"
├── "moss"
├── "jittorllms_pangualpha"
├── "jittorllms_llama"
├── "deepseekcoder"
├── "qwen-local"
├── RWKV的支持见Wiki
└── "llama2"


用户图形界面布局依赖关系示意图
│
├── CHATBOT_HEIGHT 对话窗的高度
├── CODE_HIGHLIGHT 代码高亮
├── LAYOUT 窗口布局
├── DARK_MODE 暗色模式 / 亮色模式
├── DEFAULT_FN_GROUPS 插件分类默认选项
├── THEME 色彩主题
├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框
├── ADD_WAIFU 加一个live2d装饰
└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性


插件在线服务配置依赖关系示意图
│
├── 语音功能
│   ├── ENABLE_AUDIO
│   ├── ALIYUN_TOKEN
│   ├── ALIYUN_APPKEY
│   ├── ALIYUN_ACCESSKEY
│   └── ALIYUN_SECRET
│
└── PDF文档精准解析
    └── GROBID_URLS

"""
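Elsewhere in this diff the configuration keys above are read through `toolbox.get_conf` (e.g. `ENABLE_AUDIO = get_conf("ENABLE_AUDIO")` in the plugin loader). A minimal sketch of that access pattern; the multi-key variadic form is shown as an assumption inferred from the repo's usage:

```python
# Sketch of reading config values the way the plugin loader in this diff does.
from toolbox import get_conf

ENABLE_AUDIO = get_conf("ENABLE_AUDIO")                # single key -> value
LAYOUT, DARK_MODE = get_conf("LAYOUT", "DARK_MODE")    # assumed variadic usage
```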
@@ -3,81 +3,143 @@
# 'stop' 颜色对应 theme.py 中的 color_er
import importlib
from toolbox import clear_line_break
from toolbox import apply_gpt_academic_string_mask_langbased
from toolbox import build_gpt_academic_masked_string_langbased
from textwrap import dedent

def get_core_functions():
    return {

        "英语学术润色": {
            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
                      r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来

        "学术语料润色": {
            # [1*] 前缀字符串,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等。
            #      这里填一个提示词字符串就行了,这里为了区分中英文情景搞复杂了一点
            "Prefix": build_gpt_academic_masked_string_langbased(
                text_show_english=
                    r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
                    r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
                    r"Firstly, you should provide the polished paragraph. "
                    r"Secondly, you should list all your modification and explain the reasons to do so in markdown table.",
                text_show_chinese=
                    r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,"
                    r"同时分解长句,减少重复,并提供改进建议。请先提供文本的更正版本,然后在markdown表格中列出修改的内容,并给出修改的理由:"
            ) + "\n\n",
            # [2*] 后缀字符串,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
            "Suffix": r"",
            # 按钮颜色 (默认 secondary)
            # [3] 按钮颜色 (可选参数,默认 secondary)
            "Color": r"secondary",
            # 按钮是否可见 (默认 True,即可见)
            # [4] 按钮是否可见 (可选参数,默认 True,即可见)
            "Visible": True,
            # 是否在触发时清除历史 (默认 False,即不处理之前的对话历史)
            "AutoClearHistory": False
            # [5] 是否在触发时清除历史 (可选参数,默认 False,即不处理之前的对话历史)
            "AutoClearHistory": False,
            # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符)
            "PreProcess": None,
        },

        "中文学术润色": {
            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                      r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
            "Suffix": r"",

        "总结绘制脑图": {
            # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
            "Prefix": r"",
            # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
            "Suffix":
                # dedent() 函数用于去除多行字符串的缩进
                dedent("\n"+r'''
                ==============================

                使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:

                以下是对以上文本的总结,以mermaid flowchart的形式展示:
                ```mermaid
                flowchart LR
                    A["节点名1"] --> B("节点名2")
                    B --> C{"节点名3"}
                    C --> D["节点名4"]
                    C --> |"箭头名1"| E["节点名5"]
                    C --> |"箭头名2"| F["节点名6"]
                ```

                警告:
                (1)使用中文
                (2)节点名字使用引号包裹,如["Laptop"]
                (3)`|` 和 `"`之间不要存在空格
                (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下)
                '''),
        },

        "查找语法错误": {
            "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +
                      r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
                      r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
                      r"put the original text the first column, " +
                      r"put the corrected text in the second column and highlight the key words you fixed.""\n"
            "Prefix": r"Help me ensure that the grammar and the spelling is correct. "
                      r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
                      r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, "
                      r"put the original text the first column, "
                      r"put the corrected text in the second column and highlight the key words you fixed. "
                      r"Finally, please provide the proofreaded text.""\n\n"
                      r"Example:""\n"
                      r"Paragraph: How is you? Do you knows what is it?""\n"
                      r"| Original sentence | Corrected sentence |""\n"
                      r"| :--- | :--- |""\n"
                      r"| How **is** you? | How **are** you? |""\n"
                      r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
                      r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n\n"
                      r"Below is a paragraph from an academic paper. "
                      r"You need to report all grammar and spelling mistakes as the example before."
                      + "\n\n",
            "Suffix": r"",
            "PreProcess": clear_line_break,  # 预处理:清除换行符
        },

        "中译英": {
            "Prefix": r"Please translate following sentence to English:" + "\n\n",
            "Suffix": r"",
        },

        "学术中英互译": {
            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                      r"I will provide you with some paragraphs in one language " +
                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
                      r"Do not repeat the original provided paragraphs after translation. " +
                      r"You should use artificial intelligence tools, " +
                      r"such as natural language processing, and rhetorical knowledge " +
                      r"and experience about effective writing techniques to reply. " +
                      r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
            "Suffix": "",
            "Color": "secondary",

        "学术英中互译": {
            "Prefix": build_gpt_academic_masked_string_langbased(
                text_show_chinese=
                    r"I want you to act as a scientific English-Chinese translator, "
                    r"I will provide you with some paragraphs in one language "
                    r"and your task is to accurately and academically translate the paragraphs only into the other language. "
                    r"Do not repeat the original provided paragraphs after translation. "
                    r"You should use artificial intelligence tools, "
                    r"such as natural language processing, and rhetorical knowledge "
                    r"and experience about effective writing techniques to reply. "
                    r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:",
                text_show_english=
                    r"你是经验丰富的翻译,请把以下学术文章段落翻译成中文,"
                    r"并同时充分考虑中文的语法、清晰、简洁和整体可读性,"
                    r"必要时,你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯。"
                    r"你需要翻译的文本如下:"
            ) + "\n\n",
            "Suffix": r"",
        },

        "英译中": {
            "Prefix": r"翻译成地道的中文:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },

        "找图片": {
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL,"
                      r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },

        "解释代码": {
            "Prefix": r"请解释以下代码:" + "\n```\n",
            "Suffix": "\n```\n",
        },

        "参考文献转Bib": {
            "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
                      r"Items need to be transformed:",
            "Prefix": r"Here are some bibliography items, please transform them into bibtex style."
                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly."
                      r"Items need to be transformed:" + "\n\n",
            "Visible": False,
            "Suffix": r"",
        }
    }
@@ -87,8 +149,25 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
    import core_functional
    importlib.reload(core_functional)  # 热更新prompt
    core_functional = core_functional.get_core_functions()
    if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
    inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
    addition = chatbot._cookies['customize_fn_overwrite']
    if additional_fn in addition:
        # 自定义功能
        inputs = addition[additional_fn]["Prefix"] + inputs + addition[additional_fn]["Suffix"]
        return inputs, history
    else:
        # 预制功能
        if "PreProcess" in core_functional[additional_fn]:
            if core_functional[additional_fn]["PreProcess"] is not None:
                inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
        # 为字符串加上上面定义的前缀和后缀。
        inputs = apply_gpt_academic_string_mask_langbased(
            string = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"],
            lang_reference = inputs,
        )
        if core_functional[additional_fn].get("AutoClearHistory", False):
            history = []
        return inputs, history

if __name__ == "__main__":
    t = get_core_functions()["总结绘制脑图"]
    print(t["Prefix"] + t["Suffix"])
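The schema shown above (mandatory Prefix/Suffix plus optional Color, Visible, AutoClearHistory, PreProcess) means a new quick-action button is just one more dict entry. A minimal sketch; the entry name and prompt below are illustrative, not part of the repo:

```python
# Hypothetical custom entry (name and prompt are illustrative, not in the repo).
# Only "Prefix" and "Suffix" are mandatory; handle_core_functionality falls back
# to defaults for the rest (e.g. .get("AutoClearHistory", False)).
custom_entry = {
    "一句话总结": {
        "Prefix": r"Summarize the following text in one sentence:" + "\n\n",
        "Suffix": r"",
        "Color": r"secondary",     # optional button color
        "AutoClearHistory": True,  # optional: wipe prior chat when triggered
    }
}

# The wrapping step then reduces to:
inputs = "some user text"
fn = custom_entry["一句话总结"]
inputs = fn["Prefix"] + inputs + fn["Suffix"]
print(inputs)
```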
@@ -1,12 +1,13 @@
from toolbox import HotReload  # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
from toolbox import trimmed_format_exc

def get_crazy_functions():
    ###################### 第一组插件 ###########################
    from crazy_functions.读文章写摘要 import 读文章写摘要
    from crazy_functions.生成函数注释 import 批量生成函数注释
    from crazy_functions.解析项目源代码 import 解析项目本身
    from crazy_functions.解析项目源代码 import 解析一个Python项目
    from crazy_functions.解析项目源代码 import 解析一个Matlab项目
    from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
    from crazy_functions.解析项目源代码 import 解析一个C项目
    from crazy_functions.解析项目源代码 import 解析一个Golang项目
@@ -14,7 +15,6 @@ def get_crazy_functions():
    from crazy_functions.解析项目源代码 import 解析一个Java项目
    from crazy_functions.解析项目源代码 import 解析一个前端项目
    from crazy_functions.高级功能函数模板 import 高阶功能模板函数
    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
    from crazy_functions.Latex全文润色 import Latex英文润色
    from crazy_functions.询问多个大语言模型 import 同时问询
    from crazy_functions.解析项目源代码 import 解析一个Lua项目
@@ -25,351 +25,657 @@ def get_crazy_functions():
    from crazy_functions.对话历史存档 import 载入对话历史存档
    from crazy_functions.对话历史存档 import 删除所有本地对话历史记录
    from crazy_functions.辅助功能 import 清除缓存

    from crazy_functions.批量Markdown翻译 import Markdown英译中
    function_plugins = {
        "解析整个Python项目": {
            "Color": "stop",  # 按钮颜色
            "Function": HotReload(解析一个Python项目)
        },
        "载入对话历史存档(先上传存档或输入路径)": {
            "Color": "stop",
            "AsButton": False,
            "Function": HotReload(载入对话历史存档)
        },
        "删除所有本地对话历史记录(请谨慎操作)": {
            "AsButton": False,
            "Function": HotReload(删除所有本地对话历史记录)
        },
        "清除所有缓存文件(请谨慎操作)": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(清除缓存)
        },
        "解析Jupyter Notebook文件": {
            "Color": "stop",
            "AsButton": False,
            "Function": HotReload(解析ipynb文件),
            "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "若输入0,则不解析notebook中的Markdown块",  # 高级参数输入区的显示提示
        },
        "批量总结Word文档": {
            "Color": "stop",
            "Function": HotReload(总结word文档)
        },
        "解析整个C++项目头文件": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个C项目的头文件)
        },
        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个C项目)
        },
        "解析整个Go项目": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Golang项目)
        },
        "解析整个Rust项目": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Rust项目)
        },
        "解析整个Java项目": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Java项目)
        },
        "解析整个前端项目(js,ts,css等)": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个前端项目)
        },
        "解析整个Lua项目": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Lua项目)
        },
        "解析整个CSharp项目": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个CSharp项目)
        },
        "读Tex论文写摘要": {
            "Color": "stop",  # 按钮颜色
            "Function": HotReload(读文章写摘要)
        },
        "Markdown/Readme英译中": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "Function": HotReload(Markdown英译中)
        },
        "批量生成函数注释": {
            "Color": "stop",  # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(批量生成函数注释)
        },
        "保存当前的对话": {
            "Function": HotReload(对话历史存档)
        },
        "[多线程Demo] 解析此项目本身(源码自译解)": {
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析项目本身)
        },
        # "[老旧的Demo] 把本项目源代码切换成全英文": {
        #     # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
        #     "AsButton": False,  # 加入下拉菜单中
        #     "Function": HotReload(全项目切换英文)
        # },
        "[插件demo] 历史上的今天": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Function": HotReload(高阶功能模板函数)
        },

    }
    ###################### 第二组插件 ###########################
    # [第二组插件]: 经过充分测试
    from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
    # from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
    from crazy_functions.Latex全文润色 import Latex中文润色
    from crazy_functions.Latex全文润色 import Latex英文纠错
    from crazy_functions.Latex全文翻译 import Latex中译英
    from crazy_functions.Latex全文翻译 import Latex英译中
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    from crazy_functions.虚空终端 import 虚空终端
    from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表

    function_plugins.update({
        "批量翻译PDF文档(多线程)": {
    function_plugins = {
        "虚空终端": {
            "Group": "对话|编程|学术|智能体",
            "Color": "stop",
            "AsButton": True,  # 加入下拉菜单中
            "Function": HotReload(批量翻译PDF文档)
            "AsButton": True,
            "Function": HotReload(虚空终端),
        },
        "解析整个Python项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": True,
            "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
            "Function": HotReload(解析一个Python项目),
        },
        "载入对话历史存档(先上传存档或输入路径)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,
            "Info": "载入对话历史存档 | 输入参数为路径",
            "Function": HotReload(载入对话历史存档),
        },
        "删除所有本地对话历史记录(谨慎操作)": {
            "Group": "对话",
            "AsButton": False,
            "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
            "Function": HotReload(删除所有本地对话历史记录),
        },
        "清除所有缓存文件(谨慎操作)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
            "Function": HotReload(清除缓存),
        },
        "生成多种Mermaid图表(从当前对话或文件(.pdf/.md)中生产图表)": {
            "Group": "对话",
            "Color": "stop",
            "AsButton": False,
            "Info" : "基于当前对话或PDF生成多种Mermaid图表,图表类型由模型判断",
            "Function": HotReload(生成多种Mermaid图表),
            "AdvancedArgs": True,
            "ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
        },
        "批量总结Word文档": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": True,
            "Info": "批量总结word文档 | 输入参数为路径",
            "Function": HotReload(总结word文档),
        },
        "解析整个Matlab项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,
            "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
            "Function": HotReload(解析一个Matlab项目),
        },
        "解析整个C++项目头文件": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
            "Function": HotReload(解析一个C项目的头文件),
        },
        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径",
            "Function": HotReload(解析一个C项目),
        },
        "解析整个Go项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Golang项目),
        },
        "解析整个Rust项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Rust项目),
        },
        "解析整个Java项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Java项目),
        },
        "解析整个前端项目(js,ts,css等)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径",
            "Function": HotReload(解析一个前端项目),
        },
        "解析整个Lua项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个Lua项目),
        },
        "解析整个CSharp项目": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
            "Function": HotReload(解析一个CSharp项目),
        },
        "解析Jupyter Notebook文件": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,
            "Info": "解析Jupyter Notebook文件 | 输入参数为路径",
            "Function": HotReload(解析ipynb文件),
            "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "若输入0,则不解析notebook中的Markdown块",  # 高级参数输入区的显示提示
        },
        "读Tex论文写摘要": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,
            "Info": "读取Tex论文并写摘要 | 输入参数为路径",
            "Function": HotReload(读文章写摘要),
        },
        "翻译README或MD": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": True,
            "Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
            "Function": HotReload(Markdown英译中),
        },
        "翻译Markdown或README(支持Github链接)": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,
            "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
            "Function": HotReload(Markdown英译中),
        },
        "批量生成函数注释": {
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "批量生成函数的注释 | 输入参数为路径",
            "Function": HotReload(批量生成函数注释),
        },
        "保存当前的对话": {
            "Group": "对话",
            "AsButton": True,
            "Info": "保存当前的对话 | 不需要输入参数",
            "Function": HotReload(对话历史存档),
        },
        "[多线程Demo]解析此项目本身(源码自译解)": {
            "Group": "对话|编程",
            "AsButton": False,  # 加入下拉菜单中
            "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
            "Function": HotReload(解析项目本身),
        },
        "历史上的今天": {
            "Group": "对话",
            "AsButton": True,
            "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
            "Function": HotReload(高阶功能模板函数),
        },
        "精准翻译PDF论文": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": True,
            "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
            "Function": HotReload(批量翻译PDF文档),
        },
        "询问多个GPT模型": {
            "Color": "stop",  # 按钮颜色
            "Function": HotReload(同时问询)
            "Group": "对话",
            "Color": "stop",
            "AsButton": True,
            "Function": HotReload(同时问询),
        },
        "[测试功能] 批量总结PDF文档": {
        "批量总结PDF文档": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Function": HotReload(批量总结PDF文档)
            "Info": "批量总结PDF文档的内容 | 输入参数为路径",
            "Function": HotReload(批量总结PDF文档),
        },
        # "[测试功能] 批量总结PDF文档pdfminer": {
        #     "Color": "stop",
        #     "AsButton": False,  # 加入下拉菜单中
        #     "Function": HotReload(批量总结PDF文档pdfminer)
        # },
        "谷歌学术检索助手(输入谷歌学术搜索页url)": {
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(谷歌检索小助手)
            "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
            "Function": HotReload(谷歌检索小助手),
        },
        "理解PDF文档内容 (模仿ChatPDF)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(理解PDF文档内容标准文件输入)
            "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
            "Function": HotReload(理解PDF文档内容标准文件输入),
        },
        "英文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex英文润色)
        },
        "英文Latex项目全文纠错(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex英文纠错)
            "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Latex英文润色),
        },

        "中文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Group": "学术",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex中文润色)
        },
        "Latex项目全文中译英(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex中译英)
        },
        "Latex项目全文英译中(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex英译中)
            "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Latex中文润色),
        },
        # 已经被新插件取代
        # "英文Latex项目全文纠错(输入路径或上传压缩包)": {
        #     "Group": "学术",
        #     "Color": "stop",
        #     "AsButton": False,  # 加入下拉菜单中
        #     "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
        #     "Function": HotReload(Latex英文纠错),
        # },
        # 已经被新插件取代
        # "Latex项目全文中译英(输入路径或上传压缩包)": {
        #     "Group": "学术",
        #     "Color": "stop",
        #     "AsButton": False,  # 加入下拉菜单中
        #     "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
        #     "Function": HotReload(Latex中译英)
        # },
        # 已经被新插件取代
        # "Latex项目全文英译中(输入路径或上传压缩包)": {
        #     "Group": "学术",
        #     "Color": "stop",
        #     "AsButton": False,  # 加入下拉菜单中
        #     "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
        #     "Function": HotReload(Latex英译中)
        # },
        "批量Markdown中译英(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Group": "编程",
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Markdown中译英)
            "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
            "Function": HotReload(Markdown中译英),
        },
    }

    })

    ###################### 第三组插件 ###########################
    # [第三组插件]: 尚未充分测试的函数插件

    # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
    try:
        from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
        function_plugins.update({

        function_plugins.update(
            {
                "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,  # 加入下拉菜单中
                    "Function": HotReload(下载arxiv论文并翻译摘要)
                    # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
                    "Function": HotReload(下载arxiv论文并翻译摘要),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.联网的ChatGPT import 连接网络回答问题
        function_plugins.update({

        function_plugins.update(
            {
                "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,  # 加入下拉菜单中
                    "Function": HotReload(连接网络回答问题)
                    # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
                    "Function": HotReload(连接网络回答问题),
                }
        })
            }
        )
        from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
        function_plugins.update({

        function_plugins.update(
            {
                "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,  # 加入下拉菜单中
                    "Function": HotReload(连接bing搜索回答问题)
                    "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
                    "Function": HotReload(连接bing搜索回答问题),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.解析项目源代码 import 解析任意code项目
        function_plugins.update({

        function_plugins.update(
            {
                "解析项目源代码(手动指定和筛选源代码文件类型)": {
                    "Group": "编程",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"",  # 高级参数输入区的显示提示
                    "Function": HotReload(解析任意code项目)
                    "ArgsReminder": '输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"',  # 高级参数输入区的显示提示
                    "Function": HotReload(解析任意code项目),
                },
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
        function_plugins.update({

        function_plugins.update(
            {
                "询问多个GPT模型(手动指定询问哪些模型)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4",  # 高级参数输入区的显示提示
                    "Function": HotReload(同时问询_指定模型)
                    "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4",  # 高级参数输入区的显示提示
                    "Function": HotReload(同时问询_指定模型),
                },
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.图片生成 import 图片生成
        function_plugins.update({
            "图片生成(先切换模型到openai或api2d)": {
        from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2

        function_plugins.update(
            {
                "图片生成_DALLE2 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "在这里输入分辨率, 如256x256(默认)",  # 高级参数输入区的显示提示
                    "Function": HotReload(图片生成)
                    "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024",  # 高级参数输入区的显示提示
                    "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片生成_DALLE2),
                },
        })
            }
        )
        function_plugins.update(
            {
                "图片生成_DALLE3 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,  # 调用时,唤起高级参数输入区(默认False)
                    "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」",  # 高级参数输入区的显示提示
                    "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片生成_DALLE3),
                },
            }
        )
        function_plugins.update(
            {
                "图片修改_DALLE2 (先切换模型到gpt-*)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": False,  # 调用时,唤起高级参数输入区(默认False)
                    # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
                    "Function": HotReload(图片修改_DALLE2),
                },
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.总结音视频 import 总结音视频
        function_plugins.update({

        function_plugins.update(
            {
                "批量总结音视频(输入路径或上传压缩包)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
                    "Function": HotReload(总结音视频)
                    "Info": "批量总结音频或视频 | 输入参数为路径",
                    "Function": HotReload(总结音视频),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.数学动画生成manim import 动画生成
        function_plugins.update({

        function_plugins.update(
            {
                "数学动画生成(Manim)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(动画生成)
                    "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
                    "Function": HotReload(动画生成),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
        function_plugins.update({
            "Markdown翻译(手动指定语言)": {

        function_plugins.update(
            {
                "Markdown翻译(指定翻译成何种语言)": {
                    "Group": "编程",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
                    "Function": HotReload(Markdown翻译指定语言)
                    "Function": HotReload(Markdown翻译指定语言),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.Langchain知识库 import 知识库问答
        function_plugins.update({
            "构建知识库(请先上传文件素材)": {
        from crazy_functions.知识库问答 import 知识库文件注入

        function_plugins.update(
            {
                "构建知识库(先上传文件素材,再运行此插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "待注入的知识库名称id, 默认为default",
                    "Function": HotReload(知识库问答)
                    "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
                    "Function": HotReload(知识库文件注入),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.Langchain知识库 import 读取知识库作答
        function_plugins.update({
            "知识库问答": {
        from crazy_functions.知识库问答 import 读取知识库作答

        function_plugins.update(
            {
                "知识库文件注入(构建知识库后,再运行此插件)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要首先调用构建知识库",
                    "Function": HotReload(读取知识库作答)
                    "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
                    "Function": HotReload(读取知识库作答),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.交互功能函数模板 import 交互功能模板函数
        function_plugins.update({
            "交互功能模板函数": {

        function_plugins.update(
            {
                "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
                    "Group": "对话",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(交互功能模板函数)
                    "Function": HotReload(交互功能模板函数),
                }
        })
            }
        )
    except:
        print('Load function plugin failed')
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF

        function_plugins.update(
            {
                "Latex英文纠错+高亮修正位置 [需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
                    "Function": HotReload(Latex英文纠错加PDF对比),
                },
                "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
                    "Function": HotReload(Latex翻译中文并重新编译PDF),
                },
                "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "AdvancedArgs": True,
                    "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
                    + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
                    + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                    "Info": "本地Latex论文精细翻译 | 输入参数是路径",
                    "Function": HotReload(Latex翻译中文并重新编译PDF),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from toolbox import get_conf

        ENABLE_AUDIO = get_conf("ENABLE_AUDIO")
        if ENABLE_AUDIO:
            from crazy_functions.语音助手 import 语音助手

            function_plugins.update(
                {
                    "实时语音对话": {
                        "Group": "对话",
                        "Color": "stop",
                        "AsButton": True,
                        "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
                        "Function": HotReload(语音助手),
                    }
                }
            )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档

        function_plugins.update(
            {
                "精准翻译PDF文档(NOUGAT)": {
                    "Group": "学术",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(批量翻译PDF文档),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.函数动态生成 import 函数动态生成

        function_plugins.update(
            {
                "动态代码解释器(CodeInterpreter)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(函数动态生成),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.多智能体 import 多智能体终端

        function_plugins.update(
            {
                "AutoGen多智能体终端(仅供测试)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(多智能体终端),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    try:
        from crazy_functions.互动小游戏 import 随机小游戏

        function_plugins.update(
            {
                "随机互动小游戏(仅供测试)": {
                    "Group": "智能体",
                    "Color": "stop",
                    "AsButton": False,
                    "Function": HotReload(随机小游戏),
                }
            }
        )
    except:
        print(trimmed_format_exc())
        print("Load function plugin failed")

    # try:
    #     from crazy_functions.高级功能函数模板 import 测试图表渲染
    #     function_plugins.update({
    #         "绘制逻辑关系(测试图表渲染)": {
    #             "Group": "智能体",
    #             "Color": "stop",
    #             "AsButton": True,
    #             "Function": HotReload(测试图表渲染)
    #         }
    #     })
    # except:
    #     print(trimmed_format_exc())
    #     print('Load function plugin failed')

    # try:
    #     from crazy_functions.chatglm微调工具 import 微调数据集生成
@@ -385,71 +691,21 @@ def get_crazy_functions():
    # except:
    #     print('Load function plugin failed')

    try:
        from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
        function_plugins.update({
            "Latex英文纠错+高亮修正位置 [需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
                "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
                "Function": HotReload(Latex英文纠错加PDF对比)
            }
        })
        from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
        function_plugins.update({
            "Arixv论文精细翻译(输入arxivID)[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
                "ArgsReminder":
                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                "Function": HotReload(Latex翻译中文并重新编译PDF)
            }
        })
        function_plugins.update({
            "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
                "Color": "stop",
                "AsButton": False,
                "AdvancedArgs": True,
                "ArgsReminder":
                    "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "+
                    "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
                "Function": HotReload(Latex翻译中文并重新编译PDF)
            }
        })
    except:
        print('Load function plugin failed')


    try:
        from toolbox import get_conf
        ENABLE_AUDIO, = get_conf('ENABLE_AUDIO')
        if ENABLE_AUDIO:
            from crazy_functions.语音助手 import 语音助手
            function_plugins.update({
                "实时音频采集": {
                    "Color": "stop",
                    "AsButton": True,
                    "Function": HotReload(语音助手)
                }
            })
    except:
        print('Load function plugin failed')

    # try:
    #     from crazy_functions.虚空终端 import 终端
    #     function_plugins.update({
    #         "超级终端": {
    #             "Color": "stop",
    #             "AsButton": False,
    #             # "AdvancedArgs": True,
    #             # "ArgsReminder": "",
    #             "Function": HotReload(终端)
    #         }
    #     })
    # except:
    #     print('Load function plugin failed')
    """
    设置默认值:
    - 默认 Group = 对话
    - 默认 AsButton = True
    - 默认 AdvancedArgs = False
    - 默认 Color = secondary
    """
    for name, function_meta in function_plugins.items():
        if "Group" not in function_meta:
            function_plugins[name]["Group"] = "对话"
        if "AsButton" not in function_meta:
            function_plugins[name]["AsButton"] = True
        if "AdvancedArgs" not in function_meta:
            function_plugins[name]["AdvancedArgs"] = False
        if "Color" not in function_meta:
            function_plugins[name]["Color"] = "secondary"

    return function_plugins
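Because the loop above backfills Group, AsButton, AdvancedArgs, and Color, a plugin registration can be as small as a name plus a callable. A hedged sketch of such a minimal entry (the plugin name is hypothetical; the imported demo function is used only as a stand-in target):

```python
# Hypothetical minimal registration inside get_crazy_functions(); only
# "Function" is strictly required — Group falls back to "对话", AsButton to
# True, AdvancedArgs to False, and Color to "secondary".
from toolbox import HotReload
from crazy_functions.高级功能函数模板 import 高阶功能模板函数  # stand-in target

function_plugins.update({
    "我的新插件": {
        "Function": HotReload(高阶功能模板函数),
    }
})
```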
crazy_functions/CodeInterpreter.py (new file, 232 lines)
@@ -0,0 +1,232 @@
from collections.abc import Callable, Iterable, Mapping
from typing import Any
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
from toolbox import promote_file_to_downloadzone, get_log_folder
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import input_clipping, try_install_deps
from multiprocessing import Process, Pipe
import os
import time

templete = """
```python
import ...  # Put dependencies here, e.g. import numpy as np

class TerminalFunction(object):  # Do not change the name of the class, The name of the class must be `TerminalFunction`

    def run(self, path):  # The name of the function must be `run`, it takes only a positional argument.
        # rewrite the function you have just written here
        ...
        return generated_file_path
```
"""

def inspect_dependency(chatbot, history):
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
    return True

def get_code_block(reply):
    import re
    pattern = r"```([\s\S]*?)```"  # regex pattern to match code blocks
    matches = re.findall(pattern, reply)  # find all code blocks in text
    if len(matches) == 1:
        return matches[0].strip('python')  # code block
    for match in matches:
        if 'class TerminalFunction' in match:
            return match.strip('python')  # code block
    raise RuntimeError("GPT is not generating proper code.")
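Note that `str.strip('python')` strips any of the characters p/y/t/h/o/n from both ends of the block, which happens to remove the `python` language tag here but can also clip code that begins or ends with those letters. A safer variant (an illustrative alternative, not the repo's code) removes only the tag itself:

```python
def strip_language_tag(block: str) -> str:
    """Remove a leading 'python' language tag from a fenced block body.

    Unlike block.strip('python'), which deletes any of the characters
    p/y/t/h/o/n from both ends, this only drops the tag itself.
    """
    block = block.strip()
    if block.startswith('python'):
        block = block[len('python'):]
    return block.strip('\n')
```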
def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
    # 输入
    prompt_compose = [
        f'Your job:\n'
        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
        f"2. You should write this function to perform following task: " + txt + "\n",
        f"3. Wrap the output python function with markdown codeblock."
    ]
    i_say = "".join(prompt_compose)
    demo = []

    # 第一步
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
        sys_prompt= r"You are a programmer."
    )
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面 # 界面更新

    # 第二步
    prompt_compose = [
        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
        templete
    ]
    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=inputs_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt= r"You are a programmer."
    )
    code_to_return = gpt_say
    history.extend([i_say, gpt_say])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面 # 界面更新

    # # 第三步
    # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
    # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=inputs_show_user,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    # # # 第三步
    # i_say = "Show me how to use `pip` to install packages to run the code above. "
    # i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #     inputs=i_say, inputs_show_user=i_say,
    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
    #     sys_prompt= r"You are a programmer."
    # )
    installation_advance = ""

    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history

def make_module(code):
    module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
    with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f:
        f.write(code)

    def get_class_name(class_string):
        import re
        # Use regex to extract the class name
        class_name = re.search(r'class (\w+)\(', class_string).group(1)
        return class_name

    class_name = get_class_name(code)
    return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}"

def init_module_instance(module):
    import importlib
    module_, class_ = module.split('->')
    init_f = getattr(importlib.import_module(module_), class_)
    return init_f()
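make_module writes the generated code into the log folder and returns a `dotted.module->ClassName` locator, which init_module_instance then resolves through importlib. A sketch of the round trip (the sample code string is illustrative, not real model output):

```python
# Illustrative round trip through the two helpers defined above.
code = '''
class TerminalFunction(object):
    def run(self, path):
        return f"processed {path}"
'''
locator = make_module(code)               # e.g. "gpt_log.gpt_fn_2024_01_01_00_00_00->TerminalFunction"
instance = init_module_instance(locator)  # importlib loads the module, instantiates the class
print(instance.run("demo.csv"))
```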
def for_immediate_show_off_when_possible(file_type, fp, chatbot):
    if file_type in ['png', 'jpg']:
        image_path = os.path.abspath(fp)
        chatbot.append(['这是一张图片, 展示如下:',
                        f'本地文件地址: <br/>`{image_path}`<br/>'+
                        f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
                        ])
    return chatbot

def subprocess_worker(instance, file_path, return_dict):
    return_dict['result'] = instance.run(file_path)

def have_any_recent_upload_files(chatbot):
    _5min = 5 * 60
    if not chatbot: return False  # chatbot is None
    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
    if not most_recent_uploaded: return False  # most_recent_uploaded is None
    if time.time() - most_recent_uploaded["time"] < _5min: return True  # most_recent_uploaded is new
    else: return False  # most_recent_uploaded is too old

def get_recent_file_prompt_support(chatbot):
    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
    path = most_recent_uploaded['path']
    return path

@CatchException
def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    txt             输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
    llm_kwargs      gpt模型参数,如温度和top_p等,一般原样传递下去就行
    plugin_kwargs   插件模型的参数,暂时没有用武之地
    chatbot         聊天显示框的句柄,用于显示给用户
    history         聊天历史,前情提要
    system_prompt   给gpt的静默提醒
    user_request    当前用户的请求信息(IP地址等)
    """
    raise NotImplementedError

    # 清空历史,以免输入溢出
    history = []; clear_file_downloadzone(chatbot)

    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "CodeInterpreter开源版, 此插件处于开发阶段, 建议暂时不要使用, 插件初始化中 ..."
    ])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    if have_any_recent_upload_files(chatbot):
        file_path = get_recent_file_prompt_support(chatbot)
    else:
        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    # 读取文件
    if ("recently_uploaded_files" in plugin_kwargs) and (plugin_kwargs["recently_uploaded_files"] == ""): plugin_kwargs.pop("recently_uploaded_files")
    recently_uploaded_files = plugin_kwargs.get("recently_uploaded_files", None)
    file_path = recently_uploaded_files[-1]
    file_type = file_path.split('.')[-1]

    # 粗心检查
    if is_the_upload_folder(txt):
        chatbot.append([
            "...",
            f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
        ])
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return

    # 开始干正事
    for j in range(5):  # 最多重试5次
        try:
            code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
                yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
            code = get_code_block(code)
            res = make_module(code)
            instance = init_module_instance(res)
            break
        except Exception as e:
            chatbot.append([f"第{j}次代码生成尝试,失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
            yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    # 代码生成结束, 开始执行
    try:
        import multiprocessing
        manager = multiprocessing.Manager()
        return_dict = manager.dict()

        p = multiprocessing.Process(target=subprocess_worker, args=(instance, file_path, return_dict))
        # only has 10 seconds to run
        p.start(); p.join(timeout=10)
        if p.is_alive(): p.terminate(); p.join()
        p.close()
        res = return_dict['result']
        # res = instance.run(file_path)
    except Exception as e:
        chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
        # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return

    # 顺利完成,收尾
    res = str(res)
    if os.path.exists(res):
        chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
        new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
        chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面 # 界面更新
    else:
        chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面 # 界面更新

"""
测试:
    裁剪图像,保留下半部分
    交换图像的蓝色通道和红色通道
    将图像转为灰度图像
    将csv文件转excel表格
"""
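The execution step above isolates the generated code in a child process and abandons it after 10 seconds, so runaway code cannot hang the UI. The same pattern as a standalone helper (a sketch, not the repo's API; under the `spawn` start method the callable must be a picklable top-level function):

```python
import multiprocessing

def _worker(fn, args, return_dict):
    return_dict['result'] = fn(*args)

def run_with_timeout(fn, args=(), timeout=10):
    """Run fn(*args) in a child process; abandon it after `timeout` seconds."""
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=_worker, args=(fn, args, return_dict))
    p.start(); p.join(timeout=timeout)
    if p.is_alive():
        p.terminate(); p.join()  # runaway code: kill the child process
    p.close()
    return return_dict.get('result', None)
```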
@@ -1,5 +1,5 @@
from toolbox import update_ui, trimmed_format_exc
from toolbox import CatchException, report_execption, write_results_to_file, zip_folder
from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
from toolbox import CatchException, report_exception, write_history_to_file, zip_folder


class PaperFileGroup():
@@ -11,7 +11,7 @@ class PaperFileGroup():
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        from request_llms.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num
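get_token_num wraps the model's tokenizer so text can later be split against a token budget rather than a character count. An equivalent standalone sketch using tiktoken directly (assumed installed; the plugins below abort with an install hint when it is missing):

```python
import tiktoken  # pip install --upgrade tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def get_token_num(txt: str) -> int:
    return len(enc.encode(txt, disallowed_special=()))

print(get_token_num("Below is a paragraph from an academic paper."))
```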
@@ -26,8 +26,8 @@ class PaperFileGroup():
|
||||
self.sp_file_index.append(index)
|
||||
self.sp_file_tag.append(self.file_paths[index])
|
||||
else:
|
||||
from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
|
||||
segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
|
||||
from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
|
||||
segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
|
||||
for j, segment in enumerate(segments):
|
||||
self.sp_file_contents.append(segment)
|
||||
self.sp_file_index.append(index)
|
||||
@@ -51,7 +51,7 @@ class PaperFileGroup():
         import os, time
         folder = os.path.dirname(self.file_paths[0])
         t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-        zip_folder(folder, './gpt_log/', f'{t}-polished.zip')
+        zip_folder(folder, get_log_folder(), f'{t}-polished.zip')


 def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='polish'):
@@ -126,25 +126,27 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch

     # <-------- collect the results and exit ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
+    res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)

     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


 @CatchException
-def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # basic information: feature description and contributors
     chatbot.append([
         "函数插件功能?",
-        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
+        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用「Latex英文纠错+高亮修正位置(需Latex)插件」"])
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

     # try to import the dependencies; if any are missing, suggest how to install them
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -155,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
@@ -171,7 +173,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # basic information: feature description and contributors
     chatbot.append([
         "函数插件功能?",
@@ -182,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -193,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
@@ -207,7 +209,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p


 @CatchException
-def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # basic information: feature description and contributors
     chatbot.append([
         "函数插件功能?",
@@ -218,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -229,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread')
@@ -1,5 +1,5 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import update_ui, promote_file_to_downloadzone
+from toolbox import CatchException, report_exception, write_history_to_file
 fast_debug = False

 class PaperFileGroup():
@@ -11,7 +11,7 @@ class PaperFileGroup():
         self.sp_file_tag = []

         # count_token
-        from request_llm.bridge_all import model_info
+        from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
         self.get_token_num = get_token_num
@@ -26,8 +26,8 @@ class PaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
@@ -95,7 +95,8 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch

     # <-------- collect the results and exit ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
+    res = write_history_to_file(gpt_response_collection, create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -105,7 +106,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch


 @CatchException
-def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # basic information: feature description and contributors
     chatbot.append([
         "函数插件功能?",
@@ -116,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -127,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
@@ -142,7 +143,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom


 @CatchException
-def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # basic information: feature description and contributors
     chatbot.append([
         "函数插件功能?",
@@ -153,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
@@ -164,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
@@ -1,12 +1,12 @@
-from toolbox import update_ui, trimmed_format_exc, get_conf, objdump, objload, promote_file_to_downloadzone
-from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
+from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
+from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
-import glob, os, requests, time
+import glob, os, requests, time, tarfile
 pj = os.path.join
 ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")

-# =================================== utility functions ===============================================
-专业词汇声明  = 'If the term "agent" is used in this section, it should be translated to "智能体". '
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- utility functions =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
 def switch_prompt(pfg, mode, more_requirement):
     """
     Generate prompts and system prompts based on the mode for proofreading or translating.
@@ -65,7 +65,7 @@ def move_project(project_folder, arxiv_id=None):
     if arxiv_id is not None:
         new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
     else:
-        new_workfolder = f'gpt_log/{gen_time_str()}'
+        new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
     try:
         shutil.rmtree(new_workfolder)
     except:
@@ -73,13 +73,14 @@ def move_project(project_folder, arxiv_id=None):

     # align subfolder if there is a folder wrapper
     items = glob.glob(pj(project_folder, '*'))
+    items = [item for item in items if os.path.basename(item) != '__MACOSX']
     if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1:
         if os.path.isdir(items[0]): project_folder = items[0]

     shutil.copytree(src=project_folder, dst=new_workfolder)
     return new_workfolder

-def arxiv_download(chatbot, history, txt):
+def arxiv_download(chatbot, history, txt, allow_cache=True):
     def check_cached_translation_pdf(arxiv_id):
         translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
         if not os.path.exists(translation_dir):
@@ -87,6 +88,9 @@ def arxiv_download(chatbot, history, txt):
         target_file = pj(translation_dir, 'translate_zh.pdf')
         if os.path.exists(target_file):
             promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
+            target_file_compare = pj(translation_dir, 'comparison.pdf')
+            if os.path.exists(target_file_compare):
+                promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
             return target_file
         return False
     def is_float(s):
@@ -100,7 +104,7 @@ def arxiv_download(chatbot, history, txt):
     if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]):  # is an arxiv ID
         txt = 'https://arxiv.org/abs/' + txt[:10]
     if not txt.startswith('https://arxiv.org'):
-        return txt, None
+        return txt, None  # a local file; skip the download

     # <-------------- inspect format ------------->
     chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
@@ -109,14 +113,14 @@ def arxiv_download(chatbot, history, txt):

     url_ = txt  # https://arxiv.org/abs/1707.06690
     if not txt.startswith('https://arxiv.org/abs/'):
-        msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}"
+        msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
         yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history)  # refresh the UI
         return msg, None
     # <-------------- set format ------------->
     arxiv_id = url_.split('/abs/')[-1]
     if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
     cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
-    if cached_translation_pdf: return cached_translation_pdf, arxiv_id
+    if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id

     url_tar = url_.replace('/abs/', '/e-print/')
     translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
@@ -129,7 +133,7 @@ def arxiv_download(chatbot, history, txt):
         yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history)  # refresh the UI
     else:
         yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history)  # refresh the UI
-        proxies, = get_conf('proxies')
+        proxies = get_conf('proxies')
         r = requests.get(url_tar, proxies=proxies)
         with open(dst, 'wb+') as f:
             f.write(r.content)
@@ -138,11 +142,11 @@ def arxiv_download(chatbot, history, txt):
     from toolbox import extract_archive
     extract_archive(file_path=dst, dest_dir=extract_dst)
     return extract_dst, arxiv_id
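The function above maps an arxiv /abs/ URL to its /e-print/ tarball, downloads it, and unpacks it. A standalone sketch under the same assumptions (output paths and the proxies argument are illustrative):

import os, tarfile, requests

def download_arxiv_eprint(abs_url, dest_dir, proxies=None):
    url_tar = abs_url.replace('/abs/', '/e-print/')   # e.g. https://arxiv.org/e-print/1707.06690
    os.makedirs(dest_dir, exist_ok=True)
    dst = os.path.join(dest_dir, 'e-print.tar')
    r = requests.get(url_tar, proxies=proxies)
    with open(dst, 'wb+') as f:
        f.write(r.content)
    extract_dst = os.path.join(dest_dir, 'extract')
    with tarfile.open(dst) as tar:                    # raises tarfile.ReadError on a bad archive
        tar.extractall(extract_dst)
    return extract_dst

The tarfile.ReadError path is exactly what the plugin below catches when arxiv serves something that is not a usable source archive.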
-# ========================================= plugin main program 1 =====================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= plugin main program 1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=


 @CatchException
-def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # <-------------- information about this plugin ------------->
     chatbot.append([ "函数插件功能?",
         "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系统表现未知。"])
@@ -171,12 +175,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return

@@ -214,11 +218,10 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
     # <-------------- we are done ------------->
     return success


-# ========================================= plugin main program 2 =====================================================
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= plugin main program 2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

 @CatchException
-def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     # <-------------- information about this plugin ------------->
     chatbot.append([
         "函数插件功能?",
@@ -228,6 +231,9 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
     # <-------------- more requirements ------------->
     if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
     more_req = plugin_kwargs.get("advanced_arg", "")
+    no_cache = more_req.startswith("--no-cache")
+    if no_cache: more_req = more_req[len("--no-cache"):].lstrip()  # drop the flag prefix, keep any remaining requirement text
+    allow_cache = not no_cache
     _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)
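A minimal sketch of the `--no-cache` flag handling added above, with a dummy plugin_kwargs value (the framework normally supplies this dict):

plugin_kwargs = {"advanced_arg": "--no-cache translate loosely"}

more_req = plugin_kwargs.get("advanced_arg", "")
no_cache = more_req.startswith("--no-cache")
if no_cache:
    more_req = more_req[len("--no-cache"):].lstrip()  # strip the flag, keep the rest
allow_cache = not no_cache
print(allow_cache, repr(more_req))  # False 'translate loosely'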
     # <-------------- check deps ------------->
@@ -244,9 +250,16 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,

     # <-------------- clear history and read input ------------->
     history = []
-    txt, arxiv_id = yield from arxiv_download(chatbot, history, txt)
+    try:
+        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
+    except tarfile.ReadError as e:
+        yield from update_ui_lastest_msg(
+            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
+            chatbot=chatbot, history=history)
+        return

     if txt.endswith('.pdf'):
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return

@@ -255,13 +268,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return

     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
         return

@@ -291,7 +304,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
         yield from update_ui(chatbot=chatbot, history=history); time.sleep(1)  # refresh the UI
         promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
     else:
-        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
+        chatbot.append((f"失败了", '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
         yield from update_ui(chatbot=chatbot, history=history); time.sleep(1)  # refresh the UI
         promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
crazy_functions/agent_fns/auto_agent.py (new file, 23 lines)
@@ -0,0 +1,23 @@
from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
from crazy_functions.agent_fns.general import AutoGenGeneral


class AutoGenMath(AutoGenGeneral):

    def define_agents(self):
        from autogen import AssistantAgent, UserProxyAgent
        return [
            {
                "name": "assistant",            # name of the agent.
                "cls": AssistantAgent,          # class of the agent.
            },
            {
                "name": "user_proxy",           # name of the agent.
                "cls": UserProxyAgent,          # class of the agent.
                "human_input_mode": "ALWAYS",   # always ask for human input.
                "llm_config": False,            # disables llm-based auto reply.
            },
        ]
crazy_functions/agent_fns/echo_agent.py (new file, 19 lines)
@@ -0,0 +1,19 @@
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom

class EchoDemo(PluginMultiprocessManager):
    def subprocess_worker(self, child_conn):
        # ⭐⭐ runs in the subprocess
        self.child_conn = child_conn
        while True:
            msg = self.child_conn.recv()  # PipeCom
            if msg.cmd == "user_input":
                # echo the input, then wait for further user input
                self.child_conn.send(PipeCom("show", msg.content))
                wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
                if not wait_success:
                    # wait timed out; terminate this subprocess_worker
                    break
            elif msg.cmd == "terminate":
                self.child_conn.send(PipeCom("done", ""))
                break
        print('[debug] subprocess_worker terminated')
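EchoDemo is the smallest working example of the PipeCom message protocol used throughout agent_fns: the parent sends "user_input", the child answers with "show", and "terminate"/"done" closes the session. A self-contained sketch of the same round trip without the framework (class shapes mirror the file above but are standalone):

from multiprocessing import Process, Pipe

class PipeCom:
    def __init__(self, cmd, content):
        self.cmd, self.content = cmd, content

def child_worker(conn):
    while True:
        msg = conn.recv()
        if msg.cmd == "user_input":
            conn.send(PipeCom("show", "echo: " + msg.content))
        elif msg.cmd == "terminate":
            conn.send(PipeCom("done", ""))
            break

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=child_worker, args=(child_conn,), daemon=True)
    p.start()
    parent_conn.send(PipeCom("user_input", "hello"))
    print(parent_conn.recv().content)      # echo: hello
    parent_conn.send(PipeCom("terminate", ""))
    parent_conn.recv()                     # done
    p.join()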
crazy_functions/agent_fns/general.py (new file, 138 lines)
@@ -0,0 +1,138 @@
from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate
from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
from request_llms.bridge_all import predict_no_ui_long_connection
import time

def gpt_academic_generate_oai_reply(
        self,
        messages,
        sender,
        config,
    ):
    llm_config = self.llm_config if config is None else config
    if llm_config is False:
        return False, None
    if messages is None:
        messages = self._oai_messages[sender]

    inputs = messages[-1]['content']
    history = []
    for message in messages[:-1]:
        history.append(message['content'])
    context = messages[-1].pop("context", None)
    assert context is None, "预留参数 context 未实现"

    reply = predict_no_ui_long_connection(
        inputs=inputs,
        llm_kwargs=llm_config,
        history=history,
        sys_prompt=self._oai_system_message[0]['content'],
        console_slience=True
    )
    assumed_done = reply.endswith('\nTERMINATE')
    return True, reply

class AutoGenGeneral(PluginMultiprocessManager):
    def gpt_academic_print_override(self, user_proxy, message, sender):
        # ⭐⭐ run in subprocess
        try:
            print_msg = sender.name + "\n\n---\n\n" + message["content"]
        except:
            print_msg = sender.name + "\n\n---\n\n" + message
        self.child_conn.send(PipeCom("show", print_msg))

    def gpt_academic_get_human_input(self, user_proxy, message):
        # ⭐⭐ run in subprocess
        patience = 300
        begin_waiting_time = time.time()
        self.child_conn.send(PipeCom("interact", message))
        while True:
            time.sleep(0.5)
            if self.child_conn.poll():
                wait_success = True
                break
            if time.time() - begin_waiting_time > patience:
                self.child_conn.send(PipeCom("done", ""))
                wait_success = False
                break
        if wait_success:
            return self.child_conn.recv().content
        else:
            raise TimeoutError("等待用户输入超时")

    def define_agents(self):
        raise NotImplementedError

    def exe_autogen(self, input):
        # ⭐⭐ run in subprocess
        input = input.content
        code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
        agents = self.define_agents()
        user_proxy = None
        assistant = None
        for agent_kwargs in agents:
            agent_cls = agent_kwargs.pop('cls')
            kwargs = {
                'llm_config': self.llm_kwargs,
                'code_execution_config': code_execution_config
            }
            kwargs.update(agent_kwargs)
            agent_handle = agent_cls(**kwargs)
            agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
            for d in agent_handle._reply_func_list:
                if hasattr(d['reply_func'], '__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
                    d['reply_func'] = gpt_academic_generate_oai_reply
            if agent_kwargs['name'] == 'user_proxy':
                agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
                user_proxy = agent_handle
            if agent_kwargs['name'] == 'assistant': assistant = agent_handle
        try:
            if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
            with ProxyNetworkActivate("AutoGen"):
                user_proxy.initiate_chat(assistant, message=input)
        except Exception as e:
            tb_str = '```\n' + trimmed_format_exc() + '```'
            self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))

    def subprocess_worker(self, child_conn):
        # ⭐⭐ run in subprocess
        self.child_conn = child_conn
        while True:
            msg = self.child_conn.recv()  # PipeCom
            self.exe_autogen(msg)


class AutoGenGroupChat(AutoGenGeneral):
    def exe_autogen(self, input):
        # ⭐⭐ run in subprocess
        import autogen

        input = input.content
        with ProxyNetworkActivate("AutoGen"):
            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
            agents = self.define_agents()
            agents_instances = []
            for agent_kwargs in agents:
                agent_cls = agent_kwargs.pop("cls")
                kwargs = {"code_execution_config": code_execution_config}
                kwargs.update(agent_kwargs)
                agent_handle = agent_cls(**kwargs)
                agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
                agents_instances.append(agent_handle)
                if agent_kwargs["name"] == "user_proxy":
                    user_proxy = agent_handle
                    user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
            try:
                groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50)
                manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config())
                manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
                manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a)
                if user_proxy is None:
                    raise Exception("user_proxy is not defined")
                user_proxy.initiate_chat(manager, message=input)
            except Exception:
                tb_str = "```\n" + trimmed_format_exc() + "```"
                self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str))

    def define_group_chat_manager_config(self):
        raise NotImplementedError
crazy_functions/agent_fns/persistent.py (new file, 16 lines)
@@ -0,0 +1,16 @@
from toolbox import Singleton

@Singleton
class GradioMultiuserManagerForPersistentClasses():
    def __init__(self):
        self.mapping = {}

    def already_alive(self, key):
        return (key in self.mapping) and (self.mapping[key].is_alive())

    def set(self, key, x):
        self.mapping[key] = x
        return self.mapping[key]

    def get(self, key):
        return self.mapping[key]
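A hedged usage sketch for the registry above: cache one live object per user key and only rebuild it when the old one has died. DummySession is a stand-in for a real PluginMultiprocessManager instance:

class DummySession:
    def is_alive(self):
        return True

mgr = GradioMultiuserManagerForPersistentClasses()   # @Singleton: always the same instance
key = "user-123"
if not mgr.already_alive(key):
    mgr.set(key, DummySession())   # create on first use (or after the old session dies)
session = mgr.get(key)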
crazy_functions/agent_fns/pipe.py (new file, 194 lines)
@@ -0,0 +1,194 @@
from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone
from crazy_functions.agent_fns.watchdog import WatchDog
import time, os

class PipeCom:
    def __init__(self, cmd, content) -> None:
        self.cmd = cmd
        self.content = content


class PluginMultiprocessManager:
    def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
        # ⭐ run in main process
        self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
        self.previous_work_dir_files = {}
        self.llm_kwargs = llm_kwargs
        self.plugin_kwargs = plugin_kwargs
        self.chatbot = chatbot
        self.history = history
        self.system_prompt = system_prompt
        # self.user_request = user_request
        self.alive = True
        self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
        self.last_user_input = ""
        # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
        timeout_seconds = 5 * 60
        self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
        self.heartbeat_watchdog.begin_watch()

    def feed_heartbeat_watchdog(self):
        # feed this `dog`, so the dog will not `bark` (bark_fn will terminate the instance)
        self.heartbeat_watchdog.feed()

    def is_alive(self):
        return self.alive

    def launch_subprocess_with_pipe(self):
        # ⭐ run in main process
        from multiprocessing import Process, Pipe

        parent_conn, child_conn = Pipe()
        self.p = Process(target=self.subprocess_worker, args=(child_conn,))
        self.p.daemon = True
        self.p.start()
        return parent_conn

    def terminate(self):
        self.p.terminate()
        self.alive = False
        print("[debug] instance terminated")

    def subprocess_worker(self, child_conn):
        # ⭐⭐ run in subprocess
        raise NotImplementedError

    def send_command(self, cmd):
        # ⭐ run in main process
        repeated = False
        if cmd == self.last_user_input:
            repeated = True
            cmd = ""
        else:
            self.last_user_input = cmd
        self.parent_conn.send(PipeCom("user_input", cmd))
        return repeated, cmd

    def immediate_showoff_when_possible(self, fp):
        # ⭐ main process
        # get the file extension of fp
        file_type = fp.split('.')[-1]
        # if it is an image, preview it in the chat directly
        if file_type.lower() in ['png', 'jpg']:
            image_path = os.path.abspath(fp)
            self.chatbot.append([
                '检测到新生图像:',
                f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
            ])
            yield from update_ui(chatbot=self.chatbot, history=self.history)

    def overwatch_workdir_file_change(self):
        # ⭐ main process: monitor the work folder mounted into Docker
        path_to_overwatch = self.autogen_work_dir
        change_list = []
        # scan every file under the path and compare it against self.previous_work_dir_files;
        # if a new file appears or a file's modification time changes, update the record
        # and add the path to change_list
        for root, dirs, files in os.walk(path_to_overwatch):
            for file in files:
                file_path = os.path.join(root, file)
                if file_path not in self.previous_work_dir_files.keys():
                    last_modified_time = os.stat(file_path).st_mtime
                    self.previous_work_dir_files.update({file_path: last_modified_time})
                    change_list.append(file_path)
                else:
                    last_modified_time = os.stat(file_path).st_mtime
                    if last_modified_time != self.previous_work_dir_files[file_path]:
                        self.previous_work_dir_files[file_path] = last_modified_time
                        change_list.append(file_path)
        if len(change_list) > 0:
            file_links = ""
            for f in change_list:
                res = promote_file_to_downloadzone(f)
                file_links += f'<br/><a href="file={res}" target="_blank">{res}</a>'
                yield from self.immediate_showoff_when_possible(f)

            self.chatbot.append(['检测到新生文档.', f'文档清单如下: {file_links}'])
            yield from update_ui(chatbot=self.chatbot, history=self.history)
        return change_list

    def main_process_ui_control(self, txt, create_or_resume) -> str:
        # ⭐ main process
        if create_or_resume == 'create':
            self.cnt = 1
            self.parent_conn = self.launch_subprocess_with_pipe()  # ⭐⭐⭐
        repeated, cmd_to_autogen = self.send_command(txt)
        if txt == 'exit':
            self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"])
            yield from update_ui(chatbot=self.chatbot, history=self.history)
            self.terminate()
            return "terminate"

        # patience = 10

        while True:
            time.sleep(0.5)
            if not self.alive:
                # the heartbeat watchdog might have killed it
                self.terminate()
                return "terminate"
            if self.parent_conn.poll():
                self.feed_heartbeat_watchdog()
                if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]:
                    self.chatbot.pop(-1)  # remove the last line
                if "等待您的进一步指令" in self.chatbot[-1][-1]:
                    self.chatbot.pop(-1)  # remove the last line
                if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
                    self.chatbot.pop(-1)  # remove the last line
                msg = self.parent_conn.recv()  # PipeCom
                if msg.cmd == "done":
                    self.chatbot.append([f"结束", msg.content])
                    self.cnt += 1
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                    self.terminate()
                    break
                if msg.cmd == "show":
                    yield from self.overwatch_workdir_file_change()
                    notice = ""
                    if repeated: notice = "(自动忽略重复的输入)"
                    self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content])
                    self.cnt += 1
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                if msg.cmd == "interact":
                    yield from self.overwatch_workdir_file_change()
                    self.chatbot.append([f"程序抵达用户反馈节点.", msg.content +
                                         "\n\n等待您的进一步指令." +
                                         "\n\n(1) 一般情况下您不需要说什么, 清空输入区, 然后直接点击“提交”以继续. " +
                                         "\n\n(2) 如果您需要补充些什么, 输入要反馈的内容, 直接点击“提交”以继续. " +
                                         "\n\n(3) 如果您想终止程序, 输入exit, 直接点击“提交”以终止AutoGen并解锁. "
                                         ])
                    yield from update_ui(chatbot=self.chatbot, history=self.history)
                    # do not terminate here; leave the subprocess_worker instance alive
                    return "wait_feedback"
            else:
                self.feed_heartbeat_watchdog()
                if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]:
                    # begin_waiting_time = time.time()
                    self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"])
                self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")]
                yield from update_ui(chatbot=self.chatbot, history=self.history)
                # if time.time() - begin_waiting_time > patience:
                #     self.chatbot.append([f"结束", "等待超时, 终止AutoGen程序。"])
                #     yield from update_ui(chatbot=self.chatbot, history=self.history)
                #     self.terminate()
                #     return "terminate"

        self.terminate()
        return "terminate"

    def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
        # ⭐⭐ run in subprocess
        patience = 5 * 60
        begin_waiting_time = time.time()
        self.child_conn.send(PipeCom("interact", wait_msg))
        while True:
            time.sleep(0.5)
            if self.child_conn.poll():
                wait_success = True
                break
            if time.time() - begin_waiting_time > patience:
                self.child_conn.send(PipeCom("done", ""))
                wait_success = False
                break
        return wait_success
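The overwatch_workdir_file_change method above boils down to an mtime-diff scan: remember each file's modification time and report paths that are new or have changed since the last scan. A standalone sketch of just that step:

import os

def scan_changes(path, seen):
    changed = []
    for root, dirs, files in os.walk(path):
        for name in files:
            fp = os.path.join(root, name)
            mtime = os.stat(fp).st_mtime
            if seen.get(fp) != mtime:      # new file, or modified since last scan
                seen[fp] = mtime
                changed.append(fp)
    return changed

seen = {}
print(scan_changes('.', seen))  # first scan: everything counts as "new"
print(scan_changes('.', seen))  # second scan: [] unless something changed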
crazy_functions/agent_fns/watchdog.py (new file, 28 lines)
@@ -0,0 +1,28 @@
import threading, time

class WatchDog():
    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
        self.last_feed = None
        self.timeout = timeout
        self.bark_fn = bark_fn
        self.interval = interval
        self.msg = msg
        self.kill_dog = False

    def watch(self):
        while True:
            if self.kill_dog: break
            if time.time() - self.last_feed > self.timeout:
                if len(self.msg) > 0: print(self.msg)
                self.bark_fn()
                break
            time.sleep(self.interval)

    def begin_watch(self):
        self.last_feed = time.time()
        th = threading.Thread(target=self.watch)
        th.daemon = True
        th.start()

    def feed(self):
        self.last_feed = time.time()
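A hedged usage sketch for WatchDog: if feed() is not called within `timeout` seconds, bark_fn fires once and the watcher thread exits. The timings below are illustrative:

import time

dog = WatchDog(timeout=2, bark_fn=lambda: print("no heartbeat, terminating"), interval=0.5)
dog.begin_watch()
for _ in range(3):
    time.sleep(1)
    dog.feed()        # heartbeat: keeps the dog quiet
time.sleep(3)         # stop feeding: bark_fn fires after about 2 seconds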
@@ -32,7 +32,7 @@ def string_to_options(arguments):
     return args

 @CatchException
-def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text entered by the user in the input field, e.g. a passage to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed through
@@ -40,7 +40,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, the preceding context
     system_prompt   the silent system prompt for gpt
-    web_port        the port the software is currently running on
+    user_request    information about the current user's request (IP address etc.)
     """
     history = []  # clear the history to avoid input overflow
     chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
@@ -80,7 +80,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst


 @CatchException
-def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
     """
     txt             text entered by the user in the input field, e.g. a passage to translate or a path containing files to process
     llm_kwargs      gpt model parameters such as temperature and top_p; usually just passed through
@@ -88,7 +88,7 @@ def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
     chatbot         handle of the chat display box, used to show output to the user
     history         chat history, the preceding context
     system_prompt   the silent system prompt for gpt
-    web_port        the port the software is currently running on
+    user_request    information about the current user's request (IP address etc.)
     """
     import subprocess
     history = []  # clear the history to avoid input overflow
@@ -1,9 +1,11 @@
-from toolbox import update_ui, get_conf, trimmed_format_exc
+from toolbox import update_ui, get_conf, trimmed_format_exc, get_max_token, Singleton
 import threading
 import os
 import logging

 def input_clipping(inputs, history, max_token_limit):
     import numpy as np
-    from request_llm.bridge_all import model_info
+    from request_llms.bridge_all import model_info
     enc = model_info["gpt-3.5-turbo"]['tokenizer']
     def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
@@ -61,18 +63,21 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     """
     import time
     from concurrent.futures import ThreadPoolExecutor
-    from request_llm.bridge_all import predict_no_ui_long_connection
+    from request_llms.bridge_all import predict_no_ui_long_connection
     # user feedback
     chatbot.append([inputs_show_user, ""])
     yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
     executor = ThreadPoolExecutor(max_workers=16)
     mutable = ["", time.time(), ""]
+    # watchdog patience
+    watch_dog_patience = 5
     # the request task
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
         while True:
             # watchdog error
-            if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
+            if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
                 raise RuntimeError("检测到程序终止。")
             try:
                 # [case 1]: completed successfully
@@ -87,7 +92,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
                 # [chosen handling] estimate the ratio and keep as much text as possible
                 from toolbox import get_reduce_token_percent
                 p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
-                MAX_TOKEN = 4096
+                MAX_TOKEN = get_max_token(llm_kwargs)
                 EXCEED_ALLO = 512 + 512 * exceeded_cnt
                 inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                 mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
@@ -134,6 +139,8 @@ def can_multi_process(llm):
     if llm.startswith('gpt-'): return True
     if llm.startswith('api2d-'): return True
     if llm.startswith('azure-'): return True
+    if llm.startswith('spark'): return True
+    if llm.startswith('zhipuai'): return True
     return False

 def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
@@ -172,11 +179,11 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     """
     import time, random
     from concurrent.futures import ThreadPoolExecutor
-    from request_llm.bridge_all import predict_no_ui_long_connection
+    from request_llms.bridge_all import predict_no_ui_long_connection
     assert len(inputs_array) == len(history_array)
     assert len(inputs_array) == len(sys_prompt_array)
     if max_workers == -1:  # read from the config file
-        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
+        try: max_workers = get_conf('DEFAULT_WORKER_NUM')
         except: max_workers = 8
         if max_workers <= 0: max_workers = 3
     # disable multi-threading for chatglm; it may cause severe lag
@@ -191,19 +198,21 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     # cross-thread state
     mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]

+    # watchdog patience
+    watch_dog_patience = 5

     # worker-thread task
     def _req_gpt(index, inputs, history, sys_prompt):
         gpt_say = ""
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
         mutable[index][2] = "执行中"
+        detect_timeout = lambda: len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience
         while True:
             # watchdog error
-            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
-                raise RuntimeError("检测到程序终止。")
+            if detect_timeout(): raise RuntimeError("检测到程序终止。")
             try:
                 # [case 1]: completed successfully
                 # time.sleep(10); raise RuntimeError("测试")
                 gpt_say = predict_no_ui_long_connection(
                     inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                     sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
@@ -211,13 +220,13 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 mutable[index][2] = "已成功"
                 return gpt_say
             except ConnectionAbortedError as token_exceeded_error:
-                # [case 2]: token overflow,
+                # [case 2]: token overflow
                 if handle_token_exceed:
                     exceeded_cnt += 1
                     # [chosen handling] estimate the ratio and keep as much text as possible
                     from toolbox import get_reduce_token_percent
                     p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
-                    MAX_TOKEN = 4096
+                    MAX_TOKEN = get_max_token(llm_kwargs)
                     EXCEED_ALLO = 512 + 512 * exceeded_cnt
                     inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                     gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
@@ -232,6 +241,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                     return gpt_say  # give up
             except:
                 # [case 3]: other errors
+                if detect_timeout(): raise RuntimeError("检测到程序终止。")
                 tb_str = '```\n' + trimmed_format_exc() + '```'
                 print(tb_str)
                 gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
@@ -248,6 +258,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
                 for i in range(wait):
                     mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
                 # begin retry
+                if detect_timeout(): raise RuntimeError("检测到程序终止。")
                 mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
                 continue  # retry
             else:
@@ -273,8 +284,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         # print something fun on the front end
         for thread_index, _ in enumerate(worker_done):
             print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
-                replace('\n', '').replace('```', '...').replace(
-                    ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
+                replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
             observe_win.append(print_something_really_funny)
         # print something fun on the front end
         stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
@@ -299,99 +309,10 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             gpt_res = f.result()
             chatbot.append([inputs_show_user, gpt_res])
             yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
-            time.sleep(0.3)
+            time.sleep(0.5)
     return gpt_response_collection

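The `mutable` list above is the observe-window contract shared with predict_no_ui_long_connection: cell 0 carries partial output, cell 1 the heartbeat timestamp, cell 2 a status string. A standalone sketch of that contract, with a dummy worker instead of a real model call:

import time, threading

watch_dog_patience = 5
mutable = ["", time.time(), "等待中"]

def worker(observe_window):
    for chunk in ["Hel", "lo ", "world"]:
        observe_window[0] += chunk        # partial output for the UI
        observe_window[1] = time.time()   # heartbeat
        time.sleep(0.1)
    observe_window[2] = "已成功"

t = threading.Thread(target=worker, args=(mutable,))
t.start()
while t.is_alive():
    if time.time() - mutable[1] > watch_dog_patience:
        raise RuntimeError("检测到程序终止。")   # stale heartbeat: give up
    time.sleep(0.2)
print(mutable[0], mutable[2])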
-def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
-    def cut(txt_tocut, must_break_at_empty_line):  # recursive
-        if get_token_fn(txt_tocut) <= limit:
-            return [txt_tocut]
-        else:
-            lines = txt_tocut.split('\n')
-            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
-            estimated_line_cut = int(estimated_line_cut)
-            for cnt in reversed(range(estimated_line_cut)):
-                if must_break_at_empty_line:
-                    if lines[cnt] != "":
-                        continue
-                print(cnt)
-                prev = "\n".join(lines[:cnt])
-                post = "\n".join(lines[cnt:])
-                if get_token_fn(prev) < limit:
-                    break
-            if cnt == 0:
-                raise RuntimeError("存在一行极长的文本!")
-            # print(len(post))
-            # recursively chain the list
-            result = [prev]
-            result.extend(cut(post, must_break_at_empty_line))
-            return result
-    try:
-        return cut(txt, must_break_at_empty_line=True)
-    except RuntimeError:
-        return cut(txt, must_break_at_empty_line=False)
-
-
-def force_breakdown(txt, limit, get_token_fn):
-    """
-    When the text cannot be split at punctuation or blank lines, fall back to brute-force cutting.
-    """
-    for i in reversed(range(len(txt))):
-        if get_token_fn(txt[:i]) < limit:
-            return txt[:i], txt[i:]
-    return "Tiktoken未知错误", "Tiktoken未知错误"
-
-def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
-    # recursive
-    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
-        if get_token_fn(txt_tocut) <= limit:
-            return [txt_tocut]
-        else:
-            lines = txt_tocut.split('\n')
-            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
-            estimated_line_cut = int(estimated_line_cut)
-            cnt = 0
-            for cnt in reversed(range(estimated_line_cut)):
-                if must_break_at_empty_line:
-                    if lines[cnt] != "":
-                        continue
-                prev = "\n".join(lines[:cnt])
-                post = "\n".join(lines[cnt:])
-                if get_token_fn(prev) < limit:
-                    break
-            if cnt == 0:
-                if break_anyway:
-                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
-                else:
-                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
-            # print(len(post))
-            # recursively chain the list
-            result = [prev]
-            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
-            return result
-    try:
-        # attempt 1: split at double blank lines (\n\n)
-        return cut(txt, must_break_at_empty_line=True)
-    except RuntimeError:
-        try:
-            # attempt 2: split at single blank lines (\n)
-            return cut(txt, must_break_at_empty_line=False)
-        except RuntimeError:
-            try:
-                # attempt 3: split at English full stops (.)
-                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)  # the Chinese full stop is deliberate; it serves as a marker
-                return [r.replace('。\n', '.') for r in res]
-            except RuntimeError as e:
-                try:
-                    # attempt 4: split at Chinese full stops (。)
-                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
-                    return [r.replace('。。\n', '。') for r in res]
-                except RuntimeError as e:
-                    # attempt 5: out of options; cut anywhere as a last resort
-                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)

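A usage sketch for the splitter shown above (removed here and relocated to crazy_functions/pdf_fns/breakdown_txt.py in this change). The character-count token function is a toy stand-in for the real tokenizer; whichever of the five attempts succeeds, every returned piece fits the budget:

get_token_fn = lambda txt: len(txt)   # toy counter: 1 token per character

text = "\n\n".join(["alpha " * 6, "beta " * 6, "gamma " * 6])
pieces = breakdown_txt_to_satisfy_token_limit_for_pdf(text, get_token_fn, limit=60)
for p in pieces:
    assert get_token_fn(p) <= 60      # the invariant the cascade guarantees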

 def read_and_clean_pdf_text(fp):
     """
@@ -469,6 +390,7 @@ def read_and_clean_pdf_text(fp):
                 '- ', '') for t in text_areas['blocks'] if 'lines' in t]

     ############################## <step 2: find the main body font> ##################################
+    try:
         fsize_statiscs = {}
         for span in meta_span:
             if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
@@ -476,7 +398,8 @@ def read_and_clean_pdf_text(fp):
         main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
         if REMOVE_FOOT_NOTE:
             give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT

+    except:
+        raise RuntimeError(f'抱歉, 我们暂时无法解析此PDF文档: {fp}。')
     ############################## <step 3: split and re-assemble> ##################################
     mega_sec = []
     sec = []
@@ -542,6 +465,9 @@ def read_and_clean_pdf_text(fp):
             return True
         else:
             return False
+    # some PDFs start the very first paragraph with a lowercase word; capitalize it to avoid indexing errors
+    if starts_with_lowercase_word(meta_txt[0]):
+        meta_txt[0] = meta_txt[0].capitalize()
     for _ in range(100):
         for index, block_txt in enumerate(meta_txt):
             if starts_with_lowercase_word(block_txt):
@@ -591,11 +517,16 @@ def get_files_from_everything(txt, type): # type='.md'
         # a remote file on the network
         import requests
         from toolbox import get_conf
-        proxies, = get_conf('proxies')
+        from toolbox import get_log_folder, gen_time_str
+        proxies = get_conf('proxies')
+        try:
             r = requests.get(txt, proxies=proxies)
-        with open('./gpt_log/temp'+type, 'wb+') as f: f.write(r.content)
-        project_folder = './gpt_log/'
-        file_manifest = ['./gpt_log/temp'+type]
+        except:
+            raise ConnectionRefusedError(f"无法下载资源{txt},请检查。")
+        path = os.path.join(get_log_folder(plugin_name='web_download'), gen_time_str()+type)
+        with open(path, 'wb+') as f: f.write(r.content)
+        project_folder = get_log_folder(plugin_name='web_download')
+        file_manifest = [path]
     elif txt.endswith(type):
         # a file given directly
         file_manifest = [txt]
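A hedged sketch of the hardened download path above: fail loudly on network errors and write into a per-plugin, timestamped location instead of a fixed ./gpt_log path. `log_dir` stands in for the repo's get_log_folder helper:

import os, time, requests

def fetch_to_workdir(url, log_dir="logs/web_download", proxies=None):
    try:
        r = requests.get(url, proxies=proxies)
        r.raise_for_status()
    except Exception:
        raise ConnectionRefusedError(f"无法下载资源{url},请检查。")
    os.makedirs(log_dir, exist_ok=True)
    # timestamped file name, preserving the original extension
    path = os.path.join(log_dir, time.strftime("%Y-%m-%d-%H-%M-%S") + os.path.splitext(url)[1])
    with open(path, 'wb+') as f:
        f.write(r.content)
    return log_dir, [path]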
@@ -615,139 +546,63 @@ def get_files_from_everything(txt, type): # type='.md'



-def Singleton(cls):
-    _instance = {}
-
-    def _singleton(*args, **kargs):
-        if cls not in _instance:
-            _instance[cls] = cls(*args, **kargs)
-        return _instance[cls]
-
-    return _singleton
-
-
-@Singleton
-class knowledge_archive_interface():
-    def __init__(self) -> None:
+class nougat_interface():
+    def __init__(self):
         self.threadLock = threading.Lock()
-        self.current_id = ""
-        self.kai_path = None
-        self.qa_handle = None
-        self.text2vec_large_chinese = None
-
-    def get_chinese_text2vec(self):
-        if self.text2vec_large_chinese is None:
-            # <------------------- warm up the text-vectorization module --------------->
-            from toolbox import ProxyNetworkActivate
-            print('Checking Text2vec ...')
-            from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-            with ProxyNetworkActivate():  # temporarily activate the proxy network
-                self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")
-
-        return self.text2vec_large_chinese
+
+    def nougat_with_timeout(self, command, cwd, timeout=3600):
+        import subprocess
+        from toolbox import ProxyNetworkActivate
+        logging.info(f'正在执行命令 {command}')
+        with ProxyNetworkActivate("Nougat_Download"):
+            process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
+        try:
+            stdout, stderr = process.communicate(timeout=timeout)
+        except subprocess.TimeoutExpired:
+            process.kill()
+            stdout, stderr = process.communicate()
+            print("Process timed out!")
+            return False
+        return True

-    def feed_archive(self, file_manifest, id="default"):
+    def NOUGAT_parse_pdf(self, fp, chatbot, history):
+        from toolbox import update_ui_lastest_msg
+
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
+                                         chatbot=chatbot, history=history, delay=0)
         self.threadLock.acquire()
-        # import uuid
-        self.current_id = id
-        from zh_langchain import construct_vector_store
-        self.qa_handle, self.kai_path = construct_vector_store(
-            vs_id=self.current_id,
-            files=file_manifest,
-            sentence_size=100,
-            history=[],
-            one_conent="",
-            one_content_segmentation="",
-            text2vec = self.get_chinese_text2vec(),
-        )
+        import glob, threading, os
+        from toolbox import get_log_folder, gen_time_str
+        dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
+        os.makedirs(dst)
+
+        yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
+                                         chatbot=chatbot, history=history, delay=0)
+        self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
+        res = glob.glob(os.path.join(dst, '*.mmd'))
+        if len(res) == 0:
+            self.threadLock.release()
+            raise RuntimeError("Nougat解析论文失败。")

-    def get_current_archive_id(self):
-        return self.current_id
-
-    def get_loaded_file(self):
-        return self.qa_handle.get_loaded_file()
-
-    def answer_with_archive_by_id(self, txt, id):
-        self.threadLock.acquire()
-        if not self.current_id == id:
-            self.current_id = id
-            from zh_langchain import construct_vector_store
-            self.qa_handle, self.kai_path = construct_vector_store(
-                vs_id=self.current_id,
-                files=[],
-                sentence_size=100,
-                history=[],
-                one_conent="",
-                one_content_segmentation="",
-                text2vec = self.get_chinese_text2vec(),
-            )
-        VECTOR_SEARCH_SCORE_THRESHOLD = 0
-        VECTOR_SEARCH_TOP_K = 4
-        CHUNK_SIZE = 512
-        resp, prompt = self.qa_handle.get_knowledge_based_conent_test(
-            query = txt,
-            vs_path = self.kai_path,
-            score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
-            vector_search_top_k=VECTOR_SEARCH_TOP_K,
-            chunk_conent=True,
-            chunk_size=CHUNK_SIZE,
-            text2vec = self.get_chinese_text2vec(),
-        )
         self.threadLock.release()
-        return resp, prompt
+        return res[0]
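nougat_with_timeout wraps a shell command in a kill-on-timeout guard. The same pattern in isolation; the command below is a placeholder:

import subprocess

def run_with_timeout(command, cwd=".", timeout=10):
    process = subprocess.Popen(command, shell=True, cwd=cwd)
    try:
        process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()            # the command overran its budget
        process.communicate()     # reap the killed process to avoid a zombie
        return False
    return process.returncode == 0

print(run_with_timeout("sleep 3", timeout=1))   # False on POSIX shells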
|
||||
def try_install_deps(deps, reload_m=[]):
    import subprocess, sys, importlib
    for dep in deps:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
    import site
    importlib.reload(site)
    for m in reload_m:
        importlib.reload(__import__(m))
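Example: install missing dependencies at runtime, then reload an already-imported module so the freshly installed package becomes importable. The package and module names below are illustrative only:

    try_install_deps(['nougat-ocr'], reload_m=['nougat'])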
class construct_html():
    def __init__(self) -> None:
        self.css = """
.row {
  display: flex;
  flex-wrap: wrap;
}

.column {
  flex: 1;
  padding: 10px;
}

.table-header {
  font-weight: bold;
  border-bottom: 1px solid black;
}

.table-row {
  border-bottom: 1px solid lightgray;
}

.table-cell {
  padding: 5px;
}
"""
        self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'

    def add_row(self, a, b):
        tmp = """
<div class="row table-row">
    <div class="column table-cell">REPLACE_A</div>
    <div class="column table-cell">REPLACE_B</div>
</div>
"""
        from toolbox import markdown_convertion
        tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
        tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
        self.html_string += tmp

    def save_file(self, file_name):
        with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
            f.write(self.html_string.encode('utf-8', 'ignore').decode())


def get_plugin_arg(plugin_kwargs, key, default):
    # if the argument is an empty string, drop it and fall back to the default
    if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
    # normal case
    return plugin_kwargs.get(key, default)
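A minimal usage sketch of construct_html (the file name is illustrative; save_file always writes under ./gpt_log/):

    ch = construct_html()
    ch.add_row(a="original paragraph", b="translated paragraph")   # rendered side by side
    ch.save_file("translation_report.html")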
crazy_functions/diagram_fns/file_tree.py  (new file, 122 lines)
@@ -0,0 +1,122 @@
import os
from textwrap import indent

class FileNode:
    def __init__(self, name):
        self.name = name
        self.children = []
        self.is_leaf = False
        self.level = 0
        self.parenting_ship = []
        self.comment = ""
        self.comment_maxlen_show = 50

    @staticmethod
    def add_linebreaks_at_spaces(string, interval=10):
        return '\n'.join(string[i:i+interval] for i in range(0, len(string), interval))

    def sanitize_comment(self, comment):
        if len(comment) > self.comment_maxlen_show: suf = '...'
        else: suf = ''
        comment = comment[:self.comment_maxlen_show]
        comment = comment.replace('\"', '').replace('`', '').replace('\n', '').replace('`', '').replace('$', '')
        comment = self.add_linebreaks_at_spaces(comment, 10)
        return '`' + comment + suf + '`'

    def add_file(self, file_path, file_comment):
        directory_names, file_name = os.path.split(file_path)
        current_node = self
        level = 1
        if directory_names == "":
            new_node = FileNode(file_name)
            current_node.children.append(new_node)
            new_node.is_leaf = True
            new_node.comment = self.sanitize_comment(file_comment)
            new_node.level = level
            current_node = new_node
        else:
            dnamesplit = directory_names.split(os.sep)
            for i, directory_name in enumerate(dnamesplit):
                found_child = False
                level += 1
                for child in current_node.children:
                    if child.name == directory_name:
                        current_node = child
                        found_child = True
                        break
                if not found_child:
                    new_node = FileNode(directory_name)
                    current_node.children.append(new_node)
                    new_node.level = level - 1
                    current_node = new_node
            term = FileNode(file_name)
            term.level = level
            term.comment = self.sanitize_comment(file_comment)
            term.is_leaf = True
            current_node.children.append(term)

    def print_files_recursively(self, level=0, code="R0"):
        print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
        for j, child in enumerate(self.children):
            child.print_files_recursively(level=level+1, code=code+str(j))
            self.parenting_ship.extend(child.parenting_ship)
            p1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
            p2 = """ --> """
            p3 = f"""{code+str(j)}[\"🗎{child.name}\"]""" if child.is_leaf else f"""{code+str(j)}[[\"📁{child.name}\"]]"""
            edge_code = p1 + p2 + p3
            if edge_code in self.parenting_ship:
                continue
            self.parenting_ship.append(edge_code)
        if self.comment != "":
            pc1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
            pc2 = f""" -.-x """
            pc3 = f"""C{code}[\"{self.comment}\"]:::Comment"""
            edge_code = pc1 + pc2 + pc3
            self.parenting_ship.append(edge_code)


MERMAID_TEMPLATE = r"""
```mermaid
flowchart LR
    %% <gpt_academic_hide_mermaid_code> 一个特殊标记,用于在生成mermaid图表时隐藏代码块
    classDef Comment stroke-dasharray: 5 5
    subgraph {graph_name}
{relationship}
    end
```
"""

def build_file_tree_mermaid_diagram(file_manifest, file_comments, graph_name):
    # Create the root node
    file_tree_struct = FileNode("root")
    # Build the tree structure
    for file_path, file_comment in zip(file_manifest, file_comments):
        file_tree_struct.add_file(file_path, file_comment)
    file_tree_struct.print_files_recursively()
    cc = "\n".join(file_tree_struct.parenting_ship)
    ccc = indent(cc, prefix=" "*8)
    return MERMAID_TEMPLATE.format(graph_name=graph_name, relationship=ccc)

if __name__ == "__main__":
    # File manifest
    file_manifest = [
        "cradle_void_terminal.ipynb",
        "tests/test_utils.py",
        "tests/test_plugins.py",
        "tests/test_llms.py",
        "config.py",
        "build/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/model_weights_0.bin",
        "crazy_functions/latex_fns/latex_actions.py",
        "crazy_functions/latex_fns/latex_toolbox.py"
    ]
    file_comments = [
        "根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件",
        "包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器",
        "用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法",
        "包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码",
        "用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数",
        "是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块",
        "用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器",
        "包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类",
    ]
    print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))
crazy_functions/game_fns/game_ascii_art.py  (new file, 42 lines)
@@ -0,0 +1,42 @@
from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random


class MiniGame_ASCII_Art(GptAcademicGameBaseState):
    def step(self, prompt, chatbot, history):
        if self.step_cnt == 0:
            chatbot.append(["我画你猜(动物)", "请稍等..."])
        else:
            if prompt.strip() == 'exit':
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg=f"谜底是{self.obj},游戏结束。", chatbot=chatbot, history=history, delay=0.)
                return
            chatbot.append([prompt, ""])
        yield from update_ui(chatbot=chatbot, history=history)

        if self.step_cnt == 0:
            self.lock_plugin(chatbot)
            self.cur_task = 'draw'

        if self.cur_task == 'draw':
            avail_obj = ["狗","猫","鸟","鱼","老鼠","蛇"]
            self.obj = random.choice(avail_obj)
            inputs = "I want to play a game called Guess the ASCII art. You can draw the ASCII art and I will try to guess it. " + \
                     f"This time you draw a {self.obj}. Note that you must not indicate what you have drawn in the text, and you should only produce the ASCII art wrapped by ```. "
            raw_res = predict_no_ui_long_connection(inputs=inputs, llm_kwargs=self.llm_kwargs, history=[], sys_prompt="")
            self.cur_task = 'identify user guess'
            res = get_code_block(raw_res)
            history += ['', f'the answer is {self.obj}', inputs, res]
            yield from update_ui_lastest_msg(lastmsg=res, chatbot=chatbot, history=history, delay=0.)

        elif self.cur_task == 'identify user guess':
            if is_same_thing(self.obj, prompt, self.llm_kwargs):
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg="你猜对了!", chatbot=chatbot, history=history, delay=0.)
            else:
                self.cur_task = 'identify user guess'
                yield from update_ui_lastest_msg(lastmsg="猜错了,再试试,输入“exit”获取答案。", chatbot=chatbot, history=history, delay=0.)
crazy_functions/game_fns/game_interactive_story.py  (new file, 212 lines)
@@ -0,0 +1,212 @@
prompts_hs = """ 请以“{headstart}”为开头,编写一个小说的第一幕。

- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 字数要求:第一幕的字数少于300字,且少于2个段落。
"""

prompts_interact = """ 小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,根据以上的情节,给出4种不同的后续剧情发展方向,每个发展方向都精明扼要地用一句话说明。稍后,我将在这4个选择中,挑选一种剧情发展。

输出格式例如:
1. 后续剧情发展1
2. 后续剧情发展2
3. 后续剧情发展3
4. 后续剧情发展4
"""


prompts_resume = """小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,我们正在互相讨论,确定后续剧情的发展。
在以下的剧情发展中,
「
{choice}
」
我认为更合理的是:{user_choice}。
请在前文的基础上(不要重复前文),围绕我选定的剧情情节,编写小说的下一幕。

- 禁止杜撰不符合我选择的剧情。
- 尽量短,不要包含太多情节,因为你接下来将会与用户互动续写下面的情节,要留出足够的互动空间。
- 不要重复前文。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 小说的下一幕字数少于300字,且少于2个段落。
"""


prompts_terminate = """小说的前文回顾:
「
{previously_on_story}
」

你是一个作家,我们正在互相讨论,确定后续剧情的发展。
现在,故事该结束了,我认为最合理的故事结局是:{user_choice}。

请在前文的基础上(不要重复前文),编写小说的最后一幕。

- 不要重复前文。
- 出现人物时,给出人物的名字。
- 积极地运用环境描写、人物描写等手法,让读者能够感受到你的故事世界。
- 积极地运用修辞手法,比如比喻、拟人、排比、对偶、夸张等等。
- 字数要求:最后一幕的字数少于1000字。
"""


from toolbox import CatchException, update_ui, update_ui_lastest_msg
from crazy_functions.multi_stage.multi_stage_utils import GptAcademicGameBaseState
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from request_llms.bridge_all import predict_no_ui_long_connection
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
import random


class MiniGame_ResumeStory(GptAcademicGameBaseState):
    story_headstart = [
        '先行者知道,他现在是全宇宙中唯一的一个人了。',
        '深夜,一个年轻人穿过天安门广场向纪念堂走去。在二十二世纪编年史中,计算机把他的代号定为M102。',
        '他知道,这最后一课要提前讲了。又一阵剧痛从肝部袭来,几乎使他晕厥过去。',
        '在距地球五万光年的远方,在银河系的中心,一场延续了两万年的星际战争已接近尾声。那里的太空中渐渐隐现出一个方形区域,仿佛灿烂的群星的背景被剪出一个方口。',
        '伊依一行三人乘坐一艘游艇在南太平洋上做吟诗航行,他们的目的地是南极,如果几天后能顺利到达那里,他们将钻出地壳去看诗云。',
        '很多人生来就会莫名其妙地迷上一样东西,仿佛他的出生就是要和这东西约会似的,正是这样,圆圆迷上了肥皂泡。'
    ]

    def begin_game_step_0(self, prompt, chatbot, history):
        # init game at step 0
        self.headstart = random.choice(self.story_headstart)
        self.story = []
        chatbot.append(["互动写故事", f"这次的故事开头是:{self.headstart}"])
        self.sys_prompt_ = '你是一个想象力丰富的杰出作家。正在与你的朋友互动,一起写故事,因此你每次写的故事段落应少于300字(结局除外)。'

    def generate_story_image(self, story_paragraph):
        try:
            from crazy_functions.图片生成 import gen_image
            prompt_ = predict_no_ui_long_connection(inputs=story_paragraph, llm_kwargs=self.llm_kwargs, history=[], sys_prompt='你需要根据用户给出的小说段落,进行简短的环境描写。要求:80字以内。')
            image_url, image_path = gen_image(self.llm_kwargs, prompt_, '512x512', model="dall-e-2", quality='standard', style='natural')
            return f'<br/><div align="center"><img src="file={image_path}"></div>'
        except:
            return ''

    def step(self, prompt, chatbot, history):

        """
        First, handle special cases such as game initialization
        """
        if self.step_cnt == 0:
            self.begin_game_step_0(prompt, chatbot, history)
            self.lock_plugin(chatbot)
            self.cur_task = 'head_start'
        else:
            if prompt.strip() == 'exit' or prompt.strip() == '结束剧情':
                # should we terminate the game here?
                self.delete_game = True
                yield from update_ui_lastest_msg(lastmsg=f"游戏结束。", chatbot=chatbot, history=history, delay=0.)
                return
            if '剧情收尾' in prompt:
                self.cur_task = 'story_terminate'
            # # well, game resumes
            # chatbot.append([prompt, ""])
            # update ui, don't keep the user waiting
            yield from update_ui(chatbot=chatbot, history=history)

        """
        Main game logic
        """
        if self.cur_task == 'head_start':
            """
            The first step of the game
            """
            inputs_ = prompts_hs.format(headstart=self.headstart)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, '故事开头', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            self.story.append(story_paragraph)
            # # illustration
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>' + self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # # build the guidance for the next plot step
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
            history_ = []
            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, '请在以下几种故事走向中,选择一种(当然,您也可以选择给出其他故事走向):', self.llm_kwargs,
                chatbot,
                history_,
                self.sys_prompt_
            )
            self.cur_task = 'user_choice'

        elif self.cur_task == 'user_choice':
            """
            Decide the next step of the story based on the user's input
            """
            if '请在以下几种故事走向中,选择一种' in chatbot[-1][0]: chatbot.pop(-1)
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_resume.format(previously_on_story=previously_on_story, choice=self.next_choices, user_choice=prompt)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, f'下一段故事(您的选择是:{prompt})。', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            self.story.append(story_paragraph)
            # # illustration
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>' + self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # # build the guidance for the next plot step
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_interact.format(previously_on_story=previously_on_story)
            history_ = []
            self.next_choices = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_,
                '请在以下几种故事走向中,选择一种。当然,您也可以给出您心中的其他故事走向。另外,如果您希望剧情立即收尾,请输入剧情走向,并以“剧情收尾”四个字提示程序。', self.llm_kwargs,
                chatbot,
                history_,
                self.sys_prompt_
            )
            self.cur_task = 'user_choice'

        elif self.cur_task == 'story_terminate':
            """
            Decide the ending of the story based on the user's input
            """
            previously_on_story = ""
            for s in self.story:
                previously_on_story += s + '\n'
            inputs_ = prompts_terminate.format(previously_on_story=previously_on_story, user_choice=prompt)
            history_ = []
            story_paragraph = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs_, f'故事收尾(您的选择是:{prompt})。', self.llm_kwargs,
                chatbot, history_, self.sys_prompt_
            )
            # # illustration
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>正在生成插图中 ...', chatbot=chatbot, history=history, delay=0.)
            yield from update_ui_lastest_msg(lastmsg=story_paragraph + '<br/>' + self.generate_story_image(story_paragraph), chatbot=chatbot, history=history, delay=0.)

            # terminate the game
            self.delete_game = True
            return
crazy_functions/game_fns/game_utils.py  (new file, 35 lines)
@@ -0,0 +1,35 @@
from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
from request_llms.bridge_all import predict_no_ui_long_connection

def get_code_block(reply):
    import re
    pattern = r"```([\s\S]*?)```"           # regex pattern to match code blocks
    matches = re.findall(pattern, reply)    # find all code blocks in the text
    if len(matches) == 1:
        return "```" + matches[0] + "```"   # code block
    raise RuntimeError("GPT is not generating proper code.")

def is_same_thing(a, b, llm_kwargs):
    from pydantic import BaseModel, Field
    class IsSameThing(BaseModel):
        is_same_thing: bool = Field(description="determine whether the two objects are the same thing.", default=False)

    def run_gpt_fn(inputs, sys_prompt, history=[]):
        return predict_no_ui_long_connection(
            inputs=inputs, llm_kwargs=llm_kwargs,
            history=history, sys_prompt=sys_prompt, observe_window=[]
        )

    gpt_json_io = GptJsonIO(IsSameThing)
    inputs_01 = "Identify whether the user input and the target are the same thing: \n target object: {a} \n user input object: {b} \n\n\n".format(a=a, b=b)
    inputs_01 += "\n\n\n Note that the user may describe the target object with a different language, e.g. cat and 猫 are the same thing."
    analyze_res_cot_01 = run_gpt_fn(inputs_01, "", [])

    inputs_02 = inputs_01 + gpt_json_io.format_instructions
    analyze_res = run_gpt_fn(inputs_02, "", [inputs_01, analyze_res_cot_01])

    try:
        res = gpt_json_io.generate_output_auto_repair(analyze_res, run_gpt_fn)
        return res.is_same_thing
    except JsonStringError as e:
        return False
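get_code_block keeps the backtick fences, so the returned string renders as a code block in the chat UI. A quick sketch with an illustrative reply:

    reply = "Sure:\n```\n /\\_/\\\n( o.o )\n```\nGuess!"
    print(get_code_block(reply))   # prints the fenced ASCII art, fences included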
crazy_functions/gen_fns/gen_fns_shared.py  (new file, 70 lines)
@@ -0,0 +1,70 @@
import time
import importlib.util
import multiprocessing
from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
from toolbox import CatchException, update_ui, is_the_upload_folder
from toolbox import promote_file_to_downloadzone, update_ui_lastest_msg

def get_class_name(class_string):
    import re
    # Use a regex to extract the class name
    class_name = re.search(r'class (\w+)\(', class_string).group(1)
    return class_name

def try_make_module(code, chatbot):
    module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
    fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py'
    with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
    promote_file_to_downloadzone(fn_path, chatbot=chatbot)
    class_name = get_class_name(code)
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict))
    # the subprocess only has 10 seconds to run
    p.start(); p.join(timeout=10)
    if p.is_alive(): p.terminate(); p.join()
    p.close()
    return return_dict["success"], return_dict['traceback']

# check whether the generated class can be imported and instantiated
def is_function_successfully_generated(fn_path, class_name, return_dict):
    return_dict['success'] = False
    return_dict['traceback'] = ""
    try:
        # Create a spec for the module
        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
        # Load the module
        example_module = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(example_module)
        # Now you can use the module
        some_class = getattr(example_module, class_name)
        # Now you can create an instance of the class
        instance = some_class()
        return_dict['success'] = True
        return
    except:
        return_dict['traceback'] = trimmed_format_exc()
        return

def subprocess_worker(code, file_path, return_dict):
    return_dict['result'] = None
    return_dict['success'] = False
    return_dict['traceback'] = ""
    try:
        module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
        fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py'
        with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
        class_name = get_class_name(code)
        # Create a spec for the module
        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
        # Load the module
        example_module = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(example_module)
        # Now you can use the module
        some_class = getattr(example_module, class_name)
        # Now you can create an instance of the class
        instance = some_class()
        return_dict['result'] = instance.run(file_path)
        return_dict['success'] = True
    except:
        return_dict['traceback'] = trimmed_format_exc()
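A rough sketch of the verification path above. The plugin code string is illustrative, and passing an empty list as the chatbot is an assumption for a dry run:

    code = (
        "class GeneratedPlugin():\n"
        "    def run(self, file_path): return file_path\n"
    )
    success, tb = try_make_module(code, chatbot=[])   # import check runs in a 10-second subprocess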
crazy_functions/ipc_fns/mp.py  (new file, 37 lines)
@@ -0,0 +1,37 @@
import platform
import pickle
import multiprocessing

def run_in_subprocess_wrapper_func(v_args):
    func, args, kwargs, return_dict, exception_dict = pickle.loads(v_args)
    import sys
    try:
        result = func(*args, **kwargs)
        return_dict['result'] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict['exception'] = exc_info

def run_in_subprocess_with_timeout(func, timeout=60):
    if platform.system() == 'Linux':
        def wrapper(*args, **kwargs):
            return_dict = multiprocessing.Manager().dict()
            exception_dict = multiprocessing.Manager().dict()
            v_args = pickle.dumps((func, args, kwargs, return_dict, exception_dict))
            process = multiprocessing.Process(target=run_in_subprocess_wrapper_func, args=(v_args,))
            process.start()
            process.join(timeout)
            if process.is_alive():
                process.terminate()
                raise TimeoutError(f'功能单元{str(func)}未能在规定时间内完成任务')
            process.close()
            if 'exception' in exception_dict:
                # ooops, the subprocess ran into an exception
                exc_info = exception_dict['exception']
                raise exc_info[1].with_traceback(exc_info[2])
            if 'result' in return_dict.keys():
                # If the subprocess ran successfully, return the result
                return return_dict['result']
        return wrapper
    else:
        return func
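A usage sketch: wrap a callable that may hang so it cannot block the main process. On non-Linux systems the wrapper above is a no-op and simply returns func. The function below is illustrative:

    def slow_parse(path):
        return len(open(path, 'rb').read())

    safe_parse = run_in_subprocess_with_timeout(slow_parse, timeout=10)
    # result = safe_parse("paper.pdf")   # raises TimeoutError after 10s (Linux only)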
crazy_functions/json_fns/pydantic_io.py  (new file, 111 lines)
@@ -0,0 +1,111 @@
"""
|
||||
https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/model_io/output_parsers/pydantic.ipynb
|
||||
|
||||
Example 1.
|
||||
|
||||
# Define your desired data structure.
|
||||
class Joke(BaseModel):
|
||||
setup: str = Field(description="question to set up a joke")
|
||||
punchline: str = Field(description="answer to resolve the joke")
|
||||
|
||||
# You can add custom validation logic easily with Pydantic.
|
||||
@validator("setup")
|
||||
def question_ends_with_question_mark(cls, field):
|
||||
if field[-1] != "?":
|
||||
raise ValueError("Badly formed question!")
|
||||
return field
|
||||
|
||||
|
||||
Example 2.
|
||||
|
||||
# Here's another example, but with a compound typed field.
|
||||
class Actor(BaseModel):
|
||||
name: str = Field(description="name of an actor")
|
||||
film_names: List[str] = Field(description="list of names of films they starred in")
|
||||
"""
|
||||
|
||||
import json, re, logging
|
||||
|
||||
|
||||
PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
|
||||
|
||||
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
|
||||
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
|
||||
|
||||
Here is the output schema:
|
||||
```
|
||||
{schema}
|
||||
```"""
|
||||
|
||||
|
||||
PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
|
||||
```
|
||||
{schema}
|
||||
```"""
|
||||
|
||||
class JsonStringError(Exception): ...
|
||||
|
||||
class GptJsonIO():
|
||||
|
||||
def __init__(self, schema, example_instruction=True):
|
||||
self.pydantic_object = schema
|
||||
self.example_instruction = example_instruction
|
||||
self.format_instructions = self.generate_format_instructions()
|
||||
|
||||
def generate_format_instructions(self):
|
||||
schema = self.pydantic_object.schema()
|
||||
|
||||
# Remove extraneous fields.
|
||||
reduced_schema = schema
|
||||
if "title" in reduced_schema:
|
||||
del reduced_schema["title"]
|
||||
if "type" in reduced_schema:
|
||||
del reduced_schema["type"]
|
||||
# Ensure json in context is well-formed with double quotes.
|
||||
if self.example_instruction:
|
||||
schema_str = json.dumps(reduced_schema)
|
||||
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
|
||||
else:
|
||||
return PYDANTIC_FORMAT_INSTRUCTIONS_SIMPLE.format(schema=schema_str)
|
||||
|
||||
def generate_output(self, text):
|
||||
# Greedy search for 1st json candidate.
|
||||
match = re.search(
|
||||
r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL
|
||||
)
|
||||
json_str = ""
|
||||
if match: json_str = match.group()
|
||||
json_object = json.loads(json_str, strict=False)
|
||||
final_object = self.pydantic_object.parse_obj(json_object)
|
||||
return final_object
|
||||
|
||||
def generate_repair_prompt(self, broken_json, error):
|
||||
prompt = "Fix a broken json string.\n\n" + \
|
||||
"(1) The broken json string need to fix is: \n\n" + \
|
||||
"```" + "\n" + \
|
||||
broken_json + "\n" + \
|
||||
"```" + "\n\n" + \
|
||||
"(2) The error message is: \n\n" + \
|
||||
error + "\n\n" + \
|
||||
"Now, fix this json string. \n\n"
|
||||
return prompt
|
||||
|
||||
def generate_output_auto_repair(self, response, gpt_gen_fn):
|
||||
"""
|
||||
response: string containing canidate json
|
||||
gpt_gen_fn: gpt_gen_fn(inputs, sys_prompt)
|
||||
"""
|
||||
try:
|
||||
result = self.generate_output(response)
|
||||
except Exception as e:
|
||||
try:
|
||||
logging.info(f'Repairing json:{response}')
|
||||
repair_prompt = self.generate_repair_prompt(broken_json = response, error=repr(e))
|
||||
result = self.generate_output(gpt_gen_fn(repair_prompt, self.format_instructions))
|
||||
logging.info('Repaire json success.')
|
||||
except Exception as e:
|
||||
# 没辙了,放弃治疗
|
||||
logging.info('Repaire json fail.')
|
||||
raise JsonStringError('Cannot repair json.', str(e))
|
||||
return result
|
||||
|
||||
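GptJsonIO is consumed the way game_utils.is_same_thing does above. A minimal sketch using the Joke model from the module docstring; run_gpt_fn stands in for a real model call with signature run_gpt_fn(inputs, sys_prompt) -> str:

    from pydantic import BaseModel, Field

    class Joke(BaseModel):
        setup: str = Field(description="question to set up a joke")
        punchline: str = Field(description="answer to resolve the joke")

    gpt_json_io = GptJsonIO(Joke)
    inputs = "Tell me a joke. " + gpt_json_io.format_instructions
    # llm_reply = run_gpt_fn(inputs, "")
    # joke = gpt_json_io.generate_output_auto_repair(llm_reply, run_gpt_fn)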
@@ -1,9 +1,10 @@
-from toolbox import update_ui, update_ui_lastest_msg    # 刷新Gradio前端界面
-from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
+from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
+from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone
 from .latex_toolbox import PRESERVE, TRANSFORM
 from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
 from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
 from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
+from .latex_toolbox import find_title_and_abs

 import os, shutil
 import re
@@ -90,7 +91,18 @@ class LatexPaperSplit():
             "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
         # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者)
         self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
+        self.title = "unknown"
+        self.abstract = "unknown"
+
+    def read_title_and_abstract(self, txt):
+        try:
+            title, abstract = find_title_and_abs(txt)
+            if title is not None:
+                self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '')
+            if abstract is not None:
+                self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '')
+        except:
+            pass

     def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
         """
@@ -163,9 +175,8 @@ class LatexPaperFileGroup():
         self.sp_file_contents = []
         self.sp_file_index = []
         self.sp_file_tag = []

-        # count_token
-        from request_llm.bridge_all import model_info
+        from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
         self.get_token_num = get_token_num
@@ -180,13 +191,12 @@ class LatexPaperFileGroup():
                 self.sp_file_index.append(index)
                 self.sp_file_tag.append(self.file_paths[index])
             else:
-                from ..crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
+                from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
+                segments = breakdown_text_to_satisfy_token_limit(file_content, max_token_limit)
                 for j, segment in enumerate(segments):
                     self.sp_file_contents.append(segment)
                     self.sp_file_index.append(index)
                     self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-        print('Segmentation: done')
+
     def merge_result(self):
         self.file_result = ["" for _ in range(len(self.file_paths))]
@@ -234,8 +244,8 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
     chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
     lps = LatexPaperSplit()
+    lps.read_title_and_abstract(merged_content)
     res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
-
     # <-------- 拆分过长的latex片段 ---------->
     pfg = LatexPaperFileGroup()
     for index, r in enumerate(res):
@@ -256,12 +266,19 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin

     else:
         # <-------- gpt 多线程请求 ---------->
+        history_array = [[""] for _ in range(n_split)]
+        # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
+        # if LATEX_EXPERIMENTAL:
+        #     paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`"
+        #     paper_meta_max_len = 888
+        #     history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]
+
         gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
             inputs_array=inputs_array,
             inputs_show_user_array=inputs_show_user_array,
             llm_kwargs=llm_kwargs,
             chatbot=chatbot,
-            history_array=[[""] for _ in range(n_split)],
+            history_array=history_array,
             sys_prompt_array=sys_prompt_array,
             # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
             scroller_max_len = 40
@@ -363,7 +380,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
     if mode!='translate_zh':
         yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history)   # 刷新Gradio前端界面
         print(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
-        ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex')
+        ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex', os.getcwd())

     yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history)   # 刷新Gradio前端界面
     ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder)
@@ -385,7 +402,7 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
         result_pdf = pj(work_folder_modified, f'merge_diff.pdf')    # get pdf path
         promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
     if modified_pdf_success:
-        yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history)    # 刷新Gradio前端界面
+        yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 正在尝试生成对比PDF, 请稍候 ...', chatbot, history)    # 刷新Gradio前端界面
         result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf')  # get pdf path
         origin_pdf = pj(work_folder_original, f'{main_file_original}.pdf')  # get pdf path
         if os.path.exists(pj(work_folder, '..', 'translation')):
@@ -397,8 +414,11 @@ def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_f
             from .latex_toolbox import merge_pdfs
             concat_pdf = pj(work_folder_modified, f'comparison.pdf')
             merge_pdfs(origin_pdf, result_pdf, concat_pdf)
+            if os.path.exists(pj(work_folder, '..', 'translation')):
+                shutil.copyfile(concat_pdf, pj(work_folder, '..', 'translation', 'comparison.pdf'))
             promote_file_to_downloadzone(concat_pdf, rename_file=None, chatbot=chatbot)  # promote file to web UI
         except Exception as e:
+            print(e)
             pass
         return True # 成功啦
     else:
@@ -423,7 +443,7 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
     # write html
     try:
         import shutil
-        from ..crazy_utils import construct_html
+        from crazy_functions.pdf_fns.report_gen_html import construct_html
         from toolbox import gen_time_str
         ch = construct_html()
         orig = ""
@@ -439,9 +459,9 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
             trans = k
             ch.add_row(a=orig, b=trans)
         create_report_file_name = f"{gen_time_str()}.trans.html"
-        ch.save_file(create_report_file_name)
-        shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name))
-        promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot)
+        res = ch.save_file(create_report_file_name)
+        shutil.copyfile(res, pj(project_folder, create_report_file_name))
+        promote_file_to_downloadzone(file=res, chatbot=chatbot)
     except:
         from toolbox import trimmed_format_exc
         print('writing html result failed:', trimmed_format_exc())
@@ -1,15 +1,18 @@
 import os, shutil
 import re
 import numpy as np

 PRESERVE = 0
 TRANSFORM = 1

 pj = os.path.join

-class LinkedListNode():
+
+class LinkedListNode:
     """
     Linked List Node
     """
+
     def __init__(self, string, preserve=True) -> None:
         self.string = string
         self.preserve = preserve
@@ -18,19 +21,22 @@ class LinkedListNode():
         # self.begin_line = 0
         # self.begin_char = 0

+
 def convert_to_linklist(text, mask):
     root = LinkedListNode("", preserve=True)
     current_node = root
     for c, m, i in zip(text, mask, range(len(text))):
-        if (m==PRESERVE and current_node.preserve) \
-            or (m==TRANSFORM and not current_node.preserve):
+        if (m == PRESERVE and current_node.preserve) or (
+            m == TRANSFORM and not current_node.preserve
+        ):
             # add
             current_node.string += c
         else:
-            current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
+            current_node.next = LinkedListNode(c, preserve=(m == PRESERVE))
             current_node = current_node.next
     return root

+
 def post_process(root):
     # 修复括号
     node = root
@@ -38,21 +44,24 @@ def post_process(root):
         string = node.string
         if node.preserve:
             node = node.next
-            if node is None: break
+            if node is None:
+                break
             continue

         def break_check(string):
             str_stack = [""]  # (lv, index)
             for i, c in enumerate(string):
-                if c == '{':
-                    str_stack.append('{')
-                elif c == '}':
+                if c == "{":
+                    str_stack.append("{")
+                elif c == "}":
                     if len(str_stack) == 1:
-                        print('stack fix')
+                        print("stack fix")
                         return i
                     str_stack.pop(-1)
                 else:
                     str_stack[-1] += c
             return -1

         bp = break_check(string)

         if bp == -1:
@@ -69,51 +78,66 @@ def post_process(root):
             node.next = q

         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 屏蔽空行和太短的句子
     node = root
     while True:
-        if len(node.string.strip('\n').strip(''))==0: node.preserve = True
-        if len(node.string.strip('\n').strip(''))<42: node.preserve = True
+        if len(node.string.strip("\n").strip("")) == 0:
+            node.preserve = True
+        if len(node.string.strip("\n").strip("")) < 42:
+            node.preserve = True
         node = node.next
-        if node is None: break
+        if node is None:
+            break
     node = root
     while True:
         if node.next and node.preserve and node.next.preserve:
             node.string += node.next.string
             node.next = node.next.next
         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 将前后断行符脱离
     node = root
     prev_node = None
     while True:
         if not node.preserve:
-            lstriped_ = node.string.lstrip().lstrip('\n')
-            if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
-                prev_node.string += node.string[:-len(lstriped_)]
+            lstriped_ = node.string.lstrip().lstrip("\n")
+            if (
+                (prev_node is not None)
+                and (prev_node.preserve)
+                and (len(lstriped_) != len(node.string))
+            ):
+                prev_node.string += node.string[: -len(lstriped_)]
             node.string = lstriped_
-            rstriped_ = node.string.rstrip().rstrip('\n')
-            if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
-                node.next.string = node.string[len(rstriped_):] + node.next.string
+            rstriped_ = node.string.rstrip().rstrip("\n")
+            if (
+                (node.next is not None)
+                and (node.next.preserve)
+                and (len(rstriped_) != len(node.string))
+            ):
+                node.next.string = node.string[len(rstriped_) :] + node.next.string
             node.string = rstriped_
-        # =====
+        # =-=-=
         prev_node = node
         node = node.next
-        if node is None: break
+        if node is None:
+            break

     # 标注节点的行数范围
     node = root
     n_line = 0
     expansion = 2
     while True:
-        n_l = node.string.count('\n')
-        node.range = [n_line-expansion, n_line+n_l+expansion]  # 失败时,扭转的范围
-        n_line = n_line+n_l
+        n_l = node.string.count("\n")
+        node.range = [n_line - expansion, n_line + n_l + expansion]  # 失败时,扭转的范围
+        n_line = n_line + n_l
         node = node.next
-        if node is None: break
+        if node is None:
+            break
     return root
@@ -131,12 +155,14 @@ def set_forbidden_text(text, mask, pattern, flags=0):
     you can mask out (mask = PRESERVE so that text become untouchable for GPT)
     everything between "\begin{equation}" and "\end{equation}"
     """
-    if isinstance(pattern, list): pattern = '|'.join(pattern)
+    if isinstance(pattern, list):
+        pattern = "|".join(pattern)
     pattern_compile = re.compile(pattern, flags)
     for res in pattern_compile.finditer(text):
-        mask[res.span()[0]:res.span()[1]] = PRESERVE
+        mask[res.span()[0] : res.span()[1]] = PRESERVE
     return text, mask


 def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
     """
     Move area out of preserve area (make text editable for GPT)
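The PRESERVE/TRANSFORM mask drives every function in this file. A minimal sketch of how a caller might flag an equation as untouchable (the text and pattern are illustrative):

    import re
    import numpy as np
    text = r"intro \begin{equation} x^2 \end{equation} outro"
    mask = np.zeros(len(text), dtype=np.uint8) + TRANSFORM   # everything editable by default
    text, mask = set_forbidden_text(text, mask, r"\\begin\{equation\}.*?\\end\{equation\}", flags=re.DOTALL)
    # the equation span is now PRESERVE and is kept away from GPT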
@@ -144,17 +170,19 @@ def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
     e.g.
     \begin{abstract} blablablablablabla. \end{abstract}
     """
-    if isinstance(pattern, list): pattern = '|'.join(pattern)
+    if isinstance(pattern, list):
+        pattern = "|".join(pattern)
     pattern_compile = re.compile(pattern, flags)
     for res in pattern_compile.finditer(text):
         if not forbid_wrapper:
-            mask[res.span()[0]:res.span()[1]] = TRANSFORM
+            mask[res.span()[0] : res.span()[1]] = TRANSFORM
         else:
-            mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE  # '\\begin{abstract}'
-            mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM  # abstract
-            mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE  # abstract
+            mask[res.regs[0][0] : res.regs[1][0]] = PRESERVE  # '\\begin{abstract}'
+            mask[res.regs[1][0] : res.regs[1][1]] = TRANSFORM  # abstract
+            mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE  # abstract
     return text, mask


 def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
     """
     Add a preserve text area in this paper (text become untouchable for GPT).
@@ -166,16 +194,22 @@ def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
     for res in pattern_compile.finditer(text):
         brace_level = -1
         p = begin = end = res.regs[0][0]
-        for _ in range(1024*16):
-            if text[p] == '}' and brace_level == 0: break
-            elif text[p] == '}': brace_level -= 1
-            elif text[p] == '{': brace_level += 1
+        for _ in range(1024 * 16):
+            if text[p] == "}" and brace_level == 0:
+                break
+            elif text[p] == "}":
+                brace_level -= 1
+            elif text[p] == "{":
+                brace_level += 1
             p += 1
-        end = p+1
+        end = p + 1
         mask[begin:end] = PRESERVE
     return text, mask

-def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
+
+def reverse_forbidden_text_careful_brace(
+    text, mask, pattern, flags=0, forbid_wrapper=True
+):
     """
     Move area out of preserve area (make text editable for GPT)
     count the number of the braces so as to catch compelete text area.
@@ -186,39 +220,57 @@ def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wr
     for res in pattern_compile.finditer(text):
         brace_level = 0
         p = begin = end = res.regs[1][0]
-        for _ in range(1024*16):
-            if text[p] == '}' and brace_level == 0: break
-            elif text[p] == '}': brace_level -= 1
-            elif text[p] == '{': brace_level += 1
+        for _ in range(1024 * 16):
+            if text[p] == "}" and brace_level == 0:
+                break
+            elif text[p] == "}":
+                brace_level -= 1
+            elif text[p] == "{":
+                brace_level += 1
             p += 1
         end = p
         mask[begin:end] = TRANSFORM
         if forbid_wrapper:
-            mask[res.regs[0][0]:begin] = PRESERVE
-            mask[end:res.regs[0][1]] = PRESERVE
+            mask[res.regs[0][0] : begin] = PRESERVE
+            mask[end : res.regs[0][1]] = PRESERVE
     return text, mask


 def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
     """
     Find all \begin{} ... \end{} text block that with less than limit_n_lines lines.
     Add it to preserve area
     """
     pattern_compile = re.compile(pattern, flags)

     def search_with_line_limit(text, mask):
         for res in pattern_compile.finditer(text):
             cmd = res.group(1)  # begin{what}
             this = res.group(2)  # content between begin and end
-            this_mask = mask[res.regs[2][0]:res.regs[2][1]]
-            white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
-                          'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
-            if (cmd in white_list) or this.count('\n') >= limit_n_lines:  # use a magical number 42
+            this_mask = mask[res.regs[2][0] : res.regs[2][1]]
+            white_list = [
+                "document",
+                "abstract",
+                "lemma",
+                "definition",
+                "sproof",
+                "em",
+                "emph",
+                "textit",
+                "textbf",
+                "itemize",
+                "enumerate",
+            ]
+            if (cmd in white_list) or this.count(
+                "\n"
+            ) >= limit_n_lines:  # use a magical number 42
                 this, this_mask = search_with_line_limit(this, this_mask)
-                mask[res.regs[2][0]:res.regs[2][1]] = this_mask
+                mask[res.regs[2][0] : res.regs[2][1]] = this_mask
             else:
-                mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
+                mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE
         return text, mask
-    return search_with_line_limit(text, mask)
+
+    return search_with_line_limit(text, mask)


 """
@@ -227,6 +279,7 @@ Latex Merge File
 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 """

+
 def find_main_tex_file(file_manifest, mode):
     """
     在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
@@ -234,28 +287,38 @@ def find_main_tex_file(file_manifest, mode):
     """
     canidates = []
     for texf in file_manifest:
-        if os.path.basename(texf).startswith('merge'):
+        if os.path.basename(texf).startswith("merge"):
             continue
-        with open(texf, 'r', encoding='utf8', errors='ignore') as f:
+        with open(texf, "r", encoding="utf8", errors="ignore") as f:
             file_content = f.read()
-        if r'\documentclass' in file_content:
+        if r"\documentclass" in file_content:
             canidates.append(texf)
         else:
             continue

     if len(canidates) == 0:
-        raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
+        raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)")
     elif len(canidates) == 1:
         return canidates[0]
     else:  # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
         canidates_score = []
         # 给出一些判定模板文档的词作为扣分项
-        unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
-        expected_words = ['\input', '\ref', '\cite']
+        unexpected_words = [
+            "\\LaTeX",
+            "manuscript",
+            "Guidelines",
+            "font",
+            "citations",
+            "rejected",
+            "blind review",
+            "reviewers",
+        ]
+        expected_words = ["\\input", "\\ref", "\\cite"]
         for texf in canidates:
             canidates_score.append(0)
-            with open(texf, 'r', encoding='utf8', errors='ignore') as f:
+            with open(texf, "r", encoding="utf8", errors="ignore") as f:
                 file_content = f.read()
+                file_content = rm_comments(file_content)
             for uw in unexpected_words:
                 if uw in file_content:
                     canidates_score[-1] -= 1
@@ -265,6 +328,7 @@ def find_main_tex_file(file_manifest, mode):
         select = np.argmax(canidates_score)  # 取评分最高者返回
         return canidates[select]

+
 def rm_comments(main_file):
     new_file_remove_comment_lines = []
     for l in main_file.splitlines():
@@ -273,23 +337,39 @@ def rm_comments(main_file):
             pass
         else:
             new_file_remove_comment_lines.append(l)
-    main_file = '\n'.join(new_file_remove_comment_lines)
+    main_file = "\n".join(new_file_remove_comment_lines)
     # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file)  # 将 \include 命令转换为 \input 命令
-    main_file = re.sub(r'(?<!\\)%.*', '', main_file)  # 使用正则表达式查找半行注释, 并替换为空字符串
+    main_file = re.sub(r"(?<!\\)%.*", "", main_file)  # 使用正则表达式查找半行注释, 并替换为空字符串
     return main_file


 def find_tex_file_ignore_case(fp):
     dir_name = os.path.dirname(fp)
     base_name = os.path.basename(fp)
-    if not base_name.endswith('.tex'): base_name+='.tex'
-    if os.path.exists(pj(dir_name, base_name)): return pj(dir_name, base_name)
-    # go case in-sensitive
+    # 如果输入的文件路径是正确的
+    if os.path.isfile(pj(dir_name, base_name)):
+        return pj(dir_name, base_name)
+    # 如果不正确,试着加上.tex后缀试试
+    if not base_name.endswith(".tex"):
+        base_name += ".tex"
+    if os.path.isfile(pj(dir_name, base_name)):
+        return pj(dir_name, base_name)
+    # 如果还找不到,解除大小写限制,再试一次
     import glob
-    for f in glob.glob(dir_name+'/*.tex'):
-        base_name_s = os.path.basename(fp)
-        if base_name_s.lower() == base_name.lower(): return f
+
+    for f in glob.glob(dir_name + "/*.tex"):
+        base_name_s = os.path.basename(fp)
+        base_name_f = os.path.basename(f)
+        if base_name_s.lower() == base_name_f.lower():
+            return f
+        # 试着加上.tex后缀试试
+        if not base_name_s.endswith(".tex"):
+            base_name_s += ".tex"
+        if base_name_s.lower() == base_name_f.lower():
+            return f
     return None


 def merge_tex_files_(project_foler, main_file, mode):
     """
     Merge Tex project recrusively
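A usage sketch of the lookup above, resolving the target of an \input{...} statement while tolerating a missing .tex suffix and case differences (paths are illustrative):

    fp = find_tex_file_ignore_case(pj(project_folder, "Chapter1"))
    # returns e.g. "<project>/chapter1.tex" if such a file exists, otherwise None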
@@ -298,15 +378,53 @@ def merge_tex_files_(project_foler, main_file, mode):
     for s in reversed([q for q in re.finditer(r"\\input\{(.*?)\}", main_file, re.M)]):
         f = s.group(1)
         fp = os.path.join(project_foler, f)
-        fp = find_tex_file_ignore_case(fp)
-        if fp:
-            with open(fp, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
+        fp_ = find_tex_file_ignore_case(fp)
+        if fp_:
+            try:
+                with open(fp_, "r", encoding="utf-8", errors="replace") as fx:
+                    c = fx.read()
+            except:
+                c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
         else:
-            raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
+            raise RuntimeError(f"找不到{fp},Tex源文件缺失!")
         c = merge_tex_files_(project_foler, c, mode)
-        main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
+        main_file = main_file[: s.span()[0]] + c + main_file[s.span()[1] :]
     return main_file


+def find_title_and_abs(main_file):
+    def extract_abstract_1(text):
+        pattern = r"\\abstract\{(.*?)\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_abstract_2(text):
+        pattern = r"\\begin\{abstract\}(.*?)\\end\{abstract\}"
+        match = re.search(pattern, text, re.DOTALL)
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    def extract_title(string):
+        pattern = r"\\title\{(.*?)\}"
+        match = re.search(pattern, string, re.DOTALL)
+
+        if match:
+            return match.group(1)
+        else:
+            return None
+
+    abstract = extract_abstract_1(main_file)
+    if abstract is None:
+        abstract = extract_abstract_2(main_file)
+    title = extract_title(main_file)
+    return title, abstract
+
+
 def merge_tex_files(project_foler, main_file, mode):
     """
     Merge Tex project recrusively
@@ -316,33 +434,91 @@ def merge_tex_files(project_foler, main_file, mode):
     main_file = merge_tex_files_(project_foler, main_file, mode)
     main_file = rm_comments(main_file)

-    if mode == 'translate_zh':
+    if mode == "translate_zh":
         # find paper documentclass
-        pattern = re.compile(r'\\documentclass.*\n')
+        pattern = re.compile(r"\\documentclass.*\n")
         match = pattern.search(main_file)
         assert match is not None, "Cannot find documentclass statement!"
         position = match.end()
-        add_ctex = '\\usepackage{ctex}\n'
-        add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
+        add_ctex = "\\usepackage{ctex}\n"
+        add_url = "\\usepackage{url}\n" if "{url}" not in main_file else ""
         main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
         # fontset=windows
         import platform
-        main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}",main_file)
-        main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}",main_file)
+
+        main_file = re.sub(
+            r"\\documentclass\[(.*?)\]{(.*?)}",
+            r"\\documentclass[\1,fontset=windows,UTF8]{\2}",
+            main_file,
+        )
+        main_file = re.sub(
+            r"\\documentclass{(.*?)}",
+            r"\\documentclass[fontset=windows,UTF8]{\1}",
+            main_file,
+        )
         # find paper abstract
-        pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
+        pattern_opt1 = re.compile(r"\\begin\{abstract\}.*\n")
         pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
         match_opt1 = pattern_opt1.search(main_file)
         match_opt2 = pattern_opt2.search(main_file)
-        assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
+        if (match_opt1 is None) and (match_opt2 is None):
+            # "Cannot find paper abstract section!"
+            main_file = insert_abstract(main_file)
+        match_opt1 = pattern_opt1.search(main_file)
+        match_opt2 = pattern_opt2.search(main_file)
+        assert (match_opt1 is not None) or (
+            match_opt2 is not None
+        ), "Cannot find paper abstract section!"
     return main_file


+insert_missing_abs_str = r"""
+\begin{abstract}
+The GPT-Academic program cannot find abstract section in this paper.
+\end{abstract}
+"""
+
+
+def insert_abstract(tex_content):
+    if "\\maketitle" in tex_content:
+        # find the position of "\maketitle"
+        find_index = tex_content.index("\\maketitle")
+        # find the nearest ending line
+        end_line_index = tex_content.find("\n", find_index)
+        # insert "abs_str" on the next line
+        modified_tex = (
+            tex_content[: end_line_index + 1]
+            + "\n\n"
+            + insert_missing_abs_str
+            + "\n\n"
+            + tex_content[end_line_index + 1 :]
+        )
+        return modified_tex
+    elif r"\begin{document}" in tex_content:
+        # find the position of "\begin{document}"
+        find_index = tex_content.index(r"\begin{document}")
+        # find the nearest ending line
+        end_line_index = tex_content.find("\n", find_index)
+        # insert "abs_str" on the next line
+        modified_tex = (
+            tex_content[: end_line_index + 1]
+            + "\n\n"
+            + insert_missing_abs_str
+            + "\n\n"
+            + tex_content[end_line_index + 1 :]
+        )
+        return modified_tex
+    else:
+        return tex_content
+
+
 """
 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 Post process
 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 """


 def mod_inbraket(match):
     """
     为啥chatgpt会把cite里面的逗号换成中文逗号呀
@@ -351,11 +527,12 @@ def mod_inbraket(match):
|
||||
cmd = match.group(1)
|
||||
str_to_modify = match.group(2)
|
||||
# modify the matched string
|
||||
str_to_modify = str_to_modify.replace(':', ':') # 前面是中文冒号,后面是英文冒号
|
||||
str_to_modify = str_to_modify.replace(',', ',') # 前面是中文逗号,后面是英文逗号
|
||||
str_to_modify = str_to_modify.replace(":", ":") # 前面是中文冒号,后面是英文冒号
|
||||
str_to_modify = str_to_modify.replace(",", ",") # 前面是中文逗号,后面是英文逗号
|
||||
# str_to_modify = 'BOOM'
|
||||
return "\\" + cmd + "{" + str_to_modify + "}"


def fix_content(final_tex, node_string):
    """
    Fix common GPT errors to increase success rate

@@ -367,9 +544,9 @@ def fix_content(final_tex, node_string):

    if "Traceback" in final_tex and "[Local Message]" in final_tex:
        final_tex = node_string  # something went wrong; restore the original text
    if node_string.count("\\begin") != final_tex.count("\\begin"):
        final_tex = node_string  # something went wrong; restore the original text
    if node_string.count("\_") > 0 and node_string.count("\_") > final_tex.count("\_"):
        # walk and replace any _ without \
        final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)

@@ -377,24 +554,32 @@ def fix_content(final_tex, node_string):
        # this function counts the number of { and }
        brace_level = 0
        for c in string:
            if c == "{":
                brace_level += 1
            elif c == "}":
                brace_level -= 1
        return brace_level
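
# Illustration (added, not part of the original diff): the loop above as a
# standalone helper; its enclosing def is elided by the hunk, so the name
# count_brace_level here is hypothetical.
def count_brace_level(string):
    brace_level = 0
    for c in string:
        if c == "{":
            brace_level += 1
        elif c == "}":
            brace_level -= 1
    return brace_level

assert count_brace_level(r"\textbf{a{b}c}") == 0
assert count_brace_level(r"\textbf{a{b}c") == 1   # one unclosed brace
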

    def join_most(tex_t, tex_o):
        # this function joins the translated string and the original string when something goes wrong
        p_t = 0
        p_o = 0

        def find_next(string, chars, begin):
            p = begin
            while p < len(string):
                if string[p] in chars:
                    return p, string[p]
                p += 1
            return None, None

        while True:
            res1, char = find_next(tex_o, ["{", "}"], p_o)
            if res1 is None:
                break
            res2, char = find_next(tex_t, [char], p_t)
            if res2 is None:
                break
            p_o = res1 + 1
            p_t = res2 + 1
        return tex_t[:p_t] + tex_o[p_o:]
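
# Illustration (added, not part of the original diff): join_most keeps the
# translated text up to the last brace both strings still agree on, then
# splices in the rest of the original, so brace pairing stays intact. The
# sketch assumes join_most is lifted out of fix_content to module scope.
_tex_o = r"intro {kept} tail {broken}"
_tex_t = r"INTRO {KEPT} TAIL "          # translation lost the last brace group
assert join_most(_tex_t, _tex_o) == r"INTRO {KEPT} tail {broken}"
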

@@ -404,9 +589,13 @@ def fix_content(final_tex, node_string):
        final_tex = join_most(final_tex, node_string)
    return final_tex


def compile_latex_with_timeout(command, cwd, timeout=60):
    import subprocess

    process = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
    )
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:

@@ -417,15 +606,51 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
    return True
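
# Illustration (added, not part of the original diff): a typical call; the
# command and working directory here are hypothetical. The TimeoutExpired
# handler elided by the hunk above presumably kills the process and returns False.
ok = compile_latex_with_timeout(
    "pdflatex -interaction=batchmode merge.tex", cwd="./latex_workdir", timeout=60
)
if not ok:
    print("LaTeX compilation failed or timed out")
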


def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
    import sys

    try:
        result = func(*args, **kwargs)
        return_dict["result"] = result
    except Exception as e:
        exc_info = sys.exc_info()
        exception_dict["exception"] = exc_info


def run_in_subprocess(func):
    import multiprocessing

    def wrapper(*args, **kwargs):
        return_dict = multiprocessing.Manager().dict()
        exception_dict = multiprocessing.Manager().dict()
        process = multiprocessing.Process(
            target=run_in_subprocess_wrapper_func,
            args=(func, args, kwargs, return_dict, exception_dict),
        )
        process.start()
        process.join()
        process.close()
        if "exception" in exception_dict:
            # ooops, the subprocess ran into an exception
            exc_info = exception_dict["exception"]
            raise exc_info[1].with_traceback(exc_info[2])
        if "result" in return_dict.keys():
            # If the subprocess ran successfully, return the result
            return return_dict["result"]

    return wrapper
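
# Illustration (added, not part of the original diff): any module-level
# function can be wrapped; it runs in a fresh process, and its return value
# (or exception) is marshalled back through the Manager dicts above.
def _square(x):
    return x * x

square_in_subprocess = run_in_subprocess(_square)

if __name__ == "__main__":    # guard needed on spawn-based platforms, e.g. Windows
    assert square_in_subprocess(7) == 49
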


def _merge_pdfs(pdf1_path, pdf2_path, output_path):
    import PyPDF2  # PyPDF2 has a serious memory leak; it is run in a subprocess so the memory can be reclaimed

    Percent = 0.95
    # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
    # Open the first PDF file
    with open(pdf1_path, "rb") as pdf1_file:
        pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
        # Open the second PDF file
        with open(pdf2_path, "rb") as pdf2_file:
            pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
            # Create a new PDF file to store the merged pages
            output_writer = PyPDF2.PdfFileWriter()

@@ -445,12 +670,25 @@ def merge_pdfs(pdf1_path, pdf2_path, output_path):
                page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
                # Create a new empty page with double width
                new_page = PyPDF2.PageObject.createBlankPage(
                    width=int(
                        int(page1.mediaBox.getWidth())
                        + int(page2.mediaBox.getWidth()) * Percent
                    ),
                    height=max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight()),
                )
                new_page.mergeTranslatedPage(page1, 0, 0)
                new_page.mergeTranslatedPage(
                    page2,
                    int(
                        int(page1.mediaBox.getWidth())
                        - int(page2.mediaBox.getWidth()) * (1 - Percent)
                    ),
                    0,
                )
                output_writer.addPage(new_page)
            # Save the merged PDF file
            with open(output_path, "wb") as output_file:
                output_writer.write(output_file)


merge_pdfs = run_in_subprocess(_merge_pdfs)  # PyPDF2 has a serious memory leak; run it in a subprocess so the memory can be reclaimed
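
# Illustration (added, not part of the original diff): the file names here are
# hypothetical. Pages are merged side by side, the second (original) page
# overlapping the first by 1 - Percent = 5% of its width. Requires the legacy
# PyPDF2 1.x API (PdfFileReader/PdfFileWriter) used above.
merge_pdfs("translated.pdf", "original.pdf", "comparison.pdf")
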

@@ -1,4 +1,106 @@
import time, logging, json, sys, struct
import numpy as np
from scipy.io.wavfile import WAVE_FORMAT

def write_numpy_to_wave(filename, rate, data, add_header=False):
    """
    Write a NumPy array as a WAV file.
    """
    def _array_tofile(fid, data):
        # ravel gives a c-contiguous buffer
        fid.write(data.ravel().view('b').data)

    if hasattr(filename, 'write'):
        fid = filename
    else:
        fid = open(filename, 'wb')

    fs = rate

    try:
        dkind = data.dtype.kind
        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
                                                 data.dtype.itemsize == 1)):
            raise ValueError("Unsupported data type '%s'" % data.dtype)

        header_data = b''

        header_data += b'RIFF'
        header_data += b'\x00\x00\x00\x00'
        header_data += b'WAVE'

        # fmt chunk
        header_data += b'fmt '
        if dkind == 'f':
            format_tag = WAVE_FORMAT.IEEE_FLOAT
        else:
            format_tag = WAVE_FORMAT.PCM
        if data.ndim == 1:
            channels = 1
        else:
            channels = data.shape[1]
        bit_depth = data.dtype.itemsize * 8
        bytes_per_second = fs*(bit_depth // 8)*channels
        block_align = channels * (bit_depth // 8)

        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
                                     bytes_per_second, block_align, bit_depth)
        if not (dkind == 'i' or dkind == 'u'):
            # add cbSize field for non-PCM files
            fmt_chunk_data += b'\x00\x00'

        header_data += struct.pack('<I', len(fmt_chunk_data))
        header_data += fmt_chunk_data

        # fact chunk (non-PCM files)
        if not (dkind == 'i' or dkind == 'u'):
            header_data += b'fact'
            header_data += struct.pack('<II', 4, data.shape[0])

        # check data size (needs to be immediately before the data chunk)
        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
            raise ValueError("Data exceeds wave file size limit")
        if add_header:
            fid.write(header_data)
            # data chunk
            fid.write(b'data')
            fid.write(struct.pack('<I', data.nbytes))
        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
                                           sys.byteorder == 'big'):
            data = data.byteswap()
        _array_tofile(fid, data)

        if add_header:
            # Determine file size and place it in correct
            # position at start of the file.
            size = fid.tell()
            fid.seek(4)
            fid.write(struct.pack('<I', size-8))

    finally:
        if not hasattr(filename, 'write'):
            fid.close()
        else:
            fid.seek(0)
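
# Illustration (added, not part of the original diff): writing one second of a
# 440 Hz tone as 16-bit 16 kHz mono, either as headerless PCM (the default,
# which is what the ASR loop below needs) or as a playable RIFF wav.
import numpy as np

_rate = 16000
_t = np.arange(_rate) / _rate
_tone = (np.sin(2 * np.pi * 440 * _t) * 32767).astype(np.int16)
write_numpy_to_wave("tone.pcm", _rate, _tone)                   # raw samples only
write_numpy_to_wave("tone.wav", _rate, _tone, add_header=True)  # RIFF header added
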

def is_speaker_speaking(vad, data, sample_rate):
    # Function to detect if the speaker is speaking
    # The WebRTC VAD only accepts 16-bit mono PCM audio,
    # sampled at 8000, 16000, 32000 or 48000 Hz.
    # A frame must be either 10, 20, or 30 ms in duration:
    frame_duration = 30
    n_bit_each = int(sample_rate * frame_duration / 1000) * 2  # x2 because audio is 16 bit (2 bytes)
    res_list = []
    for t in range(len(data)):
        if t != 0 and t % n_bit_each == 0:
            res_list.append(vad.is_speech(data[t-n_bit_each:t], sample_rate))

    info = ''.join(['^' if r else '.' for r in res_list])
    info = info[:10]
    if any(res_list):
        return True, info
    else:
        return False, info
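
# Illustration (added, not part of the original diff): the frame arithmetic
# above, spelled out. At 16 kHz, a 30 ms frame is 480 samples of 16-bit audio,
# i.e. 960 bytes, which is what n_bit_each works out to.
_sample_rate, _frame_duration = 16000, 30
assert int(_sample_rate * _frame_duration / 1000) * 2 == 960
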


class AliyunASR():

@@ -12,14 +114,14 @@ class AliyunASR():
        message = json.loads(message)
        self.parsed_sentence = message['payload']['result']
        self.event_on_entence_end.set()
        # print(self.parsed_sentence)

    def test_on_start(self, message, *args):
        # print("test_on_start:{}".format(message))
        pass

    def test_on_error(self, message, *args):
        logging.error("on_error args=>{}".format(args))
        pass

    def test_on_close(self, *args):

@@ -36,7 +138,6 @@ class AliyunASR():
        # print("on_completed:args=>{} message=>{}".format(args, message))
        pass

    def audio_convertion_thread(self, uuid):
        # capture audio in an asynchronous thread
        import nls  # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git

@@ -67,12 +168,22 @@ class AliyunASR():
            on_close=self.test_on_close,
            callback_args=[uuid.hex]
        )

        timeout_limit_second = 20
        r = sr.start(aformat="pcm",
                     timeout=timeout_limit_second,
                     enable_intermediate_result=True,
                     enable_punctuation_prediction=True,
                     enable_inverse_text_normalization=True)

        import webrtcvad
        vad = webrtcvad.Vad()
        vad.set_mode(1)

        is_previous_frame_transmitted = False  # whether anyone was speaking in the previous frame
        previous_frame_data = None
        echo_cnt = 0      # after the sound stops, keep sending audio data to the server n more times
        echo_cnt_max = 4  # after the sound stops, keep sending audio data to the server n more times
        keep_alive_last_send_time = time.time()
        while not self.stop:
            # time.sleep(self.capture_interval)
            audio = rad.read(uuid.hex)

@@ -80,12 +191,32 @@ class AliyunASR():
                # convert to pcm file
                temp_file = f'{temp_folder}/{uuid.hex}.pcm'
                dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE)  # 48000 --> 16000
                write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata)
                # read pcm binary
                with open(temp_file, "rb") as f:
                    data = f.read()
                # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
                is_speaking, info = is_speaker_speaking(vad, data, NEW_SAMPLERATE)

                if is_speaking or echo_cnt > 0:
                    # the microphone is active, or we are in the echo tail-off phase
                    echo_cnt -= 1
                    if not is_previous_frame_transmitted:
                        # no voice in the previous frame, but send that frame along as well
                        if previous_frame_data is not None:
                            data = previous_frame_data + data
                    if is_speaking:
                        echo_cnt = echo_cnt_max
                    slices = zip(*(iter(data),) * 640)  # groups of 640 bytes
                    for i in slices:
                        sr.send_audio(bytes(i))
                    keep_alive_last_send_time = time.time()
                    is_previous_frame_transmitted = True
                else:
                    is_previous_frame_transmitted = False
                    echo_cnt = 0
                    # keep the connection alive: even when there is no sound,
                    # periodically send some audio data to the server
                    if time.time() - keep_alive_last_send_time > timeout_limit_second / 2:
                        slices = zip(*(iter(data),) * 640)  # groups of 640 bytes
                        for i in slices:
                            sr.send_audio(bytes(i))
                        keep_alive_last_send_time = time.time()
                        is_previous_frame_transmitted = True
                self.audio_shape = info
            else:
                time.sleep(0.1)
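
# Illustration (added, not part of the original diff): the zip(*(iter(data),) * 640)
# idiom used above chops a byte string into 640-byte frames (20 ms of 16-bit
# 16 kHz mono audio), silently dropping any ragged tail.
_data = bytes(640 * 4 + 100)                    # 4 full frames plus a remainder
_frames = [bytes(chunk) for chunk in zip(*(iter(_data),) * 640)]
assert len(_frames) == 4 and all(len(f) == 640 for f in _frames)
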


@@ -35,7 +35,7 @@ class RealtimeAudioDistribution():
    def read(self, uuid):
        if uuid in self.data:
            res = self.data.pop(uuid)
            # print('\r read-', len(res), '-', max(res), end='', flush=True)
        else:
            res = None
        return res

crazy_functions/multi_stage/multi_stage_utils.py (new file, 93 lines)