Mirrored from https://github.com/binary-husky/gpt_academic.git
Last synced: 2025-12-06 14:36:48 +00:00
Compare commits: 752 commits, version2.6...version3.3
.gitattributes (vendored): 5 lines added

@@ -0,0 +1,5 @@
*.h linguist-detectable=false
*.cpp linguist-detectable=false
*.tex linguist-detectable=false
*.cs linguist-detectable=false
*.tps linguist-detectable=false
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 25 lines added

@@ -0,0 +1,25 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

- **(1) Describe the bug 简述**

- **(2) Screen Shot 截图**

- **(3) Terminal Traceback 终端traceback(如有)**

- **(4) Material to Help Reproduce Bugs 帮助我们复现的测试材料样本(如有)**

Before submitting an issue 提交issue之前:
- Please try to upgrade your code. 如果您的代码不是最新的,建议您先尝试更新代码
- Please check project wiki for common problem solutions. 项目[wiki](https://github.com/binary-husky/chatgpt_academic/wiki)有一些常见问题的解决方法
.github/ISSUE_TEMPLATE/feature_request.md (vendored): 10 lines added

@@ -0,0 +1,10 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---
.gitignore (vendored): 12 lines changed

@@ -131,6 +131,18 @@ dmypy.json
# Pyre type checker
.pyre/

.vscode
.idea

history
ssr_conf
config_private.py
gpt_log
private.md
private_upload
other_llms
cradle*
debug*
private*
crazy_functions/test_project/pdf_and_word
crazy_functions/test_samples
Dockerfile: 19 lines changed (old and new lines shown in the mirror's side-by-side order)

@@ -1,17 +1,20 @@
FROM ubuntu:latest

RUN apt-get update && \
    apt-get install -y python3 python3-pip && \
    rm -rf /var/lib/apt/lists/*
# 此Dockerfile适用于“无本地模型”的环境构建,如果需要使用chatglm等本地模型,请参考 docs/Dockerfile+ChatGLM
# 如何构建: 先修改 `config.py`, 然后 docker build -t gpt-academic .
# 如何运行: docker run --rm -it --net=host gpt-academic
FROM python:3.11

RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

RUN pip3 install gradio requests[socks] mdtex2html

COPY . /gpt
WORKDIR /gpt
COPY requirements.txt .
RUN pip3 install -r requirements.txt

COPY . .

CMD ["python3", "main.py"]
# 可选步骤,用于预热模块
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

CMD ["python3", "-u", "main.py"]
README.md: 218 lines changed (old and new lines shown in the mirror's side-by-side order)

@@ -1,26 +1,222 @@
# ChatGPT 学术优化
> **Note**
>
> 本项目依赖的Gradio组件的新版pip包(Gradio 3.26~3.27)有严重bug。所以,请在安装时严格选择requirements.txt中**指定的版本**。
>
> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
>

**如果喜欢这个项目,请给它一个Star**
# <img src="docs/logo.png" width="40" > ChatGPT 学术优化

## 使用docker
**如果喜欢这个项目,请给它一个Star;如果你发明了更好用的快捷键或函数插件,欢迎发pull requests**

If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.

> **Note**
>
> 1.请注意只有**红颜色**标识的函数插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR!
>
> 2.本项目中每个文件的功能都在自译解[`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题汇总在[`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98)当中。
>
> 3.已支持OpenAI和API2D的api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,api2d-key3"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交后即可生效。

<div align="center">

功能 | 描述
--- | ---
一键润色 | 支持一键润色、一键查找论文语法错误
一键中英互译 | 一键中英互译
一键代码解释 | 显示代码、解释代码、生成代码、给代码加注释
[自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
模块化设计 | 支持自定义强大的[函数插件](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
[自我程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] [一键读懂](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)本项目的源代码
[程序剖析](https://www.bilibili.com/video/BV1cj411A7VW) | [函数插件] 一键可以剖析其他Python/C/C++/Java/Lua/...项目树
读论文、翻译论文 | [函数插件] 一键解读latex/pdf论文全文并生成摘要
Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [函数插件] 一键翻译或润色latex论文
批量注释生成 | [函数插件] 一键批量生成函数注释
Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [函数插件] 看到上面5种语言的[README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md)了吗?
chat分析报告生成 | [函数插件] 运行后自动生成总结汇报
[PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [函数插件] PDF论文提取题目&摘要+翻译全文(多线程)
[Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [函数插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
[谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [函数插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
互联网信息聚合+GPT | [函数插件] 一键让ChatGPT先Google搜索,再回答问题,信息流永不过时
公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
多线程函数插件支持 | 支持多线调用chatgpt,一键处理[海量文本](https://www.bilibili.com/video/BV1FT411H7c5/)或程序
启动暗色gradio[主题](https://github.com/binary-husky/chatgpt_academic/issues/173) | 在浏览器url后面添加```/?__dark-theme=true```可以切换dark主题
[多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持,[API2D](https://api2d.com/)接口支持 | 同时被GPT3.5、GPT4和[清华ChatGLM](https://github.com/THUDM/ChatGLM-6B)伺候的感觉一定会很不错吧?
更多LLM模型接入 | 新加入Newbing测试接口(新必应AI)
…… | ……

</div>


- 新界面(修改`config.py`中的LAYOUT选项即可实现“左右布局”和“上下布局”的切换)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>


- 所有按钮都通过读取functional.py动态生成,可随意加自定义功能,解放粘贴板
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- 润色/纠错
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- 如果输出包含公式,会同时以tex形式和渲染形式显示,方便复制和阅读
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- 懒得看项目代码?整个工程直接给chatgpt炫嘴里
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- 多种大语言模型混合调用(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

---

## 安装-方法1:直接运行 (Windows, Linux or MacOS)

1. 下载项目
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
```

2. 配置API_KEY

在`config.py`中,配置API KEY等[设置](https://github.com/binary-husky/gpt_academic/issues/1)。

(P.S. 程序运行时会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。因此,如果您能理解我们的配置读取逻辑,我们强烈建议您在`config.py`旁边创建一个名为`config_private.py`的新配置文件,并把`config.py`中的配置转移(复制)到`config_private.py`中。`config_private.py`不受git管控,可以让您的隐私信息更加安全。)
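
The private-config override described above can be pictured with a short sketch. This is a minimal illustration of the lookup order only; the helper name `read_conf` and its fallback logic are assumptions, not the project's actual `toolbox.get_conf` implementation.

```python
# Minimal sketch: prefer config_private.py, fall back to config.py.
# Hypothetical helper -- the real project implements this in toolbox.get_conf.
import importlib

def read_conf(name):
    try:
        private = importlib.import_module('config_private')
        if hasattr(private, name):          # private file wins when it defines the option
            return getattr(private, name)
    except ModuleNotFoundError:
        pass                                # no private config: use the tracked config.py
    return getattr(importlib.import_module('config'), name)

API_KEY = read_conf('API_KEY')
```

Because `config_private.py` is listed in `.gitignore`, keys kept there never enter version control.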

3. 安装依赖
```sh
# (选择I: 如熟悉python)推荐
python -m pip install -r requirements.txt
# 备注:使用官方pip源或者阿里pip源,其他pip源(如一些大学的pip)有可能出问题,临时换源方法:python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/

# (选择II: 如不熟悉python)使用anaconda,步骤也是类似的:
# (II-1)conda create -n gptac_venv python=3.11
# (II-2)conda activate gptac_venv
# (II-3)python -m pip install -r requirements.txt
```

如果需要支持清华ChatGLM后端,需要额外安装更多依赖(前提条件:熟悉python + 电脑配置够强):
```sh
python -m pip install -r request_llm/requirements_chatglm.txt
```

4. 运行
```sh
python main.py
```

5. 测试函数插件
```
- 测试函数插件模板函数(要求gpt回答历史上的今天发生了什么),您可以根据此函数为模板,实现更复杂的功能
    点击 "[函数插件模板Demo] 历史上的今天"
```

## 安装-方法2:使用Docker

1. 仅ChatGPT(推荐大多数人选择)

``` sh
# 下载项目
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
# 配置 海外Proxy 和 OpenAI API KEY
config.py
# 配置 “Proxy”, “API_KEY” 以及 “WEB_PORT” (例如50923) 等
用任意文本编辑器编辑 config.py
# 安装
docker build -t gpt-academic .
# 运行
#(最后一步-选择1)在Linux环境下,用`--net=host`更方便快捷
docker run --rm -it --net=host gpt-academic

#(最后一步-选择2)在macOS/windows环境下,只能用-p选项将容器上的端口(例如50923)暴露给主机上的端口
docker run --rm -it -p 50923:50923 gpt-academic
```

## 参考项目
2. ChatGPT+ChatGLM(需要对Docker熟悉 + 读懂Dockerfile + 电脑配置够强)

``` sh
# 修改Dockerfile
cd docs && nano Dockerfile+ChatGLM
# 构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs)
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# 运行 (1) 直接运行:
docker run --rm -it --net=host --gpus=all gpt-academic
# 运行 (2) 我想运行之前进容器做一些调整:
docker run --rm -it --net=host --gpus=all gpt-academic bash
```
https://github.com/Python-Markdown/markdown
https://github.com/gradio-app/gradio
https://github.com/polarwinkel/mdtex2html

## 安装-方法3:其他部署方式(需要云服务器知识与经验)

1. 远程云服务器部署
请访问[部署wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

2. 使用WSL2(Windows Subsystem for Linux 子系统)
请访问[部署wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

3. 如何在二级网址(如`http://localhost/subpath`)下运行
请访问[FastAPI运行说明](docs/WithFastapi.md)

---

## 自定义新的便捷按钮 / 自定义函数插件

1. 自定义新的便捷按钮(学术快捷键)
任意文本编辑器打开`core_functional.py`,添加条目如下,然后重启程序即可。(如果按钮已经添加成功并可见,那么前缀、后缀都支持热修改,无需重启程序即可生效。)
例如
```
"超级英译中": {
    # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
    "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",

    # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

2. 自定义函数插件

编写强大的函数插件来执行任何你想得到的和想不到的任务。
本项目的插件编写、调试难度很低,只要您具备一定的python基础知识,就可以仿照我们提供的模板实现自己的插件功能。
详情请参考[函数插件指南](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)。

## 版本:
- version 3.2: 函数插件支持更多参数接口 (保存对话功能, 解读任意语言代码+同时询问任意的LLM组合)
- version 3.1: 支持同时问询多个gpt模型!支持api2d,支持多个apikey负载均衡
- version 3.0: 对chatglm和其他小型llm的支持
- version 2.6: 重构了插件结构,提高了交互性,加入更多插件
- version 2.5: 自更新,解决总结大工程源代码时文本过长、token溢出的问题
- version 2.4: (1)新增PDF全文翻译功能; (2)新增输入区切换位置的功能; (3)新增垂直布局选项; (4)多线程函数插件优化。
- version 2.3: 增强多线程交互性
- version 2.2: 函数插件支持热重载
- version 2.1: 可折叠式布局
- version 2.0: 引入模块化函数插件
- version 1.0: 基础功能

## 参考与学习

```
代码中参考了很多其他优秀项目中的设计,主要包括:

# 借鉴项目1:借鉴了ChuanhuChatGPT中诸多技巧
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# 借鉴项目2:清华ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B
```
check_proxy.py (new file): 149 lines

@@ -0,0 +1,149 @@

def check_proxy(proxies):
    import requests
    proxies_https = proxies['https'] if proxies is not None else '无'
    try:
        response = requests.get("https://ipapi.co/json/",
                                proxies=proxies, timeout=4)
        data = response.json()
        print(f'查询代理的地理位置,返回的结果是{data}')
        if 'country_name' in data:
            country = data['country_name']
            result = f"代理配置 {proxies_https}, 代理所在地:{country}"
        elif 'error' in data:
            result = f"代理配置 {proxies_https}, 代理所在地:未知,IP查询频率受限"
        print(result)
        return result
    except:
        result = f"代理配置 {proxies_https}, 代理所在地查询超时,代理可能无效"
        print(result)
        return result


def backup_and_download(current_version, remote_version):
    """
    一键更新协议:备份和下载
    """
    from toolbox import get_conf
    import shutil
    import os
    import requests
    import zipfile
    os.makedirs(f'./history', exist_ok=True)
    backup_dir = f'./history/backup-{current_version}/'
    new_version_dir = f'./history/new-version-{remote_version}/'
    if os.path.exists(new_version_dir):
        return new_version_dir
    os.makedirs(new_version_dir)
    shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
    proxies, = get_conf('proxies')
    r = requests.get(
        'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
    zip_file_path = backup_dir+'/master.zip'
    with open(zip_file_path, 'wb+') as f:
        f.write(r.content)
    dst_path = new_version_dir
    with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
        for zip_info in zip_ref.infolist():
            dst_file_path = os.path.join(dst_path, zip_info.filename)
            if os.path.exists(dst_file_path):
                os.remove(dst_file_path)
            zip_ref.extract(zip_info, dst_path)
    return new_version_dir


def patch_and_restart(path):
    """
    一键更新协议:覆盖和重启
    """
    import distutils
    import shutil
    import os
    import sys
    import time
    from colorful import print亮黄, print亮绿, print亮红
    # if not using config_private, move origin config.py as config_private.py
    if not os.path.exists('config_private.py'):
        print亮黄('由于您没有设置config_private.py私密配置,现将您的现有配置移动至config_private.py以防止配置丢失,',
              '另外您可以随时在history子文件夹下找回旧版的程序。')
        shutil.copyfile('config.py', 'config_private.py')
    distutils.dir_util.copy_tree(path+'/chatgpt_academic-master', './')
    import subprocess
    print亮绿('代码已经更新,即将更新pip包依赖……')
    for i in reversed(range(5)): time.sleep(1); print(i)
    try:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'])
    except:
        print亮红('pip包依赖安装出现问题,需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后再用常规的`python main.py`的方式启动。')
    print亮绿('更新完成,您可以随时在history子文件夹下找回旧版的程序,5s之后重启')
    print亮红('假如重启失败,您可能需要手动安装新增的依赖库 `python -m pip install -r requirements.txt`,然后再用常规的`python main.py`的方式启动。')
    print(' ------------------------------ -----------------------------------')
    for i in reversed(range(8)): time.sleep(1); print(i)
    os.execl(sys.executable, sys.executable, *sys.argv)


def get_current_version():
    import json
    try:
        with open('./version', 'r', encoding='utf8') as f:
            current_version = json.loads(f.read())['version']
    except:
        current_version = ""
    return current_version


def auto_update():
    """
    一键更新协议:查询版本和用户意见
    """
    try:
        from toolbox import get_conf
        import requests
        import time
        import json
        proxies, = get_conf('proxies')
        response = requests.get(
            "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
        remote_json_data = json.loads(response.text)
        remote_version = remote_json_data['version']
        if remote_json_data["show_feature"]:
            new_feature = "新功能:" + remote_json_data["new_feature"]
        else:
            new_feature = ""
        with open('./version', 'r', encoding='utf8') as f:
            current_version = f.read()
        current_version = json.loads(current_version)['version']
        if (remote_version - current_version) >= 0.01:
            from colorful import print亮黄
            print亮黄(
                f'\n新版本可用。新版本:{remote_version},当前版本:{current_version}。{new_feature}')
            print('(1)Github更新地址:\nhttps://github.com/binary-husky/chatgpt_academic\n')
            user_instruction = input('(2)是否一键更新代码(Y+回车=确认,输入其他/无输入+回车=不更新)?')
            if user_instruction in ['Y', 'y']:
                path = backup_and_download(current_version, remote_version)
                try:
                    patch_and_restart(path)
                except:
                    print('更新失败。')
            else:
                print('自动更新程序:已禁用')
                return
        else:
            return
    except:
        print('自动更新程序:已禁用')


def warm_up_modules():
    print('正在执行一些模块的预热...')
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    enc.encode("模块预热", disallowed_special=())
    enc = model_info["gpt-4"]['tokenizer']
    enc.encode("模块预热", disallowed_special=())


if __name__ == '__main__':
    import os
    os.environ['no_proxy'] = '*'  # 避免代理网络产生意外污染
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    check_proxy(proxies)
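
The updater above compares numeric versions read from a JSON `version` file at the repo root. A sketch of the file shape it consumes and the trigger condition; the concrete version numbers here are illustrative assumptions, while the keys (`version`, `show_feature`, `new_feature`) come from the code above.

```python
# Hypothetical example of the JSON consumed by get_current_version()/auto_update().
import json

remote = json.loads('{"version": 3.3, "show_feature": true, "new_feature": "..."}')
current_version = 3.2  # illustrative value of the locally stored version

# auto_update() offers an update once the remote is at least 0.01 ahead:
assert (remote['version'] - current_version) >= 0.01
```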
colorful.py (new file): 91 lines

@@ -0,0 +1,91 @@
import platform
from sys import stdout

if platform.system()=="Linux":
    pass
else:
    from colorama import init
    init()

# Do you like the elegance of Chinese characters?
def print红(*kw,**kargs):
    print("\033[0;31m",*kw,"\033[0m",**kargs)
def print绿(*kw,**kargs):
    print("\033[0;32m",*kw,"\033[0m",**kargs)
def print黄(*kw,**kargs):
    print("\033[0;33m",*kw,"\033[0m",**kargs)
def print蓝(*kw,**kargs):
    print("\033[0;34m",*kw,"\033[0m",**kargs)
def print紫(*kw,**kargs):
    print("\033[0;35m",*kw,"\033[0m",**kargs)
def print靛(*kw,**kargs):
    print("\033[0;36m",*kw,"\033[0m",**kargs)

def print亮红(*kw,**kargs):
    print("\033[1;31m",*kw,"\033[0m",**kargs)
def print亮绿(*kw,**kargs):
    print("\033[1;32m",*kw,"\033[0m",**kargs)
def print亮黄(*kw,**kargs):
    print("\033[1;33m",*kw,"\033[0m",**kargs)
def print亮蓝(*kw,**kargs):
    print("\033[1;34m",*kw,"\033[0m",**kargs)
def print亮紫(*kw,**kargs):
    print("\033[1;35m",*kw,"\033[0m",**kargs)
def print亮靛(*kw,**kargs):
    print("\033[1;36m",*kw,"\033[0m",**kargs)


def print亮红(*kw,**kargs):
    print("\033[1;31m",*kw,"\033[0m",**kargs)
def print亮绿(*kw,**kargs):
    print("\033[1;32m",*kw,"\033[0m",**kargs)
def print亮黄(*kw,**kargs):
    print("\033[1;33m",*kw,"\033[0m",**kargs)
def print亮蓝(*kw,**kargs):
    print("\033[1;34m",*kw,"\033[0m",**kargs)
def print亮紫(*kw,**kargs):
    print("\033[1;35m",*kw,"\033[0m",**kargs)
def print亮靛(*kw,**kargs):
    print("\033[1;36m",*kw,"\033[0m",**kargs)

print_red = print红
print_green = print绿
print_yellow = print黄
print_blue = print蓝
print_purple = print紫
print_indigo = print靛

print_bold_red = print亮红
print_bold_green = print亮绿
print_bold_yellow = print亮黄
print_bold_blue = print亮蓝
print_bold_purple = print亮紫
print_bold_indigo = print亮靛

if not stdout.isatty():
    # redirection, avoid a fucked up log file
    print红 = print
    print绿 = print
    print黄 = print
    print蓝 = print
    print紫 = print
    print靛 = print
    print亮红 = print
    print亮绿 = print
    print亮黄 = print
    print亮蓝 = print
    print亮紫 = print
    print亮靛 = print
    print_red = print
    print_green = print
    print_yellow = print
    print_blue = print
    print_purple = print
    print_indigo = print
    print_bold_red = print
    print_bold_green = print
    print_bold_yellow = print
    print_bold_blue = print
    print_bold_purple = print
    print_bold_indigo = print
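
A quick usage sketch for these helpers. The calls below are illustrative; note the module itself swaps every helper for plain `print` when stdout is redirected, as handled at the bottom of colorful.py.

```python
# Illustrative usage of the colorful helpers (requires colorama on Windows).
from colorful import print亮黄, print_bold_green

print亮黄('警告:', '配置未找到')  # bold yellow via ANSI escape \033[1;33m
print_bold_green('OK')            # English alias for print亮绿
```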
config.py: 72 lines changed (old and new lines shown in the mirror's side-by-side order)

@@ -1,11 +1,71 @@
# my_api_key = "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"
API_KEY = "sk-此处填API秘钥"
API_URL = "https://api.openai.com/v1/chat/completions"
# [step 1]>> 例如: API_KEY = "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" (此key无效)
API_KEY = "sk-此处填API密钥"    # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"

# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改
USE_PROXY = False
if USE_PROXY:
    proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", }
    print('网络代理状态:运行。')
    # 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改
    # 例如 "socks5h://localhost:11284"
    # [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http
    # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上)
    # [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上

    # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284)
    proxies = {
        # [协议]:// [地址] :[端口]
        "http": "socks5h://localhost:11284",
        "https": "socks5h://localhost:11284",
    }
else:
    proxies = None
    print('网络代理状态:未配置。无代理状态下很可能无法访问。')

# [step 3]>> 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次
# 一言以蔽之:免费用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview
DEFAULT_WORKER_NUM = 3


# [step 4]>> 以下配置可以优化体验,但大部分场合下并不需要修改
# 对话窗的高度
CHATBOT_HEIGHT = 1115

# 代码高亮
CODE_HIGHLIGHT = True

# 窗口布局
LAYOUT = "LEFT-RIGHT"    # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局)

# 发送请求到OpenAI后,等待多久判定为超时
TIMEOUT_SECONDS = 30

# 网页的端口, -1代表随机端口
WEB_PORT = -1

# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制
MAX_RETRY = 2

# OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
LLM_MODEL = "gpt-3.5-turbo"    # 可选 ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]

# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu"    # 可选 "cuda"

# 设置gradio的并行线程数(不需要修改)
CONCURRENT_COUNT = 100

# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个)
# [("username", "password"), ("username2", "password2"), ...]
AUTHENTICATION = []

# URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!!)
# 格式 {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
API_URL_REDIRECT = {}

# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!)
CUSTOM_PATH = "/"

# 如果需要使用newbing,把newbing的长长的cookie放到这里
NEWBING_STYLE = "creative"    # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """
your bing cookies here
"""
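
The comment on `API_KEY` above says several comma-separated keys (OpenAI and API2D mixed) may be supplied at once. A sketch of how such a string might be split and pooled; the `pick_api_key` helper is hypothetical, and the prefix rules (`sk-` for OpenAI, `fk` for API2D) are taken only from the comment's examples.

```python
# Hypothetical key-pool parser for the comma-separated API_KEY format shown above.
import random

API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1"

def pick_api_key(api_key_string, want_api2d=False):
    keys = [k.strip() for k in api_key_string.split(',') if k.strip()]
    # API2D keys start with "fk" in the config comment's examples; OpenAI keys with "sk-".
    pool = [k for k in keys if k.startswith('fk')] if want_api2d \
        else [k for k in keys if k.startswith('sk-')]
    return random.choice(pool)  # naive load balancing across the pool

print(pick_api_key(API_KEY))
```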
core_functional.py (new file): 71 lines

@@ -0,0 +1,71 @@
# 'primary' 颜色对应 theme.py 中的 primary_hue
# 'secondary' 颜色对应 theme.py 中的 neutral_hue
# 'stop' 颜色对应 theme.py 中的 color_er
# 默认按钮颜色是 secondary
from toolbox import clear_line_break


def get_core_functions():
    return {
        "英语学术润色": {
            # 前言
            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
                      r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
            # 后语
            "Suffix": r"",
            "Color": r"secondary",    # 按钮颜色
        },
        "中文学术润色": {
            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                      r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
            "Suffix": r"",
        },
        "查找语法错误": {
            "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +
                      r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
                      r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
                      r"put the original text the first column, " +
                      r"put the corrected text in the second column and highlight the key words you fixed.""\n"
                      r"Example:""\n"
                      r"Paragraph: How is you? Do you knows what is it?""\n"
                      r"| Original sentence | Corrected sentence |""\n"
                      r"| :--- | :--- |""\n"
                      r"| How **is** you? | How **are** you? |""\n"
                      r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
                      r"Below is a paragraph from an academic paper. "
                      r"You need to report all grammar and spelling mistakes as the example before."
                      + "\n\n",
            "Suffix": r"",
            "PreProcess": clear_line_break,    # 预处理:清除换行符
        },
        "中译英": {
            "Prefix": r"Please translate following sentence to English:" + "\n\n",
            "Suffix": r"",
        },
        "学术中英互译": {
            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                      r"I will provide you with some paragraphs in one language " +
                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
                      r"Do not repeat the original provided paragraphs after translation. " +
                      r"You should use artificial intelligence tools, " +
                      r"such as natural language processing, and rhetorical knowledge " +
                      r"and experience about effective writing techniques to reply. " +
                      r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
            "Suffix": "",
            "Color": "secondary",
        },
        "英译中": {
            "Prefix": r"翻译成地道的中文:" + "\n\n",
            "Suffix": r"",
        },
        "找图片": {
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
                      r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
            "Suffix": r"",
        },
        "解释代码": {
            "Prefix": r"请解释以下代码:" + "\n```\n",
            "Suffix": "\n```\n",
        },
    }
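
Each entry above wraps the user's input with a `Prefix` and `Suffix`, after an optional `PreProcess` step. A minimal sketch of how such an entry might be applied; the `apply_core_function` helper is hypothetical and stands in for the project's actual dispatch code.

```python
# Hypothetical illustration of how a core-function entry transforms user input.
from core_functional import get_core_functions

def apply_core_function(name, user_input):
    entry = get_core_functions()[name]
    if "PreProcess" in entry:                 # e.g. clear_line_break for 查找语法错误
        user_input = entry["PreProcess"](user_input)
    return entry["Prefix"] + user_input + entry["Suffix"]

prompt = apply_core_function("中译英", "你好,世界")
# -> "Please translate following sentence to English:\n\n你好,世界"
```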
crazy_functional.py (new file): 229 lines

@@ -0,0 +1,229 @@
from toolbox import HotReload  # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效


def get_crazy_functions():
    ###################### 第一组插件 ###########################
    from crazy_functions.读文章写摘要 import 读文章写摘要
    from crazy_functions.生成函数注释 import 批量生成函数注释
    from crazy_functions.解析项目源代码 import 解析项目本身
    from crazy_functions.解析项目源代码 import 解析一个Python项目
    from crazy_functions.解析项目源代码 import 解析一个C项目的头文件
    from crazy_functions.解析项目源代码 import 解析一个C项目
    from crazy_functions.解析项目源代码 import 解析一个Golang项目
    from crazy_functions.解析项目源代码 import 解析一个Java项目
    from crazy_functions.解析项目源代码 import 解析一个Rect项目
    from crazy_functions.高级功能函数模板 import 高阶功能模板函数
    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
    from crazy_functions.Latex全文润色 import Latex英文润色
    from crazy_functions.询问多个大语言模型 import 同时问询
    from crazy_functions.解析项目源代码 import 解析一个Lua项目
    from crazy_functions.解析项目源代码 import 解析一个CSharp项目
    from crazy_functions.总结word文档 import 总结word文档
    from crazy_functions.解析JupyterNotebook import 解析ipynb文件
    from crazy_functions.对话历史存档 import 对话历史存档
    from crazy_functions.批量Markdown翻译 import Markdown英译中
    function_plugins = {

        "解析整个Python项目": {
            "Color": "stop",    # 按钮颜色
            "Function": HotReload(解析一个Python项目)
        },
        "保存当前的对话": {
            "AsButton": False,
            "Function": HotReload(对话历史存档)
        },
        "[测试功能] 解析Jupyter Notebook文件": {
            "Color": "stop",
            "AsButton": False,
            "Function": HotReload(解析ipynb文件),
            "AdvancedArgs": True,    # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "若输入0,则不解析notebook中的Markdown块",    # 高级参数输入区的显示提示
        },
        "批量总结Word文档": {
            "Color": "stop",
            "Function": HotReload(总结word文档)
        },
        "解析整个C++项目头文件": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个C项目的头文件)
        },
        "解析整个C++项目(.cpp/.hpp/.c/.h)": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个C项目)
        },
        "解析整个Go项目": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Golang项目)
        },
        "解析整个Java项目": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Java项目)
        },
        "解析整个React项目": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Rect项目)
        },
        "解析整个Lua项目": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个Lua项目)
        },
        "解析整个CSharp项目": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(解析一个CSharp项目)
        },
        "读Tex论文写摘要": {
            "Color": "stop",    # 按钮颜色
            "Function": HotReload(读文章写摘要)
        },
        "Markdown/Readme英译中": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "Function": HotReload(Markdown英译中)
        },
        "批量生成函数注释": {
            "Color": "stop",    # 按钮颜色
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(批量生成函数注释)
        },
        "[多线程Demo] 解析此项目本身(源码自译解)": {
            "Function": HotReload(解析项目本身)
        },
        "[多线程demo] 把本项目源代码切换成全英文": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(全项目切换英文)
        },
        "[函数插件模板Demo] 历史上的今天": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Function": HotReload(高阶功能模板函数)
        },

    }
    ###################### 第二组插件 ###########################
    # [第二组插件]: 经过充分测试
    from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
    from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
    from crazy_functions.Latex全文润色 import Latex中文润色
    from crazy_functions.Latex全文翻译 import Latex中译英
    from crazy_functions.Latex全文翻译 import Latex英译中
    from crazy_functions.批量Markdown翻译 import Markdown中译英

    function_plugins.update({
        "批量翻译PDF文档(多线程)": {
            "Color": "stop",
            "AsButton": True,  # 加入下拉菜单中
            "Function": HotReload(批量翻译PDF文档)
        },
        "询问多个GPT模型": {
            "Color": "stop",    # 按钮颜色
            "Function": HotReload(同时问询)
        },
        "[测试功能] 批量总结PDF文档": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Function": HotReload(批量总结PDF文档)
        },
        "[测试功能] 批量总结PDF文档pdfminer": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(批量总结PDF文档pdfminer)
        },
        "谷歌学术检索助手(输入谷歌学术搜索页url)": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(谷歌检索小助手)
        },

        "理解PDF文档内容 (模仿ChatPDF)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(理解PDF文档内容标准文件输入)
        },
        "[测试功能] 英文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex英文润色)
        },
        "[测试功能] 中文Latex项目全文润色(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex中文润色)
        },
        "[测试功能] Latex项目全文中译英(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex中译英)
        },
        "[测试功能] Latex项目全文英译中(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Latex英译中)
        },
        "[测试功能] 批量Markdown中译英(输入路径或上传压缩包)": {
            # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(Markdown中译英)
        },

    })

    ###################### 第三组插件 ###########################
    # [第三组插件]: 尚未充分测试的函数插件,放在这里
    from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
    function_plugins.update({
        "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(下载arxiv论文并翻译摘要)
        }
    })

    from crazy_functions.联网的ChatGPT import 连接网络回答问题
    function_plugins.update({
        "连接网络回答问题(先输入问题,再点击按钮,需要访问谷歌)": {
            "Color": "stop",
            "AsButton": False,  # 加入下拉菜单中
            "Function": HotReload(连接网络回答问题)
        }
    })

    from crazy_functions.解析项目源代码 import 解析任意code项目
    function_plugins.update({
        "解析项目源代码(手动指定和筛选源代码文件类型)": {
            "Color": "stop",
            "AsButton": False,
            "AdvancedArgs": True,    # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"",    # 高级参数输入区的显示提示
            "Function": HotReload(解析任意code项目)
        },
    })
    from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
    function_plugins.update({
        "询问多个GPT模型(手动指定询问哪些模型)": {
            "Color": "stop",
            "AsButton": False,
            "AdvancedArgs": True,    # 调用时,唤起高级参数输入区(默认False)
            "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&api2d-gpt-4",    # 高级参数输入区的显示提示
            "Function": HotReload(同时问询_指定模型)
        },
    })
    ###################### 第n组插件 ###########################
    return function_plugins
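
The `HotReload` wrapper used throughout this registry re-imports a plugin's module before each call, so on-disk edits take effect without restarting the app. A minimal sketch of the idea, written as an assumed decorator-style wrapper; this is an illustration only, not the project's actual `toolbox.HotReload`.

```python
# Minimal sketch of a hot-reload wrapper (illustrative; the real one lives in toolbox.py).
import importlib

def hot_reload(fn):
    def wrapper(*args, **kwargs):
        module = importlib.import_module(fn.__module__)  # locate the plugin module
        importlib.reload(module)                          # pick up any on-disk edits
        fresh_fn = getattr(module, fn.__name__)           # fetch the freshly loaded function
        yield from fresh_fn(*args, **kwargs)              # plugins are generators in this project
    return wrapper
```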
crazy_functions/Latex全文润色.py (new file): 175 lines

@@ -0,0 +1,175 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        将长文本分离开来
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

        print('Segmentation: done')

def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency


    # <-------- 读取Latex文件,删除其中的所有注释 ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # 定义注释的正则表达式
            comment_pattern = r'%.*'
            # 使用正则表达式查找注释,并替换为空字符串
            clean_tex_content = re.sub(comment_pattern, '', file_content)
            # 记录删除注释后的文本
            pfg.file_paths.append(fp)
            pfg.file_contents.append(clean_tex_content)

    # <-------- 拆分过长的latex文件 ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # <-------- 抽取摘要 ---------->
    # if language == 'en':
    #     abs_extract_inputs = f"Please write an abstract for this paper"

    #     # 单线,获取文章meta信息
    #     paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #         inputs=abs_extract_inputs,
    #         inputs_show_user=f"正在抽取摘要信息。",
    #         llm_kwargs=llm_kwargs,
    #         chatbot=chatbot, history=[],
    #         sys_prompt="Your job is to collect information from materials。",
    #     )

    # <-------- 多线程润色开始 ---------->
    if language == 'en':
        inputs_array = ["Below is a section from an academic paper, polish this section to meet the academic standard, improve the grammar, clarity and overall readability, do not modify any latex command such as \section, \cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"Polish {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
    elif language == 'zh':
        inputs_array = [f"以下是一篇学术论文中的一段内容,请将此部分润色以满足学术标准,提高语法、清晰度和整体可读性,不要修改任何LaTeX命令,例如\section,\cite和方程式:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"润色 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["你是一位专业的中文学术论文作家。" for _ in range(n_split)]


    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # 并行任务数量限制,最多同时执行5个,其他的排队等待
        scroller_max_len = 80
    )

    # <-------- 整理结果,退出 ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面


@CatchException
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    history = []    # 清空历史,以免输入溢出
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')


@CatchException
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # 基本信息:功能、贡献者
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行润色。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面

    # 尝试导入依赖,如果缺少依赖,则给出安装建议
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    history = []    # 清空历史,以免输入溢出
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
        return
    yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
175  crazy_functions/Latex全文翻译.py  Normal file
@@ -0,0 +1,175 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Break overly long texts apart.
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")

        print('Segmentation: done')
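The split rule above keeps a file whole when it fits the token budget and otherwise hands it to `breakdown_txt_to_satisfy_token_limit_for_pdf`, tagging each piece `.part-j.tex` so it keeps its origin index. A minimal, self-contained sketch of that decision; the whitespace tokenizer and the halving `breakdown` helper are hypothetical stand-ins, not the real tiktoken encoder or breakdown function:

# Sketch of the split-or-keep decision in run_file_split (stand-in helpers, illustration only).
def get_token_num(txt):
    return len(txt.split())          # whitespace "tokens" instead of tiktoken

def breakdown(txt, limit):
    # naive recursive halving in place of the real breakdown function
    if get_token_num(txt) <= limit:
        return [txt]
    mid = len(txt) // 2
    return breakdown(txt[:mid], limit) + breakdown(txt[mid:], limit)

files = {"a.tex": "short file", "b.tex": "word " * 50}
limit = 16
tags, pieces = [], []
for path, content in files.items():
    if get_token_num(content) < limit:
        tags.append(path); pieces.append(content)        # small file: kept whole
    else:
        for j, seg in enumerate(breakdown(content, limit)):
            tags.append(path + f".part-{j}.tex"); pieces.append(seg)
print(tags)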
def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    # <-------- read the LaTeX files and strip every comment from them ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
            # regular expression that matches a LaTeX comment
            comment_pattern = r'%.*'
            # find the comments with the regex and replace them with the empty string
            clean_tex_content = re.sub(comment_pattern, '', file_content)
            # keep the text with the comments removed
            pfg.file_paths.append(fp)
            pfg.file_contents.append(clean_tex_content)

    # <-------- split overly long LaTeX files ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    # <-------- extract the abstract ---------->
    # if language == 'en':
    #     abs_extract_inputs = f"Please write an abstract for this paper"

    #     # single thread: fetch the paper's meta information
    #     paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
    #         inputs=abs_extract_inputs,
    #         inputs_show_user=f"正在抽取摘要信息。",
    #         llm_kwargs=llm_kwargs,
    #         chatbot=chatbot, history=[],
    #         sys_prompt="Your job is to collect information from materials。",
    #     )

    # <-------- start multi-threaded translation ---------->
    if language == 'en->zh':
        inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \\section, \\cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    elif language == 'zh->en':
        inputs_array = ["Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \\section, \\cite and equations:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # the maximum parallel load OpenAI allows
        scroller_max_len=80
    )

    # <-------- collect the results and exit ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

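One subtlety in the comment-stripping step of 多文件翻译 (and its polishing twin above): the pattern `r'%.*'` deletes everything from any `%` to the end of the line, including the escaped `\%`, which is not a comment in LaTeX. A small illustration; the lookbehind variant is a hypothetical refinement, not what the plugin ships:

import re

line = r"accuracy rose by 5\% overall % TODO recheck this number"

# the pattern used above: it also eats the escaped \% and everything after it
print(re.sub(r'%.*', '', line))

# hypothetical refinement: a negative lookbehind spares \%
print(re.sub(r'(?<!\\)%.*', '', line))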

@CatchException
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic info: what the plugin does and who contributed it
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')


@CatchException
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # basic info: what the plugin does and who contributed it
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # try to import the dependencies; if one is missing, suggest how to install it
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
0  crazy_functions/__init__.py  Normal file
crazy_functions/crazy_functions_test.py
@@ -0,0 +1,130 @@
"""
What is this?
    This file holds the unit tests for the function plugins.
    How to run: python crazy_functions/crazy_functions_test.py
"""

def validate_path():
    import os, sys
    dir_name = os.path.dirname(__file__)
    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
    os.chdir(root_dir_assume)
    sys.path.append(root_dir_assume)

validate_path()  # validate path so you can run from base directory

from colorful import *
from toolbox import get_conf, ChatBotWithCookies
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')

llm_kwargs = {
    'api_key': API_KEY,
    'llm_model': LLM_MODEL,
    'top_p': 1.0,
    'max_length': None,
    'temperature': 1.0,
}
plugin_kwargs = { }
chatbot = ChatBotWithCookies(llm_kwargs)
history = []
system_prompt = "Serve me as a writing and programming assistant."
web_port = 1024


def test_解析一个Python项目():
    from crazy_functions.解析项目源代码 import 解析一个Python项目
    txt = "crazy_functions/test_project/python/dqn"
    for cookies, cb, hist, msg in 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_解析一个Cpp项目():
    from crazy_functions.解析项目源代码 import 解析一个C项目
    txt = "crazy_functions/test_project/cpp/cppipc"
    for cookies, cb, hist, msg in 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Latex英文润色():
    from crazy_functions.Latex全文润色 import Latex英文润色
    txt = "crazy_functions/test_project/latex/attention"
    for cookies, cb, hist, msg in Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_Markdown中译英():
    from crazy_functions.批量Markdown翻译 import Markdown中译英
    txt = "README.md"
    for cookies, cb, hist, msg in Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_批量翻译PDF文档():
    from crazy_functions.批量翻译PDF文档_多线程 import 批量翻译PDF文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_谷歌检索小助手():
    from crazy_functions.谷歌检索小助手 import 谷歌检索小助手
    txt = "https://scholar.google.com/scholar?hl=en&as_sdt=0%2C5&q=auto+reinforcement+learning&btnG="
    for cookies, cb, hist, msg in 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_总结word文档():
    from crazy_functions.总结word文档 import 总结word文档
    txt = "crazy_functions/test_project/pdf_and_word"
    for cookies, cb, hist, msg in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_下载arxiv论文并翻译摘要():
    from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
    txt = "1812.10695"
    for cookies, cb, hist, msg in 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)

def test_联网回答问题():
    from crazy_functions.联网的ChatGPT import 连接网络回答问题
    # txt = "“我们称之为高效”是什么梗?"
    # >> 从第0份、第1份、第2份搜索结果可以看出,“我们称之为高效”是指在游戏社区中,用户们用来形容一些游戏策略或行为非常高效且能够带来好的效果的用语。这个用语最初可能是在群星(Stellaris)这个游戏里面流行起来的,后来也传播到了其他游戏中,比如巨像(Titan)等游戏。其中第1份搜索结果中的一篇文章也指出,“我们称之为高效”这一用语来源于群星(Stellaris)游戏中的一个情节。
    # txt = "为什么说枪毙P社玩家没有一个冤枉的?"
    # >> 它们都是关于一个知乎用户所发的帖子,引用了一群游戏玩家对于需要对P社玩家进行枪毙的讨论,这个话题的本质是玩家们对于P社游戏中的政治与历史元素的不同看法,以及其中不少玩家以极端立场宣扬的想法和言论,因此有人就以枪毙这些玩家来回应此类言论。但是这个话题本身并没有实质内容,只是一个玩笑或者恶搞,并不应该被当做真实的态度或者观点,因此这种说法没有实际意义。
    # txt = "谁是应急食品?"
    # >> '根据以上搜索结果可以得知,应急食品是“原神”游戏中的角色派蒙的外号。'
    # txt = "道路千万条,安全第一条。后面两句是?"
    # >> '行车不规范,亲人两行泪。'
    # txt = "What is in the canister?"
    # >> Rainbow Six Siege 游戏中 Smoke 的 Canister 中装有何种物质相关的官方信息。
    # txt = "失败的man是什么?"
    # >> 根据第1份搜索结果,可以得知失败的man是指一位在B站购买了蜘蛛侠COS服后穿上后被网友嘲笑的UP主,而“失败的man”是蜘蛛侠英文名“spiderman”的谐音梗,并且网友们还给这位UP主起了“苍蝇侠”的外号。因此,失败的man是指这位UP主在穿上蜘蛛侠COS服后被网友嘲笑的情况。
    # txt = "老六是什么,起源于哪里?"
    # >> 老六是网络流行语,最初起源于游戏《CSGO》,指游戏中玩家中独来独往、游离于队伍之外的“自由人”或玩得比较菜或者玩得比较阴险的人,后来逐渐演变成指玩得比较阴险的玩家。
    # txt = "罗小黑战记因为什么经常被吐槽?"
    # >> 3. 更新速度。罗小黑战记的更新时间不定,时而快时而慢,给观众留下了等待的时间过长的印象。
    # txt = "沙特、伊朗最近的关系如何?"
    # >> 最近在中国的斡旋下,沙特和伊朗于3月10日达成了恢复两国外交关系的协议,这表明两国关系已经重新回到正常化状态。
    # txt = "You should have gone for the head. What does that mean?"
    # >> The phrase "You should have gone for the head" is a quote from the Marvel movies, Avengers: Infinity War and Avengers: Endgame. It was spoken by the character Thanos in Infinity War and by Thor in Endgame.
    txt = "AutoGPT是什么?"
    # >> AutoGPT是一个基于GPT-4语言模型的开源应用程序。它可以根据用户需求自主执行任务,包括事件分析、营销方案撰写、代码编程、数学运算等等,并完全不需要用户插手。它可以自己思考,给出实现的步骤和实现细节,甚至可以自问自答执行任务。最近它在GitHub上爆火,成为了业内最热门的项目之一。
    # txt = "钟离带什么圣遗物?"
    for cookies, cb, hist, msg in 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print("当前问答:", cb[-1][-1].replace("\n", " "))
    for i, it in enumerate(cb): print亮蓝(it[0]); print亮黄(it[1])

def test_解析ipynb文件():
    from crazy_functions.解析JupyterNotebook import 解析ipynb文件
    txt = "crazy_functions/test_samples"
    for cookies, cb, hist, msg in 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        print(cb)


# test_解析一个Python项目()
# test_Latex英文润色()
# test_Markdown中译英()
# test_批量翻译PDF文档()
# test_谷歌检索小助手()
# test_总结word文档()
# test_下载arxiv论文并翻译摘要()
# test_解析一个Cpp项目()
# test_联网回答问题()
test_解析ipynb文件()

input("程序完成,回车退出。")
print("退出。")
565  crazy_functions/crazy_utils.py  Normal file
@@ -0,0 +1,565 @@
from toolbox import update_ui, get_conf, trimmed_format_exc

def input_clipping(inputs, history, max_token_limit):
    import numpy as np
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))

    mode = 'input-and-history'
    # when the input's share of the tokens is below half of the total, only clip the history
    input_token_num = get_token_num(inputs)
    if input_token_num < max_token_limit//2:
        mode = 'only-history'
        max_token_limit = max_token_limit - input_token_num

    everything = [inputs] if mode == 'input-and-history' else ['']
    everything.extend(history)
    n_token = get_token_num('\n'.join(everything))
    everything_token = [get_token_num(e) for e in everything]
    delta = max(everything_token) // 16  # granularity of each truncation step

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = enc.encode(everything[where], disallowed_special=())
        clipped_encoded = encoded[:len(encoded)-delta]
        everything[where] = enc.decode(clipped_encoded)[:-1]  # -1 to remove the possibly illegal trailing char
        everything_token[where] = get_token_num(everything[where])
        n_token = get_token_num('\n'.join(everything))

    if mode == 'input-and-history':
        inputs = everything[0]
    else:
        pass
    history = everything[1:]
    return inputs, history
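The clipping loop above is a greedy scheme: on every pass it finds the largest remaining piece (argmax over per-piece token counts) and shaves a fixed `delta` off its tail until the total fits. A self-contained sketch of the same idea, using word counts as a hypothetical stand-in for tiktoken tokens:

# Greedy tail-clipping over a list of text pieces, word counts standing in for tokens.
def clip_to_limit(pieces, limit):
    counts = [len(p.split()) for p in pieces]
    delta = max(1, max(counts) // 16)        # granularity of each truncation step
    while sum(counts) > limit:
        where = counts.index(max(counts))    # always shrink the largest piece
        words = pieces[where].split()
        pieces[where] = " ".join(words[:max(0, len(words) - delta)])
        counts[where] = len(pieces[where].split())
    return pieces

print(clip_to_limit(["a " * 40, "b " * 10, "c " * 5], limit=20))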
def request_gpt_model_in_new_thread_with_ui_alive(
        inputs, inputs_show_user, llm_kwargs,
        chatbot, history, sys_prompt, refresh_interval=0.2,
        handle_token_exceed=True,
        retry_times_at_unknown_error=2,
        ):
    """
    Request a GPT model while keeping the user interface alive.

    Args:
        inputs (string): the input query
        inputs_show_user (string): the input as shown to the user; use it to hide long, noisy real inputs in the report and keep it readable
        top_p (float): top-p value for sampling from the model distribution
        temperature (float): temperature for sampling from the model distribution
        chatbot: the chatbot window handle, used for streaming the data to the UI
        history (list): the conversation history
        sys_prompt (string): the system prompt handed to GPT, e.g. "you are a translator, ..."
        refresh_interval (float, optional): UI refresh interval (default: 0.2); keep it below 1 and never above 3, it only affects the visual effect
        handle_token_exceed: whether to handle token overflow automatically; if enabled, the text is truncated by brute force on overflow (enabled by default)
        retry_times_at_unknown_error: number of retries on failure

    Returns:
        future: the result returned by GPT
    """
    import time
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_all import predict_no_ui_long_connection
    # user feedback
    chatbot.append([inputs_show_user, ""])
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    executor = ThreadPoolExecutor(max_workers=16)
    mutable = ["", time.time(), ""]
    def _req_gpt(inputs, history, sys_prompt):
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        while True:
            # watchdog error
            if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [case 1]: the request completes normally
                result = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs,
                    history=history, sys_prompt=sys_prompt, observe_window=mutable)
                return result
            except ConnectionAbortedError as token_exceeded_error:
                # [case 2]: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # [chosen handling] estimate the overflow ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                    mutable[0] += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    continue  # go back and retry
                else:
                    # [chosen handling] give up
                    tb_str = '```\n' + trimmed_format_exc() + '```'
                    mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    return mutable[0]  # give up
            except:
                # [case 3]: any other error: retry a few times
                tb_str = '```\n' + trimmed_format_exc() + '```'
                print(tb_str)
                mutable[0] += f"[Local Message] 警告,在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if retry_op > 0:
                    retry_op -= 1
                    mutable[0] += f"[Local Message] 重试中,请稍等 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}:\n\n"
                    if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                        time.sleep(30)
                    time.sleep(5)
                    continue  # go back and retry
                else:
                    time.sleep(5)
                    return mutable[0]  # give up

    # submit the task
    future = executor.submit(_req_gpt, inputs, history, sys_prompt)
    while True:
        # yield once to refresh the front-end page
        time.sleep(refresh_interval)
        # "feed" the watchdog
        mutable[1] = time.time()
        if future.done():
            break
        chatbot[-1] = [chatbot[-1][0], mutable[0]]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI

    final_result = future.result()
    chatbot[-1] = [chatbot[-1][0], final_result]
    yield from update_ui(chatbot=chatbot, history=[])  # on success, remove the error messages
    return final_result

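The watchdog handshake above is the load-bearing trick: the worker thread keeps checking how long it has been since the UI loop last refreshed `mutable[1]`, and aborts itself once the generator stops being driven (e.g. the user closed the page). A stripped-down sketch of that pattern; the 3-second deadline and the fake workload are made-up values for illustration:

import time
from concurrent.futures import ThreadPoolExecutor

WATCHDOG_PATIENCE = 3  # seconds without a heartbeat before the worker aborts

def worker(heartbeat):
    for step in range(10):                    # fake long-running workload
        if time.time() - heartbeat[0] > WATCHDOG_PATIENCE:
            raise RuntimeError("UI stopped driving the generator; aborting worker.")
        time.sleep(0.5)
    return "done"

heartbeat = [time.time()]
with ThreadPoolExecutor(max_workers=1) as pool:
    future = pool.submit(worker, heartbeat)
    while not future.done():
        heartbeat[0] = time.time()            # the UI loop "feeds the dog" on every refresh
        time.sleep(0.2)
    print(future.result())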
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array, inputs_show_user_array, llm_kwargs,
        chatbot, history_array, sys_prompt_array,
        refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
        handle_token_exceed=True, show_user_at_complete=False,
        retry_times_at_unknown_error=2,
        ):
    """
    Request a GPT model using multiple threads, with UI feedback and high efficiency.
    This is the [multi-threaded] version of the request helper. It:
        streams the remote data to the UI in real time
        uses a thread pool whose size can be tuned to avoid OpenAI rate-limit errors
        handles mid-run interruption
        folds the traceback and whatever data was already received into the output when the network fails

    Args (variables ending in _array are lists; the list length equals the number of sub-tasks, and each element is dispatched to its own worker thread):
        inputs_array (list): the input of each sub-task
        inputs_show_user_array (list): the input of each sub-task as shown in the report; use it to hide long, noisy real inputs and keep the report readable
        llm_kwargs: the llm_kwargs parameter
        chatbot: the chatbot window handle, used for streaming the data to the UI
        history_array (list): list of chat histories; the outer list has one entry per sub-task, the inner list is that task's conversation history
        sys_prompt_array (list): the system prompts handed to GPT, e.g. "you are a translator, ..."
        refresh_interval (float, optional): UI refresh interval (default: 0.2); keep it below 1 and never above 3, it only affects the visual effect
        max_workers (int, optional): maximum number of threads (default: see config.py); cap this when there are very many sub-tasks, to avoid hammering OpenAI and triggering errors
        scroller_max_len (int, optional): how many of the most recently received characters of each stream to show (default: 30); purely visual
        handle_token_exceed (bool, optional): whether to shrink the text automatically when the input is too long; if enabled, the text is truncated by brute force on overflow (enabled by default)
        show_user_at_complete (bool, optional): whether to show the complete input/output pairs in the chat window at the end
        retry_times_at_unknown_error: number of retries when a sub-task fails

    Returns:
        list: the collected outputs of the sub-tasks; if a sub-task failed, its response carries the traceback, which makes debugging and locating the problem easy
    """
    import time, random
    from concurrent.futures import ThreadPoolExecutor
    from request_llm.bridge_all import predict_no_ui_long_connection
    assert len(inputs_array) == len(history_array)
    assert len(inputs_array) == len(sys_prompt_array)
    if max_workers == -1:  # read the config file
        try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
        except: max_workers = 8
        if max_workers <= 0: max_workers = 3
    # disable multi-threading for chatglm, it may cause severe lag
    if not (llm_kwargs['llm_model'].startswith('gpt-') or llm_kwargs['llm_model'].startswith('api2d-')):
        max_workers = 1

    executor = ThreadPoolExecutor(max_workers=max_workers)
    n_frag = len(inputs_array)
    # user feedback
    chatbot.append(["请开始多线程操作。", ""])
    yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    # state shared across threads
    mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]

    # worker-thread task
    def _req_gpt(index, inputs, history, sys_prompt):
        gpt_say = ""
        retry_op = retry_times_at_unknown_error
        exceeded_cnt = 0
        mutable[index][2] = "执行中"
        while True:
            # watchdog error
            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
                raise RuntimeError("检测到程序终止。")
            try:
                # [case 1]: the request completes normally
                # time.sleep(10); raise RuntimeError("测试")
                gpt_say = predict_no_ui_long_connection(
                    inputs=inputs, llm_kwargs=llm_kwargs, history=history,
                    sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
                )
                mutable[index][2] = "已成功"
                return gpt_say
            except ConnectionAbortedError as token_exceeded_error:
                # [case 2]: token overflow
                if handle_token_exceed:
                    exceeded_cnt += 1
                    # [chosen handling] estimate the overflow ratio and keep as much text as possible
                    from toolbox import get_reduce_token_percent
                    p_ratio, n_exceed = get_reduce_token_percent(str(token_exceeded_error))
                    MAX_TOKEN = 4096
                    EXCEED_ALLO = 512 + 512 * exceeded_cnt
                    inputs, history = input_clipping(inputs, history, max_token_limit=MAX_TOKEN-EXCEED_ALLO)
                    gpt_say += f'[Local Message] 警告,文本过长将进行截断,Token溢出数:{n_exceed}。\n\n'
                    mutable[index][2] = f"截断重试"
                    continue  # go back and retry
                else:
                    # [chosen handling] give up
                    tb_str = '```\n' + trimmed_format_exc() + '```'
                    gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                    if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                    mutable[index][2] = "输入过长已放弃"
                    return gpt_say  # give up
            except:
                # [case 3]: any other error
                tb_str = '```\n' + trimmed_format_exc() + '```'
                print(tb_str)
                gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
                if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
                if retry_op > 0:
                    retry_op -= 1
                    wait = random.randint(5, 20)
                    if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
                        wait = wait * 3
                        fail_info = "OpenAI绑定信用卡可解除频率限制 "
                    else:
                        fail_info = ""
                    # the situation may improve after waiting a dozen seconds or so
                    for i in range(wait):
                        mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
                    # start the retry
                    mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
                    continue  # go back and retry
                else:
                    mutable[index][2] = "已失败"
                    wait = 5
                    time.sleep(5)
                    return gpt_say  # give up

    # launch the asynchronous tasks
    futures = [executor.submit(_req_gpt, index, inputs, history, sys_prompt) for index, inputs, history, sys_prompt in zip(
        range(len(inputs_array)), inputs_array, history_array, sys_prompt_array)]
    cnt = 0
    while True:
        # yield once to refresh the front-end page
        time.sleep(refresh_interval)
        cnt += 1
        worker_done = [h.done() for h in futures]
        if all(worker_done):
            executor.shutdown()
            break
        # nicer UI visual effect
        observe_win = []
        # every thread "feeds" the watchdog
        for thread_index, _ in enumerate(worker_done):
            mutable[thread_index][1] = time.time()
        # build a fun live preview of each stream for the front end
        for thread_index, _ in enumerate(worker_done):
            print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
                replace('\n', '').replace('```', '...').replace(
                    ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
            observe_win.append(print_something_really_funny)
        # assemble the status line shown on the front end
        stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
                            if not done else f'`{mutable[thread_index][2]}`\n\n'
                            for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
        # show it on the front end
        chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI

    # the asynchronous tasks have finished
    gpt_response_collection = []
    for inputs_show_user, f in zip(inputs_show_user_array, futures):
        gpt_res = f.result()
        gpt_response_collection.extend([inputs_show_user, gpt_res])

    # optionally show the full results in the chat window at the end
    if show_user_at_complete:
        for inputs_show_user, f in zip(inputs_show_user_array, futures):
            gpt_res = f.result()
            chatbot.append([inputs_show_user, gpt_res])
            yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
            time.sleep(0.3)
    return gpt_response_collection

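Note the shape of the return value: results are collected with `extend([inputs_show_user, gpt_res])`, so the caller receives one flat list that alternates prompt, response, prompt, response. That is exactly the layout `write_results_to_file` and the `history` variable expect in the LaTeX plugins above. A tiny sketch of producing and consuming that layout (the sample strings are made up):

# Flat alternating [prompt, response, prompt, response, ...] collection.
shown_inputs = ["Polish chap1.tex", "Polish chap2.tex"]
results = ["polished text 1", "polished text 2"]

collection = []
for shown, res in zip(shown_inputs, results):
    collection.extend([shown, res])

# consuming it: step through the list two items at a time
for prompt, response in zip(collection[0::2], collection[1::2]):
    print(prompt, "->", response)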
def breakdown_txt_to_satisfy_token_limit(txt, get_token_fn, limit):
    def cut(txt_tocut, must_break_at_empty_line):  # recursive
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                print(cnt)
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                raise RuntimeError("存在一行极长的文本!")
            # print(len(post))
            # recursively chain the pieces into one list
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line))
            return result
    try:
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        return cut(txt, must_break_at_empty_line=False)
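`breakdown_txt_to_satisfy_token_limit` works front to back: it estimates a cut line proportional to the limit, walks backwards to a legal break point, emits the head, and recurses on the tail; if no empty-line break works, it retries allowing any line break. A quick run, assuming the function above is importable and with word count as a hypothetical token function:

# Demo run of the recursive breakdown, counting words instead of tokens.
def get_token_fn(txt):
    return len(txt.split())

paragraphs = "\n\n".join(["one two three four five"] * 6)  # 30 "tokens" total
pieces = breakdown_txt_to_satisfy_token_limit(paragraphs, get_token_fn, limit=12)
for p in pieces:
    print(get_token_fn(p), repr(p[:30]))  # every piece stays within the limit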
def force_breakdown(txt, limit, get_token_fn):
    """
    When the text cannot be split at punctuation or blank lines, fall back to the most brute-force cut.
    """
    for i in reversed(range(len(txt))):
        if get_token_fn(txt[:i]) < limit:
            return txt[:i], txt[i:]
    return "Tiktoken未知错误", "Tiktoken未知错误"

def breakdown_txt_to_satisfy_token_limit_for_pdf(txt, get_token_fn, limit):
    # recursive
    def cut(txt_tocut, must_break_at_empty_line, break_anyway=False):
        if get_token_fn(txt_tocut) <= limit:
            return [txt_tocut]
        else:
            lines = txt_tocut.split('\n')
            estimated_line_cut = limit / get_token_fn(txt_tocut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            if cnt == 0:
                if break_anyway:
                    prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
                else:
                    raise RuntimeError(f"存在一行极长的文本!{txt_tocut}")
            # recursively chain the pieces into one list
            result = [prev]
            result.extend(cut(post, must_break_at_empty_line, break_anyway=break_anyway))
            return result
    try:
        # attempt 1: split at double blank lines (\n\n)
        return cut(txt, must_break_at_empty_line=True)
    except RuntimeError:
        try:
            # attempt 2: split at single line breaks (\n)
            return cut(txt, must_break_at_empty_line=False)
        except RuntimeError:
            try:
                # attempt 3: split at the English period (.)
                res = cut(txt.replace('.', '。\n'), must_break_at_empty_line=False)  # the Chinese period here is deliberate, it serves as a marker
                return [r.replace('。\n', '.') for r in res]
            except RuntimeError as e:
                try:
                    # attempt 4: split at the Chinese period (。)
                    res = cut(txt.replace('。', '。。\n'), must_break_at_empty_line=False)
                    return [r.replace('。。\n', '。') for r in res]
                except RuntimeError as e:
                    # attempt 5: nothing else works, just cut anywhere
                    return cut(txt, must_break_at_empty_line=False, break_anyway=True)
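The try/except ladder above encodes a preference order for split points: paragraph breaks first, then line breaks, then sentence ends, and only then an arbitrary character cut. The same escalation pattern can be written generically; a minimal sketch (the separator list and word-count token function are illustrative, not the plugin's API):

# Generic "try progressively cruder split points" ladder.
def split_with_fallbacks(txt, limit, get_token_fn):
    separators = ["\n\n", "\n", ". ", " "]    # from most to least preferred
    for sep in separators:
        parts = txt.split(sep)
        if all(get_token_fn(p) <= limit for p in parts):
            return [p + sep for p in parts[:-1]] + parts[-1:]
    # last resort: fixed-width character chunks
    return [txt[i:i+limit] for i in range(0, len(txt), limit)]

text = "First paragraph.\n\nSecond paragraph that is a bit longer. It has two sentences."
print(split_with_fallbacks(text, limit=8, get_token_fn=lambda t: len(t.split())))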
def read_and_clean_pdf_text(fp):
    """
    This function splits a PDF. It uses a lot of tricks and the logic is messy, but the results are surprisingly good.

    **Input**
    - `fp`: path of the PDF file whose text should be read and cleaned

    **Output**
    - `meta_txt`: the cleaned text content, as a string
    - `page_one_meta`: the cleaned text content of the first page, as a list

    **What it does**
    Read the PDF file and clean up its text content. The cleaning rules are:
    - extract the text of every block element and merge it into one string
    - drop short blocks (fewer than 100 characters) and replace them with a newline
    - clean up redundant blank lines
    - merge paragraph blocks that start with a lowercase letter, joining them with a space
    - remove duplicated line breaks
    - turn every line break into two line breaks, so each paragraph is separated by two newlines
    """
    import fitz, copy
    import re
    import numpy as np
    from colorful import print亮黄, print亮绿
    fc = 0  # index 0: text
    fs = 1  # index 1: font
    fb = 2  # index 2: bounding box
    REMOVE_FOOT_NOTE = True  # whether to discard non-body content (smaller font than the body, e.g. references, footnotes, figure captions)
    REMOVE_FOOT_FFSIZE_PERCENT = 0.95  # treat text as non-body when its font is smaller than this fraction of the body font (some papers' body font is not perfectly uniform, with tiny invisible variations)
    def primary_ffsize(l):
        """
        Extract the dominant font size of a text block.
        """
        fsize_statiscs = {}
        for wtf in l['spans']:
            if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
            fsize_statiscs[wtf['size']] += len(wtf['text'])
        return max(fsize_statiscs, key=fsize_statiscs.get)

    def ffsize_same(a, b):
        """
        Check whether two font sizes are approximately equal.
        """
        return abs((a-b)/max(a,b)) < 0.02

    with fitz.open(fp) as doc:
        meta_txt = []
        meta_font = []

        meta_line = []
        meta_span = []
        ############################## <step 1: collect the initial information> ##################################
        for index, page in enumerate(doc):
            # file_content += page.get_text()
            text_areas = page.get_text("dict")  # fetch the text information on the page
            for t in text_areas['blocks']:
                if 'lines' in t:
                    pf = 998
                    for l in t['lines']:
                        txt_line = "".join([wtf['text'] for wtf in l['spans']])
                        if len(txt_line) == 0: continue
                        pf = primary_ffsize(l)
                        meta_line.append([txt_line, pf, l['bbox'], l])
                        for wtf in l['spans']:  # for l in t['lines']:
                            meta_span.append([wtf['text'], wtf['size'], len(wtf['text'])])
                    # meta_line.append(["NEW_BLOCK", pf])
            # block extraction: for each word segment within a line, for each line, for cross-line words, for each block
            meta_txt.extend([" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                '- ', '') for t in text_areas['blocks'] if 'lines' in t])
            meta_font.extend([np.mean([np.mean([wtf['size'] for wtf in l['spans']])
                             for l in t['lines']]) for t in text_areas['blocks'] if 'lines' in t])
            if index == 0:
                page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
                    '- ', '') for t in text_areas['blocks'] if 'lines' in t]

        ############################## <step 2: determine the body font> ##################################
        fsize_statiscs = {}
        for span in meta_span:
            if span[1] not in fsize_statiscs: fsize_statiscs[span[1]] = 0
            fsize_statiscs[span[1]] += span[2]
        main_fsize = max(fsize_statiscs, key=fsize_statiscs.get)
        if REMOVE_FOOT_NOTE:
            give_up_fize_threshold = main_fsize * REMOVE_FOOT_FFSIZE_PERCENT

        ############################## <step 3: split and regroup> ##################################
        mega_sec = []
        sec = []
        for index, line in enumerate(meta_line):
            if index == 0:
                sec.append(line[fc])
                continue
            if REMOVE_FOOT_NOTE:
                if meta_line[index][fs] <= give_up_fize_threshold:
                    continue
            if ffsize_same(meta_line[index][fs], meta_line[index-1][fs]):
                # try to detect a paragraph boundary
                if meta_line[index][fc].endswith('.') and\
                        (meta_line[index-1][fc] != 'NEW_BLOCK') and \
                        (meta_line[index][fb][2] - meta_line[index][fb][0]) < (meta_line[index-1][fb][2] - meta_line[index-1][fb][0]) * 0.7:
                    sec[-1] += line[fc]
                    sec[-1] += "\n\n"
                else:
                    sec[-1] += " "
                    sec[-1] += line[fc]
            else:
                if (index+1 < len(meta_line)) and \
                        meta_line[index][fs] > main_fsize:
                    # a single line in a large font: treat it as a heading
                    mega_sec.append(copy.deepcopy(sec))
                    sec = []
                    sec.append("# " + line[fc])
                else:
                    # try to detect a section boundary
                    if meta_line[index-1][fs] > meta_line[index][fs]:
                        sec.append("\n" + line[fc])
                    else:
                        sec.append(line[fc])
        mega_sec.append(copy.deepcopy(sec))

        finals = []
        for ms in mega_sec:
            final = " ".join(ms)
            final = final.replace('- ', ' ')
            finals.append(final)
        meta_txt = finals

        ############################## <step 4: assorted post-processing> ##################################
        def 把字符太少的块清除为回车(meta_txt):
            for index, block_txt in enumerate(meta_txt):
                if len(block_txt) < 100:
                    meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 把字符太少的块清除为回车(meta_txt)

        def 清理多余的空行(meta_txt):
            for index in reversed(range(1, len(meta_txt))):
                if meta_txt[index] == '\n' and meta_txt[index-1] == '\n':
                    meta_txt.pop(index)
            return meta_txt
        meta_txt = 清理多余的空行(meta_txt)

        def 合并小写开头的段落块(meta_txt):
            def starts_with_lowercase_word(s):
                pattern = r"^[a-z]+"
                match = re.match(pattern, s)
                if match:
                    return True
                else:
                    return False
            for _ in range(100):
                for index, block_txt in enumerate(meta_txt):
                    if starts_with_lowercase_word(block_txt):
                        if meta_txt[index-1] != '\n':
                            meta_txt[index-1] += ' '
                        else:
                            meta_txt[index-1] = ''
                        meta_txt[index-1] += meta_txt[index]
                        meta_txt[index] = '\n'
            return meta_txt
        meta_txt = 合并小写开头的段落块(meta_txt)
        meta_txt = 清理多余的空行(meta_txt)

        meta_txt = '\n'.join(meta_txt)
        # remove duplicated line breaks
        for _ in range(5):
            meta_txt = meta_txt.replace('\n\n', '\n')

        # one line break -> two line breaks
        meta_txt = meta_txt.replace('\n', '\n\n')

        ############################## <step 5: show the segmentation result> ##################################
        # for f in finals:
        #     print亮黄(f)
        #     print亮绿('***************************')

    return meta_txt, page_one_meta
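The body-font detection in step 2 is a simple weighted vote: every span votes for its font size with a weight equal to its character count, the winner is taken as the body font, and anything noticeably smaller is treated as footnote material. A self-contained sketch of that vote (the span data here is made up):

# Weighted vote for the dominant (body) font size, as in step 2 above.
spans = [("Body text...", 10.0, 420), ("More body", 10.0, 380),
         ("Figure 1: caption", 8.5, 60), ("SECTION TITLE", 14.0, 20)]

votes = {}
for _text, size, n_chars in spans:
    votes[size] = votes.get(size, 0) + n_chars   # each span votes with its length

main_fsize = max(votes, key=votes.get)
threshold = main_fsize * 0.95                    # same cutoff as REMOVE_FOOT_FFSIZE_PERCENT
print(main_fsize, [s for _, s, _ in spans if s <= threshold])  # 10.0, the 8.5pt spans get flagged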
@@ -0,0 +1,87 @@
#include "libipc/buffer.h"
#include "libipc/utility/pimpl.h"

#include <cstring>

namespace ipc {

bool operator==(buffer const & b1, buffer const & b2) {
    return (b1.size() == b2.size()) && (std::memcmp(b1.data(), b2.data(), b1.size()) == 0);
}

bool operator!=(buffer const & b1, buffer const & b2) {
    return !(b1 == b2);
}

class buffer::buffer_ : public pimpl<buffer_> {
public:
    void*       p_;
    std::size_t s_;
    void*       a_;
    buffer::destructor_t d_;

    buffer_(void* p, std::size_t s, buffer::destructor_t d, void* a)
        : p_(p), s_(s), a_(a), d_(d) {
    }

    ~buffer_() {
        if (d_ == nullptr) return;
        d_((a_ == nullptr) ? p_ : a_, s_);
    }
};

buffer::buffer()
    : buffer(nullptr, 0, nullptr, nullptr) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d)
    : p_(p_->make(p, s, d, nullptr)) {
}

buffer::buffer(void* p, std::size_t s, destructor_t d, void* additional)
    : p_(p_->make(p, s, d, additional)) {
}

buffer::buffer(void* p, std::size_t s)
    : buffer(p, s, nullptr) {
}

buffer::buffer(char const & c)
    : buffer(const_cast<char*>(&c), 1) {
}

buffer::buffer(buffer&& rhs)
    : buffer() {
    swap(rhs);
}

buffer::~buffer() {
    p_->clear();
}

void buffer::swap(buffer& rhs) {
    std::swap(p_, rhs.p_);
}

buffer& buffer::operator=(buffer rhs) {
    swap(rhs);
    return *this;
}

bool buffer::empty() const noexcept {
    return (impl(p_)->p_ == nullptr) || (impl(p_)->s_ == 0);
}

void* buffer::data() noexcept {
    return impl(p_)->p_;
}

void const * buffer::data() const noexcept {
    return impl(p_)->p_;
}

std::size_t buffer::size() const noexcept {
    return impl(p_)->s_;
}

} // namespace ipc
@@ -0,0 +1,701 @@

#include <type_traits>
#include <cstring>
#include <algorithm>
#include <utility>      // std::pair, std::move, std::forward
#include <atomic>
#include <type_traits>  // aligned_storage_t
#include <string>
#include <vector>
#include <array>
#include <cassert>

#include "libipc/ipc.h"
#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/pool_alloc.h"
#include "libipc/queue.h"
#include "libipc/policy.h"
#include "libipc/rw_lock.h"
#include "libipc/waiter.h"

#include "libipc/utility/log.h"
#include "libipc/utility/id_pool.h"
#include "libipc/utility/scope_guard.h"
#include "libipc/utility/utility.h"

#include "libipc/memory/resource.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_array.h"

namespace {

using msg_id_t = std::uint32_t;
using acc_t = std::atomic<msg_id_t>;

template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t;

template <std::size_t AlignSize>
struct msg_t<0, AlignSize> {
    msg_id_t     cc_id_;
    msg_id_t     id_;
    std::int32_t remain_;
    bool         storage_;
};

template <std::size_t DataSize, std::size_t AlignSize>
struct msg_t : msg_t<0, AlignSize> {
    std::aligned_storage_t<DataSize, AlignSize> data_ {};

    msg_t() = default;
    msg_t(msg_id_t cc_id, msg_id_t id, std::int32_t remain, void const * data, std::size_t size)
        : msg_t<0, AlignSize> {cc_id, id, remain, (data == nullptr) || (size == 0)} {
        if (this->storage_) {
            if (data != nullptr) {
                // copy the storage-id
                *reinterpret_cast<ipc::storage_id_t*>(&data_) =
                    *static_cast<ipc::storage_id_t const *>(data);
            }
        }
        else std::memcpy(&data_, data, size);
    }
};

template <typename T>
ipc::buff_t make_cache(T& data, std::size_t size) {
    auto ptr = ipc::mem::alloc(size);
    std::memcpy(ptr, &data, (ipc::detail::min)(sizeof(data), size));
    return { ptr, size, ipc::mem::free };
}

struct cache_t {
    std::size_t fill_;
    ipc::buff_t buff_;

    cache_t(std::size_t f, ipc::buff_t && b)
        : fill_(f), buff_(std::move(b))
    {}

    void append(void const * data, std::size_t size) {
        if (fill_ >= buff_.size() || data == nullptr || size == 0) return;
        auto new_fill = (ipc::detail::min)(fill_ + size, buff_.size());
        std::memcpy(static_cast<ipc::byte_t*>(buff_.data()) + fill_, data, new_fill - fill_);
        fill_ = new_fill;
    }
};

auto cc_acc() {
    static ipc::shm::handle acc_h("__CA_CONN__", sizeof(acc_t));
    return static_cast<acc_t*>(acc_h.get());
}

IPC_CONSTEXPR_ std::size_t align_chunk_size(std::size_t size) noexcept {
    return (((size - 1) / ipc::large_msg_align) + 1) * ipc::large_msg_align;
}

IPC_CONSTEXPR_ std::size_t calc_chunk_size(std::size_t size) noexcept {
    return ipc::make_align(alignof(std::max_align_t), align_chunk_size(
           ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>)) + size));
}

struct chunk_t {
    std::atomic<ipc::circ::cc_t> &conns() noexcept {
        return *reinterpret_cast<std::atomic<ipc::circ::cc_t> *>(this);
    }

    void *data() noexcept {
        return reinterpret_cast<ipc::byte_t *>(this)
             + ipc::make_align(alignof(std::max_align_t), sizeof(std::atomic<ipc::circ::cc_t>));
    }
};

struct chunk_info_t {
    ipc::id_pool<> pool_;
    ipc::spin_lock lock_;

    IPC_CONSTEXPR_ static std::size_t chunks_mem_size(std::size_t chunk_size) noexcept {
        return ipc::id_pool<>::max_count * chunk_size;
    }

    ipc::byte_t *chunks_mem() noexcept {
        return reinterpret_cast<ipc::byte_t *>(this + 1);
    }

    chunk_t *at(std::size_t chunk_size, ipc::storage_id_t id) noexcept {
        if (id < 0) return nullptr;
        return reinterpret_cast<chunk_t *>(chunks_mem() + (chunk_size * id));
    }
};

auto& chunk_storages() {
    class chunk_handle_t {
        ipc::shm::handle handle_;

    public:
        chunk_info_t *get_info(std::size_t chunk_size) {
            if (!handle_.valid() &&
                !handle_.acquire( ("__CHUNK_INFO__" + ipc::to_string(chunk_size)).c_str(),
                                  sizeof(chunk_info_t) + chunk_info_t::chunks_mem_size(chunk_size) )) {
                ipc::error("[chunk_storages] chunk_shm.id_info_.acquire failed: chunk_size = %zd\n", chunk_size);
                return nullptr;
            }
            auto info = static_cast<chunk_info_t*>(handle_.get());
            if (info == nullptr) {
                ipc::error("[chunk_storages] chunk_shm.id_info_.get failed: chunk_size = %zd\n", chunk_size);
                return nullptr;
            }
            return info;
        }
    };
    static ipc::map<std::size_t, chunk_handle_t> chunk_hs;
    return chunk_hs;
}

chunk_info_t *chunk_storage_info(std::size_t chunk_size) {
    auto &storages = chunk_storages();
    std::decay_t<decltype(storages)>::iterator it;
    {
        static ipc::rw_lock lock;
        IPC_UNUSED_ std::shared_lock<ipc::rw_lock> guard {lock};
        if ((it = storages.find(chunk_size)) == storages.end()) {
            using chunk_handle_t = std::decay_t<decltype(storages)>::value_type::second_type;
            guard.unlock();
            IPC_UNUSED_ std::lock_guard<ipc::rw_lock> guard {lock};
            it = storages.emplace(chunk_size, chunk_handle_t{}).first;
        }
    }
    return it->second.get_info(chunk_size);
}

std::pair<ipc::storage_id_t, void*> acquire_storage(std::size_t size, ipc::circ::cc_t conns) {
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return {};

    info->lock_.lock();
    info->pool_.prepare();
    // get a unique id
    auto id = info->pool_.acquire();
    info->lock_.unlock();

    auto chunk = info->at(chunk_size, id);
    if (chunk == nullptr) return {};
    chunk->conns().store(conns, std::memory_order_relaxed);
    return { id, chunk->data() };
}

void *find_storage(ipc::storage_id_t id, std::size_t size) {
    if (id < 0) {
        ipc::error("[find_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return nullptr;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return nullptr;
    return info->at(chunk_size, id)->data();
}

void release_storage(ipc::storage_id_t id, std::size_t size) {
    if (id < 0) {
        ipc::error("[release_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return;
    info->lock_.lock();
    info->pool_.release(id);
    info->lock_.unlock();
}

template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::unicast>,
            std::atomic<ipc::circ::cc_t> &/*conns*/, ipc::circ::cc_t /*curr_conns*/, ipc::circ::cc_t /*conn_id*/) noexcept {
    return true;
}

template <ipc::relat Rp, ipc::relat Rc>
bool sub_rc(ipc::wr<Rp, Rc, ipc::trans::broadcast>,
            std::atomic<ipc::circ::cc_t> &conns, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) noexcept {
    auto last_conns = curr_conns & ~conn_id;
    for (unsigned k = 0;;) {
        auto chunk_conns = conns.load(std::memory_order_acquire);
        if (conns.compare_exchange_weak(chunk_conns, chunk_conns & last_conns, std::memory_order_release)) {
            return (chunk_conns & last_conns) == 0;
        }
        ipc::yield(k);
    }
}

template <typename Flag>
void recycle_storage(ipc::storage_id_t id, std::size_t size, ipc::circ::cc_t curr_conns, ipc::circ::cc_t conn_id) {
    if (id < 0) {
        ipc::error("[recycle_storage] id is invalid: id = %ld, size = %zd\n", (long)id, size);
        return;
    }
    std::size_t chunk_size = calc_chunk_size(size);
    auto info = chunk_storage_info(chunk_size);
    if (info == nullptr) return;

    auto chunk = info->at(chunk_size, id);
    if (chunk == nullptr) return;

    if (!sub_rc(Flag{}, chunk->conns(), curr_conns, conn_id)) {
        return;
    }
    info->lock_.lock();
    info->pool_.release(id);
    info->lock_.unlock();
}

template <typename MsgT>
bool clear_message(void* p) {
    auto msg = static_cast<MsgT*>(p);
    if (msg->storage_) {
        std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg->remain_;
        if (r_size <= 0) {
            ipc::error("[clear_message] invalid msg size: %d\n", (int)r_size);
            return true;
        }
        release_storage(
            *reinterpret_cast<ipc::storage_id_t*>(&msg->data_),
            static_cast<std::size_t>(r_size));
    }
    return true;
}

struct conn_info_head {

    ipc::string name_;
    msg_id_t    cc_id_; // connection-info id
    ipc::detail::waiter cc_waiter_, wt_waiter_, rd_waiter_;
    ipc::shm::handle acc_h_;

    conn_info_head(char const * name)
        : name_ {name}
        , cc_id_ {(cc_acc() == nullptr) ? 0 : cc_acc()->fetch_add(1, std::memory_order_relaxed)}
        , cc_waiter_{("__CC_CONN__" + name_).c_str()}
        , wt_waiter_{("__WT_CONN__" + name_).c_str()}
        , rd_waiter_{("__RD_CONN__" + name_).c_str()}
        , acc_h_ {("__AC_CONN__" + name_).c_str(), sizeof(acc_t)} {
    }

    void quit_waiting() {
        cc_waiter_.quit_waiting();
        wt_waiter_.quit_waiting();
        rd_waiter_.quit_waiting();
    }

    auto acc() {
        return static_cast<acc_t*>(acc_h_.get());
    }

    auto& recv_cache() {
        thread_local ipc::unordered_map<msg_id_t, cache_t> tls;
        return tls;
    }
};

template <typename W, typename F>
bool wait_for(W& waiter, F&& pred, std::uint64_t tm) {
    if (tm == 0) return !pred();
    for (unsigned k = 0; pred();) {
        bool ret = true;
        ipc::sleep(k, [&k, &ret, &waiter, &pred, tm] {
            ret = waiter.wait_if(std::forward<F>(pred), tm);
            k   = 0;
        });
        if (!ret) return false; // timeout or fail
        if (k == 0) break;      // k has been reset
    }
    return true;
}

template <typename Policy,
          std::size_t DataSize  = ipc::data_length,
          std::size_t AlignSize = (ipc::detail::min)(DataSize, alignof(std::max_align_t))>
struct queue_generator {

    using queue_t = ipc::queue<msg_t<DataSize, AlignSize>, Policy>;

    struct conn_info_t : conn_info_head {
        queue_t que_;

        conn_info_t(char const * name)
            : conn_info_head{name}
            , que_{("__QU_CONN__" +
                    ipc::to_string(DataSize) + "__" +
                    ipc::to_string(AlignSize) + "__" + name).c_str()} {
        }

        void disconnect_receiver() {
            bool dis = que_.disconnect();
            this->quit_waiting();
            if (dis) {
                this->recv_cache().clear();
            }
        }
    };
};

template <typename Policy>
struct detail_impl {

    using policy_t    = Policy;
    using flag_t      = typename policy_t::flag_t;
    using queue_t     = typename queue_generator<policy_t>::queue_t;
    using conn_info_t = typename queue_generator<policy_t>::conn_info_t;

    constexpr static conn_info_t* info_of(ipc::handle_t h) noexcept {
        return static_cast<conn_info_t*>(h);
    }

    constexpr static queue_t* queue_of(ipc::handle_t h) noexcept {
        return (info_of(h) == nullptr) ? nullptr : &(info_of(h)->que_);
    }

    /* API implementations */

    static void disconnect(ipc::handle_t h) {
        auto que = queue_of(h);
        if (que == nullptr) {
            return;
        }
        que->shut_sending();
        assert(info_of(h) != nullptr);
        info_of(h)->disconnect_receiver();
    }

    static bool reconnect(ipc::handle_t * ph, bool start_to_recv) {
        assert(ph != nullptr);
        assert(*ph != nullptr);
        auto que = queue_of(*ph);
        if (que == nullptr) {
            return false;
        }
        if (start_to_recv) {
            que->shut_sending();
            if (que->connect()) { // wouldn't connect twice
                info_of(*ph)->cc_waiter_.broadcast();
                return true;
            }
            return false;
        }
        // start_to_recv == false
        if (que->connected()) {
            info_of(*ph)->disconnect_receiver();
        }
        return que->ready_sending();
    }

    static bool connect(ipc::handle_t * ph, char const * name, bool start_to_recv) {
        assert(ph != nullptr);
        if (*ph == nullptr) {
            *ph = ipc::mem::alloc<conn_info_t>(name);
        }
        return reconnect(ph, start_to_recv);
    }

    static void destroy(ipc::handle_t h) {
        disconnect(h);
        ipc::mem::free(info_of(h));
    }

    static std::size_t recv_count(ipc::handle_t h) noexcept {
        auto que = queue_of(h);
        if (que == nullptr) {
            return ipc::invalid_value;
        }
        return que->conn_count();
    }

    static bool wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
        auto que = queue_of(h);
        if (que == nullptr) {
            return false;
        }
        return wait_for(info_of(h)->cc_waiter_, [que, r_count] {
            return que->conn_count() < r_count;
        }, tm);
    }

    template <typename F>
    static bool send(F&& gen_push, ipc::handle_t h, void const * data, std::size_t size) {
        if (data == nullptr || size == 0) {
            ipc::error("fail: send(%p, %zd)\n", data, size);
            return false;
        }
        auto que = queue_of(h);
        if (que == nullptr) {
            ipc::error("fail: send, queue_of(h) == nullptr\n");
            return false;
        }
        if (que->elems() == nullptr) {
            ipc::error("fail: send, queue_of(h)->elems() == nullptr\n");
            return false;
        }
        if (!que->ready_sending()) {
            ipc::error("fail: send, que->ready_sending() == false\n");
            return false;
        }
        ipc::circ::cc_t conns = que->elems()->connections(std::memory_order_relaxed);
        if (conns == 0) {
            ipc::error("fail: send, there is no receiver on this connection.\n");
            return false;
        }
        // calc a new message id
        auto acc = info_of(h)->acc();
        if (acc == nullptr) {
            ipc::error("fail: send, info_of(h)->acc() == nullptr\n");
            return false;
        }
        auto msg_id   = acc->fetch_add(1, std::memory_order_relaxed);
        auto try_push = std::forward<F>(gen_push)(info_of(h), que, msg_id);
        if (size > ipc::large_msg_limit) {
            auto   dat = acquire_storage(size, conns);
            void * buf = dat.second;
            if (buf != nullptr) {
                std::memcpy(buf, data, size);
                return try_push(static_cast<std::int32_t>(size) -
                                static_cast<std::int32_t>(ipc::data_length), &(dat.first), 0);
            }
            // fall back to message fragments
            //ipc::log("fail: shm::handle for big message. msg_id: %zd, size: %zd\n", msg_id, size);
        }
        // push the message fragments
        std::int32_t offset = 0;
        for (std::int32_t i = 0; i < static_cast<std::int32_t>(size / ipc::data_length); ++i, offset += ipc::data_length) {
            if (!try_push(static_cast<std::int32_t>(size) - offset - static_cast<std::int32_t>(ipc::data_length),
                          static_cast<ipc::byte_t const *>(data) + offset, ipc::data_length)) {
                return false;
            }
        }
        // if remain > 0, this is the last message fragment
        std::int32_t remain = static_cast<std::int32_t>(size) - offset;
        if (remain > 0) {
            if (!try_push(remain - static_cast<std::int32_t>(ipc::data_length),
                          static_cast<ipc::byte_t const *>(data) + offset,
                          static_cast<std::size_t>(remain))) {
                return false;
            }
        }
        return true;
    }

    static bool send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
        return send([tm](auto info, auto que, auto msg_id) {
            return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
                if (!wait_for(info->wt_waiter_, [&] {
                        return !que->push(
                            [](void*) { return true; },
                            info->cc_id_, msg_id, remain, data, size);
                    }, tm)) {
                    ipc::log("force_push: msg_id = %zd, remain = %d, size = %zd\n", msg_id, remain, size);
                    if (!que->force_push(
                            clear_message<typename queue_t::value_t>,
                            info->cc_id_, msg_id, remain, data, size)) {
                        return false;
                    }
                }
                info->rd_waiter_.broadcast();
                return true;
            };
        }, h, data, size);
    }

    static bool try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
        return send([tm](auto info, auto que, auto msg_id) {
            return [tm, info, que, msg_id](std::int32_t remain, void const * data, std::size_t size) {
                if (!wait_for(info->wt_waiter_, [&] {
                        return !que->push(
                            [](void*) { return true; },
                            info->cc_id_, msg_id, remain, data, size);
                    }, tm)) {
                    return false;
                }
                info->rd_waiter_.broadcast();
                return true;
            };
        }, h, data, size);
    }

    static ipc::buff_t recv(ipc::handle_t h, std::uint64_t tm) {
        auto que = queue_of(h);
        if (que == nullptr) {
            ipc::error("fail: recv, queue_of(h) == nullptr\n");
            return {};
        }
        if (!que->connected()) {
            // hasn't connected yet, just return.
            return {};
        }
        auto& rc = info_of(h)->recv_cache();
        for (;;) {
            // pop a new message
            typename queue_t::value_t msg;
            if (!wait_for(info_of(h)->rd_waiter_, [que, &msg] {
                    return !que->pop(msg);
                }, tm)) {
                // pop failed, just return.
                return {};
            }
            info_of(h)->wt_waiter_.broadcast();
            if ((info_of(h)->acc() != nullptr) && (msg.cc_id_ == info_of(h)->cc_id_)) {
                continue; // ignore messages sent to self
            }
            // msg.remain_ may be negative, with abs(msg.remain_) < data_length
            std::int32_t r_size = static_cast<std::int32_t>(ipc::data_length) + msg.remain_;
            if (r_size <= 0) {
                ipc::error("fail: recv, r_size = %d\n", (int)r_size);
                return {};
            }
            std::size_t msg_size = static_cast<std::size_t>(r_size);
            // large message
            if (msg.storage_) {
                ipc::storage_id_t buf_id = *reinterpret_cast<ipc::storage_id_t*>(&msg.data_);
                void* buf = find_storage(buf_id, msg_size);
                if (buf != nullptr) {
                    struct recycle_t {
                        ipc::storage_id_t storage_id;
                        ipc::circ::cc_t   curr_conns;
                        ipc::circ::cc_t   conn_id;
                    } *r_info = ipc::mem::alloc<recycle_t>(recycle_t{
                        buf_id, que->elems()->connections(std::memory_order_relaxed), que->connected_id()
                    });
                    if (r_info == nullptr) {
                        ipc::log("fail: ipc::mem::alloc<recycle_t>.\n");
                        return ipc::buff_t{buf, msg_size}; // no recycle
                    } else {
                        return ipc::buff_t{buf, msg_size, [](void* p_info, std::size_t size) {
                            auto r_info = static_cast<recycle_t *>(p_info);
                            IPC_UNUSED_ auto finally = ipc::guard([r_info] {
                                ipc::mem::free(r_info);
                            });
                            recycle_storage<flag_t>(r_info->storage_id, size, r_info->curr_conns, r_info->conn_id);
                        }, r_info};
                    }
                } else {
                    ipc::log("fail: shm::handle for large message. msg_id: %zd, buf_id: %zd, size: %zd\n", msg.id_, buf_id, msg_size);
                    continue;
                }
            }
            // find the cache for msg.id_
            auto cac_it = rc.find(msg.id_);
            if (cac_it == rc.end()) {
                if (msg_size <= ipc::data_length) {
                    return make_cache(msg.data_, msg_size);
                }
                // gc
                if (rc.size() > 1024) {
                    std::vector<msg_id_t> need_del;
                    for (auto const & pair : rc) {
                        auto cmp = std::minmax(msg.id_, pair.first);
                        if (cmp.second - cmp.first > 8192) {
                            need_del.push_back(pair.first);
                        }
                    }
                    for (auto id : need_del) rc.erase(id);
|
||||
}
|
||||
// cache the first message fragment
|
||||
rc.emplace(msg.id_, cache_t { ipc::data_length, make_cache(msg.data_, msg_size) });
|
||||
}
|
||||
// has cached before this message
|
||||
else {
|
||||
auto& cac = cac_it->second;
|
||||
// this is the last message fragment
|
||||
if (msg.remain_ <= 0) {
|
||||
cac.append(&(msg.data_), msg_size);
|
||||
// finish this message, erase it from cache
|
||||
auto buff = std::move(cac.buff_);
|
||||
rc.erase(cac_it);
|
||||
return buff;
|
||||
}
|
||||
// there are remain datas after this message
|
||||
cac.append(&(msg.data_), ipc::data_length);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static ipc::buff_t try_recv(ipc::handle_t h) {
|
||||
return recv(h, 0);
|
||||
}
|
||||
|
||||
}; // detail_impl<Policy>
|
||||
|
||||
template <typename Flag>
|
||||
using policy_t = ipc::policy::choose<ipc::circ::elem_array, Flag>;
|
||||
|
||||
} // internal-linkage
|
||||
|
||||
namespace ipc {
|
||||
|
||||
template <typename Flag>
|
||||
ipc::handle_t chan_impl<Flag>::inited() {
|
||||
ipc::detail::waiter::init();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
bool chan_impl<Flag>::connect(ipc::handle_t * ph, char const * name, unsigned mode) {
|
||||
return detail_impl<policy_t<Flag>>::connect(ph, name, mode & receiver);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
bool chan_impl<Flag>::reconnect(ipc::handle_t * ph, unsigned mode) {
|
||||
return detail_impl<policy_t<Flag>>::reconnect(ph, mode & receiver);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
void chan_impl<Flag>::disconnect(ipc::handle_t h) {
|
||||
detail_impl<policy_t<Flag>>::disconnect(h);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
void chan_impl<Flag>::destroy(ipc::handle_t h) {
|
||||
detail_impl<policy_t<Flag>>::destroy(h);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
char const * chan_impl<Flag>::name(ipc::handle_t h) {
|
||||
auto info = detail_impl<policy_t<Flag>>::info_of(h);
|
||||
return (info == nullptr) ? nullptr : info->name_.c_str();
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
std::size_t chan_impl<Flag>::recv_count(ipc::handle_t h) {
|
||||
return detail_impl<policy_t<Flag>>::recv_count(h);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
bool chan_impl<Flag>::wait_for_recv(ipc::handle_t h, std::size_t r_count, std::uint64_t tm) {
|
||||
return detail_impl<policy_t<Flag>>::wait_for_recv(h, r_count, tm);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
bool chan_impl<Flag>::send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||
return detail_impl<policy_t<Flag>>::send(h, data, size, tm);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
buff_t chan_impl<Flag>::recv(ipc::handle_t h, std::uint64_t tm) {
|
||||
return detail_impl<policy_t<Flag>>::recv(h, tm);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
bool chan_impl<Flag>::try_send(ipc::handle_t h, void const * data, std::size_t size, std::uint64_t tm) {
|
||||
return detail_impl<policy_t<Flag>>::try_send(h, data, size, tm);
|
||||
}
|
||||
|
||||
template <typename Flag>
|
||||
buff_t chan_impl<Flag>::try_recv(ipc::handle_t h) {
|
||||
return detail_impl<policy_t<Flag>>::try_recv(h);
|
||||
}
|
||||
|
||||
template struct chan_impl<ipc::wr<relat::single, relat::single, trans::unicast >>;
|
||||
// template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::unicast >>; // TBD
|
||||
// template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::unicast >>; // TBD
|
||||
template struct chan_impl<ipc::wr<relat::single, relat::multi , trans::broadcast>>;
|
||||
template struct chan_impl<ipc::wr<relat::multi , relat::multi , trans::broadcast>>;
|
||||
|
||||
} // namespace ipc
|
||||
@@ -0,0 +1,25 @@
|
||||
#pragma once
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
#include "libipc/def.h"
|
||||
#include "libipc/prod_cons.h"
|
||||
|
||||
#include "libipc/circ/elem_array.h"
|
||||
|
||||
namespace ipc {
|
||||
namespace policy {
|
||||
|
||||
template <template <typename, std::size_t...> class Elems, typename Flag>
|
||||
struct choose;
|
||||
|
||||
template <typename Flag>
|
||||
struct choose<circ::elem_array, Flag> {
|
||||
using flag_t = Flag;
|
||||
|
||||
template <std::size_t DataSize, std::size_t AlignSize>
|
||||
using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>;
|
||||
};
|
||||
|
||||
} // namespace policy
|
||||
} // namespace ipc
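
// Illustrative note (a sketch using the names defined above): choose<> is how
// a channel flag selects a queue layout at compile time. A multi-writer
// broadcast queue of 64-byte, 8-aligned slots would be spelled roughly as:
//
//   using flag  = ipc::wr<ipc::relat::multi, ipc::relat::multi, ipc::trans::broadcast>;
//   using elems = ipc::policy::choose<ipc::circ::elem_array, flag>::elems_t<64, 8>;
//
// i.e. an elem_array whose push/pop behaviour comes from prod_cons_impl<flag>.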

@@ -0,0 +1,17 @@
#include "libipc/pool_alloc.h"

#include "libipc/memory/resource.h"

namespace ipc {
namespace mem {

void* pool_alloc::alloc(std::size_t size) {
    return async_pool_alloc::alloc(size);
}

void pool_alloc::free(void* p, std::size_t size) {
    async_pool_alloc::free(p, size);
}

} // namespace mem
} // namespace ipc

@@ -0,0 +1,433 @@
#pragma once

#include <atomic>
#include <utility>
#include <cstring>
#include <type_traits>
#include <cstdint>

#include "libipc/def.h"

#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
#include "libipc/utility/log.h"
#include "libipc/utility/utility.h"

namespace ipc {

////////////////////////////////////////////////////////////////
/// producer-consumer implementation
////////////////////////////////////////////////////////////////

template <typename Flag>
struct prod_cons_impl;

template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index

    constexpr circ::u2_t cursor() const noexcept {
        return 0;
    }

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
        if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
            return false; // full
        }
        std::forward<F>(f)(&(elems[cur_wt].data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    /**
     * In single-single-unicast, 'force_push' means 'no reader' or 'the only reader is dead'.
     * So we can just disconnect all receiver connections and return false.
     */
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
        return false;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
        auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
        if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
            return false; // empty
        }
        std::forward<F>(f)(&(elems[cur_rd].data_));
        std::forward<R>(out)(true);
        rd_.fetch_add(1, std::memory_order_release);
        return true;
    }
};
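
// A note on the full/empty tests above (illustrative): the ring keeps one
// slot unused so 'full' and 'empty' stay distinguishable. With a
// hypothetical 8-slot ring (index_of masks to 0..7):
//
//   empty: index_of(rd_) == index_of(wt_)         -- reader has caught up
//   full : index_of(wt_) == index_of(rd_ - 1)     -- writer is one lap behind
//
// The acquire load of the opposite index pairs with the release
// fetch_add on the other side, so the payload written by f() is visible
// before the index move is observed.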

template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            if (circ::index_of(cur_rd) ==
                circ::index_of(wt_.load(std::memory_order_acquire))) {
                return false; // empty
            }
            std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
            if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                std::forward<F>(f)(buff);
                std::forward<R>(out)(true);
                return true;
            }
            ipc::yield(k);
        }
    }
};

template <>
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
     : prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {

    using flag_t = std::uint64_t;

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        circ::u2_t cur_ct, nxt_ct;
        for (unsigned k = 0;;) {
            cur_ct = ct_.load(std::memory_order_relaxed);
            if (circ::index_of(nxt_ct = cur_ct + 1) ==
                circ::index_of(rd_.load(std::memory_order_acquire))) {
                return false; // full
            }
            if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        auto* el = elems + circ::index_of(cur_ct);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        while (1) {
            auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
            if (cur_ct != wt_.load(std::memory_order_relaxed)) {
                return true;
            }
            if ((~cac_ct) != cur_ct) {
                return true;
            }
            if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
                return true;
            }
            wt_.store(nxt_ct, std::memory_order_release);
            cur_ct = nxt_ct;
            nxt_ct = cur_ct + 1;
            el = elems + circ::index_of(cur_ct);
        }
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            auto cur_wt = wt_.load(std::memory_order_acquire);
            auto id_rd  = circ::index_of(cur_rd);
            auto id_wt  = circ::index_of(cur_wt);
            if (id_rd == id_wt) {
                auto* el = elems + id_wt;
                auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
                if ((~cac_ct) != cur_wt) {
                    return false; // empty
                }
                if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
                    wt_.store(cur_wt + 1, std::memory_order_release);
                }
                k = 0;
            }
            else {
                std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
                if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                    std::forward<F>(f)(buff);
                    std::forward<R>(out)(true);
                    return true;
                }
                ipc::yield(k);
            }
        }
    }
};

template <>
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {

    using rc_t = std::uint64_t;

    enum : rc_t {
        ep_mask = 0x00000000ffffffffull,
        ep_incr = 0x0000000100000000ull
    };

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t> rc_ { 0 }; // read-counter
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
    alignas(cache_line_size) rc_t epoch_ { 0 };           // only one writer

    circ::u2_t cursor() const noexcept {
        return wt_.load(std::memory_order_acquire);
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check that all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
                return false; // has not finished yet
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                    cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        epoch_ += ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check that all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                    cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
        if (cur == cursor()) return false; // acquire
        auto* el = elems + circ::index_of(cur++);
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & ep_mask) == 0) {
                std::forward<R>(out)(true);
                return true;
            }
            auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)((nxt_rc & ep_mask) == 0);
                return true;
            }
            ipc::yield(k);
        }
    }
};

template <>
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {

    using rc_t   = std::uint64_t;
    using flag_t = std::uint64_t;

    enum : rc_t {
        rc_mask = 0x00000000ffffffffull,
        ep_mask = 0x00ffffffffffffffull,
        ep_incr = 0x0100000000000000ull,
        ic_mask = 0xff000000ffffffffull,
        ic_incr = 0x0000000100000000ull
    };
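
    // Bit layout of rc_ implied by the masks above (illustrative):
    //
    //   63      56 55              32 31               0
    //   +--------+------------------+------------------+
    //   | epoch  | inc counter (ic) | reader bits (rc) |
    //   +--------+------------------+------------------+
    //
    // rc   : one bit per connected reader that still has to consume the slot
    // ic   : a version counter bumped by inc_rc() so a CAS on rc_ cannot
    //        succeed against a stale-but-equal reader mask (ABA)
    // epoch: bumped by force_push() so reader bits left over from a previous
    //        generation can be recognized and discarded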

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t  > rc_   { 0 }; // read-counter
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
    alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };

    circ::u2_t cursor() const noexcept {
        return ct_.load(std::memory_order_acquire);
    }

    constexpr static rc_t inc_rc(rc_t rc) noexcept {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    }

    constexpr static rc_t inc_mask(rc_t rc) noexcept {
        return inc_rc(rc) & ~rc_mask;
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.load(std::memory_order_acquire);
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check that all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_relaxed);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
                return false; // has not finished yet
            }
            else if (!rem_cc) {
                auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
                if ((cur_fl != cur_ct) && cur_fl) {
                    return false; // full
                }
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                    cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
                epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // only one thread/process touches this point at any one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check that all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                    cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
                if (epoch == epoch_.load(std::memory_order_acquire)) {
                    break;
                }
                else if (push(wrapper, std::forward<F>(f), elems)) {
                    return true;
                }
                epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
            }
            ipc::yield(k);
        }
        // only one thread/process touches this point at any one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E, std::size_t N>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
        auto* el = elems + circ::index_of(cur);
        auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
        if (cur_fl != ~static_cast<flag_t>(cur)) {
            return false; // empty
        }
        ++cur;
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & rc_mask) == 0) {
                std::forward<R>(out)(true);
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
                return true;
            }
            auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
            bool last_one = false;
            if ((last_one = (nxt_rc & rc_mask) == 0)) {
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
            }
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)(last_one);
                return true;
            }
            ipc::yield(k);
        }
    }
};
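
// How a slot is recycled in the multi-writer broadcast queue above
// (illustrative summary): the last reader to clear its bit from rc_ stores
// f_ct_ = cur + N - 1. Since 'cur' has already been advanced past this slot,
// that value equals the commit index at which the ring wraps back to the
// same slot -- exactly the f_ct_ value push() treats as "slot is free
// again"; any other non-zero f_ct_ makes push() report 'full'.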

} // namespace ipc

@@ -0,0 +1,216 @@
#pragma once

#include <type_traits>
#include <new>
#include <utility>   // [[since C++14]]: std::exchange
#include <algorithm>
#include <atomic>
#include <tuple>
#include <thread>
#include <chrono>
#include <string>
#include <cassert>   // assert

#include "libipc/def.h"
#include "libipc/shm.h"
#include "libipc/rw_lock.h"

#include "libipc/utility/log.h"
#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"

namespace ipc {
namespace detail {

class queue_conn {
protected:
    circ::cc_t connected_ = 0;
    shm::handle elems_h_;

    template <typename Elems>
    Elems* open(char const * name) {
        if (name == nullptr || name[0] == '\0') {
            ipc::error("fail open waiter: name is empty!\n");
            return nullptr;
        }
        if (!elems_h_.acquire(name, sizeof(Elems))) {
            return nullptr;
        }
        auto elems = static_cast<Elems*>(elems_h_.get());
        if (elems == nullptr) {
            ipc::error("fail acquire elems: %s\n", name);
            return nullptr;
        }
        elems->init();
        return elems;
    }

    void close() {
        elems_h_.release();
    }

public:
    queue_conn() = default;
    queue_conn(const queue_conn&) = delete;
    queue_conn& operator=(const queue_conn&) = delete;

    bool connected() const noexcept {
        return connected_ != 0;
    }

    circ::cc_t connected_id() const noexcept {
        return connected_;
    }

    template <typename Elems>
    auto connect(Elems* elems) noexcept
        /*needs 'optional' here*/
        -> std::tuple<bool, bool, decltype(std::declval<Elems>().cursor())> {
        if (elems == nullptr) return {};
        // if it's already connected, just return
        if (connected()) return {connected(), false, 0};
        connected_ = elems->connect_receiver();
        return {connected(), true, elems->cursor()};
    }

    template <typename Elems>
    bool disconnect(Elems* elems) noexcept {
        if (elems == nullptr) return false;
        // if it's already disconnected, just return false
        if (!connected()) return false;
        elems->disconnect_receiver(std::exchange(connected_, 0));
        return true;
    }
};

template <typename Elems>
class queue_base : public queue_conn {
    using base_t = queue_conn;

public:
    using elems_t  = Elems;
    using policy_t = typename elems_t::policy_t;

protected:
    elems_t * elems_ = nullptr;
    decltype(std::declval<elems_t>().cursor()) cursor_ = 0;
    bool sender_flag_ = false;

public:
    using base_t::base_t;

    queue_base() = default;

    explicit queue_base(char const * name)
        : queue_base{} {
        elems_ = open<elems_t>(name);
    }

    explicit queue_base(elems_t * elems) noexcept
        : queue_base{} {
        assert(elems != nullptr);
        elems_ = elems;
    }

    /* not virtual */ ~queue_base() {
        base_t::close();
    }

    elems_t       * elems()       noexcept { return elems_; }
    elems_t const * elems() const noexcept { return elems_; }

    bool ready_sending() noexcept {
        if (elems_ == nullptr) return false;
        return sender_flag_ || (sender_flag_ = elems_->connect_sender());
    }

    void shut_sending() noexcept {
        if (elems_ == nullptr) return;
        if (!sender_flag_) return;
        elems_->disconnect_sender();
    }

    bool connect() noexcept {
        auto tp = base_t::connect(elems_);
        if (std::get<0>(tp) && std::get<1>(tp)) {
            cursor_ = std::get<2>(tp);
            return true;
        }
        return std::get<0>(tp);
    }

    bool disconnect() noexcept {
        return base_t::disconnect(elems_);
    }

    std::size_t conn_count() const noexcept {
        return (elems_ == nullptr) ? static_cast<std::size_t>(invalid_value) : elems_->conn_count();
    }

    bool valid() const noexcept {
        return elems_ != nullptr;
    }

    bool empty() const noexcept {
        return !valid() || (cursor_ == elems_->cursor());
    }

    template <typename T, typename F, typename... P>
    bool push(F&& prep, P&&... params) {
        if (elems_ == nullptr) return false;
        return elems_->push(this, [&](void* p) {
            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
        });
    }

    template <typename T, typename F, typename... P>
    bool force_push(F&& prep, P&&... params) {
        if (elems_ == nullptr) return false;
        return elems_->force_push(this, [&](void* p) {
            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
        });
    }

    template <typename T, typename F>
    bool pop(T& item, F&& out) {
        if (elems_ == nullptr) {
            return false;
        }
        return elems_->pop(this, &(this->cursor_), [&item](void* p) {
            ::new (&item) T(std::move(*static_cast<T*>(p)));
        }, std::forward<F>(out));
    }
};

} // namespace detail

template <typename T, typename Policy>
class queue final : public detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>> {
    using base_t = detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>>;

public:
    using value_t = T;

    using base_t::base_t;

    template <typename... P>
    bool push(P&&... params) {
        return base_t::template push<T>(std::forward<P>(params)...);
    }

    template <typename... P>
    bool force_push(P&&... params) {
        return base_t::template force_push<T>(std::forward<P>(params)...);
    }

    bool pop(T& item) {
        return base_t::pop(item, [](bool) {});
    }

    template <typename F>
    bool pop(T& item, F&& out) {
        return base_t::pop(item, std::forward<F>(out));
    }
};

} // namespace ipc
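
// Minimal usage sketch of ipc::queue (illustrative; 'msg_t' and the queue
// name are hypothetical, and the broadcast policies refuse to push unless
// at least one receiver is connected):
//
//   using flag   = ipc::wr<ipc::relat::single, ipc::relat::multi, ipc::trans::broadcast>;
//   using policy = ipc::policy::choose<ipc::circ::elem_array, flag>;
//   struct msg_t { int id; char payload[32]; };
//
//   ipc::queue<msg_t, policy> q {"demo-queue"};
//   q.connect();                                        // register as a receiver
//   q.push([](void*) { return true; }, msg_t{1, "hi"}); // prep callback + payload
//   msg_t m;
//   while (!q.pop(m)) std::this_thread::yield();        // pop is non-blocking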

@@ -0,0 +1,103 @@

#include <string>
#include <utility>

#include "libipc/shm.h"

#include "libipc/utility/pimpl.h"
#include "libipc/memory/resource.h"

namespace ipc {
namespace shm {

class handle::handle_ : public pimpl<handle_> {
public:
    shm::id_t id_ = nullptr;
    void*     m_  = nullptr;

    ipc::string n_;
    std::size_t s_ = 0;
};

handle::handle()
    : p_(p_->make()) {
}

handle::handle(char const * name, std::size_t size, unsigned mode)
    : handle() {
    acquire(name, size, mode);
}

handle::handle(handle&& rhs)
    : handle() {
    swap(rhs);
}

handle::~handle() {
    release();
    p_->clear();
}

void handle::swap(handle& rhs) {
    std::swap(p_, rhs.p_);
}

handle& handle::operator=(handle rhs) {
    swap(rhs);
    return *this;
}

bool handle::valid() const noexcept {
    return impl(p_)->m_ != nullptr;
}

std::size_t handle::size() const noexcept {
    return impl(p_)->s_;
}

char const * handle::name() const noexcept {
    return impl(p_)->n_.c_str();
}

std::int32_t handle::ref() const noexcept {
    return shm::get_ref(impl(p_)->id_);
}

void handle::sub_ref() noexcept {
    shm::sub_ref(impl(p_)->id_);
}

bool handle::acquire(char const * name, std::size_t size, unsigned mode) {
    release();
    impl(p_)->id_ = shm::acquire((impl(p_)->n_ = name).c_str(), size, mode);
    impl(p_)->m_  = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
    return valid();
}

std::int32_t handle::release() {
    if (impl(p_)->id_ == nullptr) return -1;
    return shm::release(detach());
}

void* handle::get() const {
    return impl(p_)->m_;
}

void handle::attach(id_t id) {
    if (id == nullptr) return;
    release();
    impl(p_)->id_ = id;
    impl(p_)->m_  = shm::get_mem(impl(p_)->id_, &(impl(p_)->s_));
}

id_t handle::detach() {
    auto old = impl(p_)->id_;
    impl(p_)->id_ = nullptr;
    impl(p_)->m_  = nullptr;
    impl(p_)->s_  = 0;
    impl(p_)->n_.clear();
    return old;
}

} // namespace shm
} // namespace ipc
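
// Minimal usage sketch of shm::handle as implemented above (illustrative;
// the segment name is arbitrary, and the two-argument acquire() assumes the
// header declares a default open/create mode):
//
//   ipc::shm::handle h;
//   if (h.acquire("demo-shm", 4096)) {  // create or open the named segment
//       void* mem = h.get();            // mapped memory, h.size() bytes
//       /* ... use mem ... */
//   }                                   // ~handle() releases the mapping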

@@ -0,0 +1,83 @@
#pragma once

#include <utility>
#include <string>
#include <mutex>
#include <atomic>

#include "libipc/def.h"
#include "libipc/mutex.h"
#include "libipc/condition.h"
#include "libipc/platform/detail.h"

namespace ipc {
namespace detail {

class waiter {
    ipc::sync::condition cond_;
    ipc::sync::mutex     lock_;
    std::atomic<bool>    quit_ {false};

public:
    static void init();

    waiter() = default;
    waiter(char const *name) {
        open(name);
    }

    ~waiter() {
        close();
    }

    bool valid() const noexcept {
        return cond_.valid() && lock_.valid();
    }

    bool open(char const *name) noexcept {
        quit_.store(false, std::memory_order_relaxed);
        if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) {
            return false;
        }
        if (!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) {
            cond_.close();
            return false;
        }
        return valid();
    }

    void close() noexcept {
        cond_.close();
        lock_.close();
    }

    template <typename F>
    bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept {
        IPC_UNUSED_ std::lock_guard<ipc::sync::mutex> guard {lock_};
        while ([this, &pred] {
                   return !quit_.load(std::memory_order_relaxed)
                       && std::forward<F>(pred)();
               }()) {
            if (!cond_.wait(lock_, tm)) return false;
        }
        return true;
    }

    bool notify() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.notify(lock_);
    }

    bool broadcast() noexcept {
        std::lock_guard<ipc::sync::mutex>{lock_}; // barrier
        return cond_.broadcast(lock_);
    }

    bool quit_waiting() {
        quit_.store(true, std::memory_order_release);
        return broadcast();
    }
};
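
// Typical wait/notify pairing for this class (illustrative; 'ready' stands
// for whatever shared state the caller maintains):
//
//   ipc::detail::waiter w {"demo-waiter"};
//   // waiting side: blocks while the predicate holds, rechecking on wakeup
//   w.wait_if([&] { return !ready; });
//   // signalling side, after making 'ready' true:
//   w.notify();            // or w.broadcast() to wake every waiter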

} // namespace detail
} // namespace ipc

@@ -0,0 +1,3 @@
https://github.com/mutouyun/cpp-ipc

A high-performance inter-process communication library using shared memory on Linux/Windows.
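
A minimal end-to-end sketch of the channel API these sources implement (illustrative, following the upstream cpp-ipc README; the names are not taken from this diff):

    // process A (sender)
    ipc::channel sender {"demo-channel", ipc::sender};
    sender.send("hello ipc");

    // process B (receiver)
    ipc::channel receiver {"demo-channel", ipc::receiver};
    ipc::buff_t msg = receiver.recv();   // blocks until a message arrives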
(file diff too large to display)
@@ -0,0 +1,316 @@
// jpgd.h - C++ class for JPEG decompression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
#ifndef JPEG_DECODER_H
#define JPEG_DECODER_H

#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>

namespace jpgd
{
  typedef unsigned char  uint8;
  typedef signed short   int16;
  typedef unsigned short uint16;
  typedef unsigned int   uint;
  typedef signed int     int32;

  // Loads a JPEG image from a memory buffer or a file.
  // req_comps can be 1 (grayscale), 3 (RGB), or 4 (RGBA).
  // On return, width/height will be set to the image's dimensions, and actual_comps will be set to either 1 (grayscale) or 3 (RGB).
  // Notes: For more control over where and how the source data is read, see the decompress_jpeg_image_from_stream() function below, or call the jpeg_decoder class directly.
  // Requesting an 8 or 32bpp image is currently a little faster than 24bpp because the jpeg_decoder class itself currently always unpacks to either 8 or 32bpp.
  // BEGIN EPIC MOD
  //unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps);
  unsigned char *decompress_jpeg_image_from_memory(const unsigned char *pSrc_data, int src_data_size, int *width, int *height, int *actual_comps, int req_comps, int format);
  // END EPIC MOD
  unsigned char *decompress_jpeg_image_from_file(const char *pSrc_filename, int *width, int *height, int *actual_comps, int req_comps);
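
  // Example call (illustrative sketch; jpgd's convention is that the returned
  // buffer is malloc()-allocated and owned by the caller):
  //
  //   int w = 0, h = 0, comps = 0;
  //   unsigned char *pixels =
  //       jpgd::decompress_jpeg_image_from_file("in.jpg", &w, &h, &comps, 4);
  //   if (pixels) {
  //     // w*h 32-bit RGBA pixels (A is forced to 255)
  //     free(pixels);
  //   }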

  // Success/failure error codes.
  enum jpgd_status
  {
    JPGD_SUCCESS = 0, JPGD_FAILED = -1, JPGD_DONE = 1,
    JPGD_BAD_DHT_COUNTS = -256, JPGD_BAD_DHT_INDEX, JPGD_BAD_DHT_MARKER, JPGD_BAD_DQT_MARKER, JPGD_BAD_DQT_TABLE,
    JPGD_BAD_PRECISION, JPGD_BAD_HEIGHT, JPGD_BAD_WIDTH, JPGD_TOO_MANY_COMPONENTS,
    JPGD_BAD_SOF_LENGTH, JPGD_BAD_VARIABLE_MARKER, JPGD_BAD_DRI_LENGTH, JPGD_BAD_SOS_LENGTH,
    JPGD_BAD_SOS_COMP_ID, JPGD_W_EXTRA_BYTES_BEFORE_MARKER, JPGD_NO_ARITHMITIC_SUPPORT, JPGD_UNEXPECTED_MARKER,
    JPGD_NOT_JPEG, JPGD_UNSUPPORTED_MARKER, JPGD_BAD_DQT_LENGTH, JPGD_TOO_MANY_BLOCKS,
    JPGD_UNDEFINED_QUANT_TABLE, JPGD_UNDEFINED_HUFF_TABLE, JPGD_NOT_SINGLE_SCAN, JPGD_UNSUPPORTED_COLORSPACE,
    JPGD_UNSUPPORTED_SAMP_FACTORS, JPGD_DECODE_ERROR, JPGD_BAD_RESTART_MARKER, JPGD_ASSERTION_ERROR,
    JPGD_BAD_SOS_SPECTRAL, JPGD_BAD_SOS_SUCCESSIVE, JPGD_STREAM_READ, JPGD_NOTENOUGHMEM
  };

  // Input stream interface.
  // Derive from this class to read input data from sources other than files or memory. Set m_eof_flag to true when no more data is available.
  // The decoder is rather greedy: it will keep on calling this method until its internal input buffer is full, or until the EOF flag is set.
  // If the input stream contains data after the JPEG stream's EOI (end of image) marker it will probably be pulled into the internal buffer.
  // Call the get_total_bytes_read() method to determine the actual size of the JPEG stream after successful decoding.
  class jpeg_decoder_stream
  {
  public:
    jpeg_decoder_stream() { }
    virtual ~jpeg_decoder_stream() { }

    // The read() method is called when the internal input buffer is empty.
    // Parameters:
    // pBuf - input buffer
    // max_bytes_to_read - maximum bytes that can be written to pBuf
    // pEOF_flag - set this to true if at end of stream (no more bytes remaining)
    // Returns -1 on error, otherwise return the number of bytes actually written to the buffer (which may be 0).
    // Notes: This method will be called in a loop until you set *pEOF_flag to true or the internal buffer is full.
    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag) = 0;
  };

  // stdio FILE stream class.
  class jpeg_decoder_file_stream : public jpeg_decoder_stream
  {
    jpeg_decoder_file_stream(const jpeg_decoder_file_stream &);
    jpeg_decoder_file_stream &operator =(const jpeg_decoder_file_stream &);

    FILE *m_pFile;
    bool m_eof_flag, m_error_flag;

  public:
    jpeg_decoder_file_stream();
    virtual ~jpeg_decoder_file_stream();

    bool open(const char *Pfilename);
    void close();

    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
  };

  // Memory stream class.
  class jpeg_decoder_mem_stream : public jpeg_decoder_stream
  {
    const uint8 *m_pSrc_data;
    uint m_ofs, m_size;

  public:
    jpeg_decoder_mem_stream() : m_pSrc_data(NULL), m_ofs(0), m_size(0) { }
    jpeg_decoder_mem_stream(const uint8 *pSrc_data, uint size) : m_pSrc_data(pSrc_data), m_ofs(0), m_size(size) { }

    virtual ~jpeg_decoder_mem_stream() { }

    bool open(const uint8 *pSrc_data, uint size);
    void close() { m_pSrc_data = NULL; m_ofs = 0; m_size = 0; }

    virtual int read(uint8 *pBuf, int max_bytes_to_read, bool *pEOF_flag);
  };

  // Loads JPEG file from a jpeg_decoder_stream.
  unsigned char *decompress_jpeg_image_from_stream(jpeg_decoder_stream *pStream, int *width, int *height, int *actual_comps, int req_comps);

  enum
  {
    JPGD_IN_BUF_SIZE = 8192, JPGD_MAX_BLOCKS_PER_MCU = 10, JPGD_MAX_HUFF_TABLES = 8, JPGD_MAX_QUANT_TABLES = 4,
    JPGD_MAX_COMPONENTS = 4, JPGD_MAX_COMPS_IN_SCAN = 4, JPGD_MAX_BLOCKS_PER_ROW = 8192, JPGD_MAX_HEIGHT = 16384, JPGD_MAX_WIDTH = 16384
  };

  typedef int16 jpgd_quant_t;
  typedef int16 jpgd_block_t;

  class jpeg_decoder
  {
  public:
    // Call get_error_code() after constructing to determine if the stream is valid or not. You may call the get_width(), get_height(), etc.
    // methods after the constructor is called. You may then either destruct the object, or begin decoding the image by calling begin_decoding(), then decode() on each scanline.
    jpeg_decoder(jpeg_decoder_stream *pStream);

    ~jpeg_decoder();

    // Call this method after constructing the object to begin decompression.
    // If JPGD_SUCCESS is returned you may then call decode() on each scanline.
    int begin_decoding();

    // Returns the next scan line.
    // For grayscale images, pScan_line will point to a buffer containing 8-bit pixels (get_bytes_per_pixel() will return 1).
    // Otherwise, it will always point to a buffer containing 32-bit RGBA pixels (A will always be 255, and get_bytes_per_pixel() will return 4).
    // Returns JPGD_SUCCESS if a scan line has been returned.
    // Returns JPGD_DONE if all scan lines have been returned.
    // Returns JPGD_FAILED if an error occurred. Call get_error_code() for more info.
    int decode(const void** pScan_line, uint* pScan_line_len);

    inline jpgd_status get_error_code() const { return m_error_code; }

    inline int get_width() const { return m_image_x_size; }
    inline int get_height() const { return m_image_y_size; }

    inline int get_num_components() const { return m_comps_in_frame; }

    inline int get_bytes_per_pixel() const { return m_dest_bytes_per_pixel; }
    inline int get_bytes_per_scan_line() const { return m_image_x_size * get_bytes_per_pixel(); }

    // Returns the total number of bytes actually consumed by the decoder (which should equal the actual size of the JPEG file).
    inline int get_total_bytes_read() const { return m_total_bytes_read; }

  private:
    jpeg_decoder(const jpeg_decoder &);
    jpeg_decoder &operator =(const jpeg_decoder &);

    typedef void (*pDecode_block_func)(jpeg_decoder *, int, int, int);

    struct huff_tables
    {
      bool ac_table;
      uint  look_up[256];
      uint  look_up2[256];
      uint8 code_size[256];
      uint  tree[512];
    };

    struct coeff_buf
    {
      uint8 *pData;
      int block_num_x, block_num_y;
      int block_len_x, block_len_y;
      int block_size;
    };

    struct mem_block
    {
      mem_block *m_pNext;
      size_t m_used_count;
      size_t m_size;
      char m_data[1];
    };

    jmp_buf m_jmp_state;
    mem_block *m_pMem_blocks;
    int m_image_x_size;
    int m_image_y_size;
    jpeg_decoder_stream *m_pStream;
    int m_progressive_flag;
    uint8 m_huff_ac[JPGD_MAX_HUFF_TABLES];
    uint8* m_huff_num[JPGD_MAX_HUFF_TABLES];      // pointer to number of Huffman codes per bit size
    uint8* m_huff_val[JPGD_MAX_HUFF_TABLES];      // pointer to Huffman codes per bit size
    jpgd_quant_t* m_quant[JPGD_MAX_QUANT_TABLES]; // pointer to quantization tables
    int m_scan_type;                              // Gray, Yh1v1, Yh1v2, Yh2v1, Yh2v2 (CMYK111, CMYK4114 no longer supported)
    int m_comps_in_frame;                         // # of components in frame
    int m_comp_h_samp[JPGD_MAX_COMPONENTS];       // component's horizontal sampling factor
    int m_comp_v_samp[JPGD_MAX_COMPONENTS];       // component's vertical sampling factor
    int m_comp_quant[JPGD_MAX_COMPONENTS];        // component's quantization table selector
    int m_comp_ident[JPGD_MAX_COMPONENTS];        // component's ID
    int m_comp_h_blocks[JPGD_MAX_COMPONENTS];
    int m_comp_v_blocks[JPGD_MAX_COMPONENTS];
    int m_comps_in_scan;                          // # of components in scan
    int m_comp_list[JPGD_MAX_COMPS_IN_SCAN];      // components in this scan
    int m_comp_dc_tab[JPGD_MAX_COMPONENTS];       // component's DC Huffman coding table selector
    int m_comp_ac_tab[JPGD_MAX_COMPONENTS];       // component's AC Huffman coding table selector
    int m_spectral_start;                         // spectral selection start
    int m_spectral_end;                           // spectral selection end
    int m_successive_low;                         // successive approximation low
    int m_successive_high;                        // successive approximation high
    int m_max_mcu_x_size;                         // MCU's max. X size in pixels
    int m_max_mcu_y_size;                         // MCU's max. Y size in pixels
    int m_blocks_per_mcu;
    int m_max_blocks_per_row;
    int m_mcus_per_row, m_mcus_per_col;
    int m_mcu_org[JPGD_MAX_BLOCKS_PER_MCU];
    int m_total_lines_left;                       // total # lines left in image
    int m_mcu_lines_left;                         // total # lines left in this MCU
    int m_real_dest_bytes_per_scan_line;
    int m_dest_bytes_per_scan_line;               // rounded up
    int m_dest_bytes_per_pixel;                   // 4 (RGB) or 1 (Y)
    huff_tables* m_pHuff_tabs[JPGD_MAX_HUFF_TABLES];
    coeff_buf* m_dc_coeffs[JPGD_MAX_COMPONENTS];
    coeff_buf* m_ac_coeffs[JPGD_MAX_COMPONENTS];
    int m_eob_run;
    int m_block_y_mcu[JPGD_MAX_COMPONENTS];
    uint8* m_pIn_buf_ofs;
    int m_in_buf_left;
    int m_tem_flag;
    bool m_eof_flag;
    uint8 m_in_buf_pad_start[128];
    uint8 m_in_buf[JPGD_IN_BUF_SIZE + 128];
    uint8 m_in_buf_pad_end[128];
    int m_bits_left;
    uint m_bit_buf;
    int m_restart_interval;
    int m_restarts_left;
    int m_next_restart_num;
    int m_max_mcus_per_row;
    int m_max_blocks_per_mcu;
    int m_expanded_blocks_per_mcu;
    int m_expanded_blocks_per_row;
    int m_expanded_blocks_per_component;
    bool m_freq_domain_chroma_upsample;
    int m_max_mcus_per_col;
    uint m_last_dc_val[JPGD_MAX_COMPONENTS];
    jpgd_block_t* m_pMCU_coefficients;
    int m_mcu_block_max_zag[JPGD_MAX_BLOCKS_PER_MCU];
    uint8* m_pSample_buf;
    int m_crr[256];
    int m_cbb[256];
    int m_crg[256];
    int m_cbg[256];
    uint8* m_pScan_line_0;
    uint8* m_pScan_line_1;
    jpgd_status m_error_code;
    bool m_ready_flag;
    int m_total_bytes_read;

    void free_all_blocks();
    // BEGIN EPIC MOD
    UE_NORETURN void stop_decoding(jpgd_status status);
    // END EPIC MOD
    void *alloc(size_t n, bool zero = false);
    void word_clear(void *p, uint16 c, uint n);
    void prep_in_buffer();
    void read_dht_marker();
    void read_dqt_marker();
    void read_sof_marker();
    void skip_variable_marker();
    void read_dri_marker();
    void read_sos_marker();
    int next_marker();
    int process_markers();
    void locate_soi_marker();
    void locate_sof_marker();
    int locate_sos_marker();
    void init(jpeg_decoder_stream * pStream);
    void create_look_ups();
    void fix_in_buffer();
    void transform_mcu(int mcu_row);
    void transform_mcu_expand(int mcu_row);
    coeff_buf* coeff_buf_open(int block_num_x, int block_num_y, int block_len_x, int block_len_y);
    inline jpgd_block_t *coeff_buf_getp(coeff_buf *cb, int block_x, int block_y);
    void load_next_row();
    void decode_next_row();
    void make_huff_table(int index, huff_tables *pH);
    void check_quant_tables();
    void check_huff_tables();
    void calc_mcu_block_order();
    int init_scan();
    void init_frame();
    void process_restart();
    void decode_scan(pDecode_block_func decode_block_func);
    void init_progressive();
    void init_sequential();
    void decode_start();
    void decode_init(jpeg_decoder_stream * pStream);
    void H2V2Convert();
    void H2V1Convert();
    void H1V2Convert();
    void H1V1Convert();
    void gray_convert();
    void expanded_convert();
    void find_eoi();
    inline uint get_char();
    inline uint get_char(bool *pPadding_flag);
    inline void stuff_char(uint8 q);
    inline uint8 get_octet();
    inline uint get_bits(int num_bits);
    inline uint get_bits_no_markers(int numbits);
    inline int huff_decode(huff_tables *pH);
    inline int huff_decode(huff_tables *pH, int& extrabits);
    static inline uint8 clamp(int i);
    static void decode_block_dc_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_dc_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_ac_first(jpeg_decoder *pD, int component_id, int block_x, int block_y);
    static void decode_block_ac_refine(jpeg_decoder *pD, int component_id, int block_x, int block_y);
  };

} // namespace jpgd

#endif // JPEG_DECODER_H
(file diff too large to display)
@@ -0,0 +1,172 @@

// jpge.h - C++ class for JPEG compression.
// Public domain, Rich Geldreich <richgel99@gmail.com>
// Alex Evans: Added RGBA support, linear memory allocator.
#ifndef JPEG_ENCODER_H
#define JPEG_ENCODER_H

#include <stdint.h>

namespace jpge
{
  typedef unsigned char  uint8;
  typedef signed short   int16;
  typedef signed int     int32;
  typedef unsigned short uint16;
  typedef unsigned int   uint32;
  typedef unsigned int   uint;

  // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common.
  enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 };

  // JPEG compression parameters structure.
  struct params
  {
    inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { }

    inline bool check_valid() const
    {
      if ((m_quality < 1) || (m_quality > 100)) return false;
      if ((uint)m_subsampling > (uint)H2V2) return false;
      return true;
    }

    // Quality: 1-100, higher is better. Typical values are around 50-95.
    int m_quality;

    // m_subsampling:
    // 0 = Y (grayscale) only
    // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU)
    // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU)
    // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU -- very common)
    subsampling_t m_subsampling;

    // Disables CbCr discrimination - only intended for testing.
    // If true, the Y quantization table is also used for the CbCr channels.
    bool m_no_chroma_discrim_flag;

    bool m_two_pass_flag;
  };

  // Writes JPEG image to a file.
  // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels.
  bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());

  // Writes JPEG image to memory buffer.
  // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes.
  // If return value is true, buf_size will be set to the size of the compressed data.
  bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params());
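
  // Example call (illustrative; 'rgb' is assumed to be a width*height*3
  // buffer of tightly packed RGB pixels):
  //
  //   jpge::params p;
  //   p.m_quality = 90;                       // default is 85
  //   bool ok = jpge::compress_image_to_jpeg_file("out.jpg", width, height,
  //                                               3, rgb, p);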
|
||||
|
||||
// Output stream abstract class - used by the jpeg_encoder class to write to the output stream.
|
||||
// put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts.
|
||||
class output_stream
|
||||
{
|
||||
public:
|
||||
virtual ~output_stream() { };
|
||||
  virtual bool put_buf(const void* Pbuf, int64_t len) = 0;
  template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); }
};

// Lower level jpeg_encoder class - useful if more control is needed than the above helper functions.
class jpeg_encoder
{
public:
  jpeg_encoder();
  ~jpeg_encoder();

  // Initializes the compressor.
  // pStream: The stream object to use for writing compressed data.
  // params - Compression parameters structure, defined above.
  // width, height - Image dimensions.
  // src_channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data.
  // Returns false on out of memory or if a stream write fails.
  bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params());

  const params &get_params() const { return m_params; }

  // Deinitializes the compressor, freeing any allocated memory. May be called at any time.
  void deinit();

  uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; }
  inline uint get_cur_pass() { return m_pass_num; }

  // Call this method with each source scanline.
  // width * src_channels bytes per scanline is expected (RGB or Y format).
  // You must call with NULL after all scanlines are processed to finish compression.
  // Returns false on out of memory or if a stream write fails.
  bool process_scanline(const void* pScanline);

private:
  jpeg_encoder(const jpeg_encoder &);
  jpeg_encoder &operator =(const jpeg_encoder &);

  typedef int32 sample_array_t;

  output_stream *m_pStream;
  params m_params;
  uint8 m_num_components;
  uint8 m_comp_h_samp[3], m_comp_v_samp[3];
  int m_image_x, m_image_y, m_image_bpp, m_image_bpl;
  int m_image_x_mcu, m_image_y_mcu;
  int m_image_bpl_xlt, m_image_bpl_mcu;
  int m_mcus_per_row;
  int m_mcu_x, m_mcu_y;
  uint8 *m_mcu_lines[16];
  uint8 m_mcu_y_ofs;
  sample_array_t m_sample_array[64];
  int16 m_coefficient_array[64];
  int32 m_quantization_tables[2][64];
  uint m_huff_codes[4][256];
  uint8 m_huff_code_sizes[4][256];
  uint8 m_huff_bits[4][17];
  uint8 m_huff_val[4][256];
  uint32 m_huff_count[4][256];
  int m_last_dc_val[3];
  enum { JPGE_OUT_BUF_SIZE = 2048 };
  uint8 m_out_buf[JPGE_OUT_BUF_SIZE];
  uint8 *m_pOut_buf;
  uint m_out_buf_left;
  uint32 m_bit_buffer;
  uint m_bits_in;
  uint8 m_pass_num;
  bool m_all_stream_writes_succeeded;

  void optimize_huffman_table(int table_num, int table_len);
  void emit_byte(uint8 i);
  void emit_word(uint i);
  void emit_marker(int marker);
  void emit_jfif_app0();
  void emit_dqt();
  void emit_sof();
  void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag);
  void emit_dhts();
  void emit_sos();
  void emit_markers();
  void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val);
  void compute_quant_table(int32 *dst, int16 *src);
  void adjust_quant_table(int32 *dst, int32 *src);
  void first_pass_init();
  bool second_pass_init();
  bool jpg_open(int p_x_res, int p_y_res, int src_channels);
  void load_block_8_8_grey(int x);
  void load_block_8_8(int x, int y, int c);
  void load_block_16_8(int x, int c);
  void load_block_16_8_8(int x, int c);
  void load_quantized_coefficients(int component_num);
  void flush_output_buffer();
  void put_bits(uint bits, uint len);
  void code_coefficients_pass_one(int component_num);
  void code_coefficients_pass_two(int component_num);
  void code_block(int component_num);
  void process_mcu_row();
  bool terminate_pass_one();
  bool terminate_pass_two();
  bool process_end_of_image();
  void load_mcu(const void* src);
  void clear();
  void init();
};

} // namespace jpge

#endif // JPEG_ENCODER
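
// [editor note] A minimal usage sketch for the class above; `my_stream` and
// `pixels` are placeholders (any concrete output_stream implementation and a
// width*height*3 RGB buffer), not part of this header:
//
//   jpge::jpeg_encoder enc;
//   if (enc.init(&my_stream, width, height, 3)) {
//     for (jpge::uint pass = 0; pass < enc.get_total_passes(); pass++) {
//       for (int64_t y = 0; y < height; y++)
//         enc.process_scanline(pixels + y * width * 3); // one RGB scanline per call
//       enc.process_scanline(NULL);                     // NULL finishes the pass
//     }
//     enc.deinit();
//   }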
@@ -0,0 +1,3 @@
jpge.h - C++ class for JPEG compression.
Public domain, Rich Geldreich <richgel99@gmail.com>
Alex Evans: Added RGBA support, linear memory allocator.
File diff content too large to display
File diff content too large to display
@@ -0,0 +1,433 @@
#pragma once

#include <atomic>
#include <utility>
#include <cstring>
#include <type_traits>
#include <cstdint>

#include "libipc/def.h"

#include "libipc/platform/detail.h"
#include "libipc/circ/elem_def.h"
#include "libipc/utility/log.h"
#include "libipc/utility/utility.h"

namespace ipc {

////////////////////////////////////////////////////////////////
/// producer-consumer implementation
////////////////////////////////////////////////////////////////

template <typename Flag>
struct prod_cons_impl;

template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index

    constexpr circ::u2_t cursor() const noexcept {
        return 0;
    }

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
        if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
            return false; // full
        }
        std::forward<F>(f)(&(elems[cur_wt].data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    /**
     * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
     * So we could just disconnect all connections of receiver, and return false.
     */
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
        return false;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
        auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
        if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
            return false; // empty
        }
        std::forward<F>(f)(&(elems[cur_rd].data_));
        std::forward<R>(out)(true);
        rd_.fetch_add(1, std::memory_order_release);
        return true;
    }
};
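
// [editor note] In the single-producer/single-consumer case above, rd_ and wt_
// are free-running u2_t counters and index_of() maps them into the ring. The
// queue reports full when the write index catches up to (read index - 1) modulo
// the ring size, so one slot is always left empty to distinguish full from
// empty. The release store in each fetch_add publishes the element contents
// before the index that announces them, pairing with the acquire load on the
// other side; no CAS loops are needed because each index has exactly one writer.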

template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
    : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            if (circ::index_of(cur_rd) ==
                circ::index_of(wt_.load(std::memory_order_acquire))) {
                return false; // empty
            }
            std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
            if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                std::forward<F>(f)(buff);
                std::forward<R>(out)(true);
                return true;
            }
            ipc::yield(k);
        }
    }
};

template <>
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
    : prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {

    using flag_t = std::uint64_t;

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index

    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        circ::u2_t cur_ct, nxt_ct;
        for (unsigned k = 0;;) {
            cur_ct = ct_.load(std::memory_order_relaxed);
            if (circ::index_of(nxt_ct = cur_ct + 1) ==
                circ::index_of(rd_.load(std::memory_order_acquire))) {
                return false; // full
            }
            if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        auto* el = elems + circ::index_of(cur_ct);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        while (1) {
            auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
            if (cur_ct != wt_.load(std::memory_order_relaxed)) {
                return true;
            }
            if ((~cac_ct) != cur_ct) {
                return true;
            }
            if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
                return true;
            }
            wt_.store(nxt_ct, std::memory_order_release);
            cur_ct = nxt_ct;
            nxt_ct = cur_ct + 1;
            el = elems + circ::index_of(cur_ct);
        }
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            auto cur_wt = wt_.load(std::memory_order_acquire);
            auto id_rd  = circ::index_of(cur_rd);
            auto id_wt  = circ::index_of(cur_wt);
            if (id_rd == id_wt) {
                auto* el = elems + id_wt;
                auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
                if ((~cac_ct) != cur_wt) {
                    return false; // empty
                }
                if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
                    wt_.store(cur_wt + 1, std::memory_order_release);
                }
                k = 0;
            }
            else {
                std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
                if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                    std::forward<F>(f)(buff);
                    std::forward<R>(out)(true);
                    return true;
                }
                ipc::yield(k);
            }
        }
    }
};
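
// [editor note] With multiple producers (above), slots are first reserved by a
// CAS on the commit index ct_, so concurrent pushes claim distinct slots. A
// producer that finishes writing stores ~cur_ct into its element's f_ct_ flag;
// whichever thread observes the flag matching the current wt_ then advances
// wt_, possibly walking forward over several already-committed slots (the pop
// path can perform the same catch-up when it finds the ring apparently empty).
// Consumers memcpy the element into a local buffer before the CAS on rd_, so a
// lost race costs only the copy and never yields a torn read.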

template <>
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {

    using rc_t = std::uint64_t;

    enum : rc_t {
        ep_mask = 0x00000000ffffffffull,
        ep_incr = 0x0000000100000000ull
    };

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t> rc_ { 0 }; // read-counter
    };

    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
    alignas(cache_line_size) rc_t epoch_ { 0 };           // only one writer

    circ::u2_t cursor() const noexcept {
        return wt_.load(std::memory_order_acquire);
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
                return false; // has not finished yet
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        epoch_ += ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & ep_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
                break;
            }
            ipc::yield(k);
        }
        std::forward<F>(f)(&(el->data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
        if (cur == cursor()) return false; // acquire
        auto* el = elems + circ::index_of(cur++);
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & ep_mask) == 0) {
                std::forward<R>(out)(true);
                return true;
            }
            auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)((nxt_rc & ep_mask) == 0);
                return true;
            }
            ipc::yield(k);
        }
    }
};
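
// [editor note] In the broadcast case above, rc_ packs two fields: the low 32
// bits (ep_mask) hold a bitmask of connected receivers that still have to read
// the slot, and the high 32 bits hold the writer's epoch. push() stamps a slot
// with epoch_ | cc (one bit per connected reader); each pop() clears its own
// bit via connected_id(). A slot becomes overwritable once its reader bits are
// all clear, or once its epoch is stale because a force_push bumped epoch_.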

template <>
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {

    using rc_t   = std::uint64_t;
    using flag_t = std::uint64_t;

    enum : rc_t {
        rc_mask = 0x00000000ffffffffull,
        ep_mask = 0x00ffffffffffffffull,
        ep_incr = 0x0100000000000000ull,
        ic_mask = 0xff000000ffffffffull,
        ic_incr = 0x0000000100000000ull
    };

    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
        std::atomic<rc_t  > rc_   { 0 }; // read-counter
        std::atomic<flag_t> f_ct_ { 0 }; // commit flag
    };
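
    // [editor note] Field layout of the 64-bit rc_ value, as implied by the
    // masks above:
    //   bits 0-31  (rc_mask)  - bitmask of receivers that still have to read the slot;
    //   bits 32-55 (~ic_mask) - a wrapping counter bumped by inc_rc(), tagging rc_
    //                           so a CAS cannot mistake a recycled value for an
    //                           unchanged one (ABA protection);
    //   bits 56-63 (~ep_mask) - the writer epoch, bumped by ep_incr in force_push().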

    alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
    alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };

    circ::u2_t cursor() const noexcept {
        return ct_.load(std::memory_order_acquire);
    }

    constexpr static rc_t inc_rc(rc_t rc) noexcept {
        return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
    }

    constexpr static rc_t inc_mask(rc_t rc) noexcept {
        return inc_rc(rc) & ~rc_mask;
    }

    template <typename W, typename F, typename E>
    bool push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.load(std::memory_order_acquire);
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_relaxed);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
                return false; // has not finished yet
            }
            else if (!rem_cc) {
                auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
                if ((cur_fl != cur_ct) && cur_fl) {
                    return false; // full
                }
            }
            // consider rem_cc to be 0 here
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
                epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
                break;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&& f, E* elems) {
        E* el;
        circ::u2_t cur_ct;
        rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
        for (unsigned k = 0;;) {
            circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
            if (cc == 0) return false; // no reader
            el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
            // check all consumers have finished reading this element
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            circ::cc_t rem_cc = cur_rc & rc_mask;
            if (cc & rem_cc) {
                ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
                cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
                if (cc == 0) return false; // no reader
            }
            // just compare & exchange
            if (el->rc_.compare_exchange_weak(
                        cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
                if (epoch == epoch_.load(std::memory_order_acquire)) {
                    break;
                }
                else if (push(wrapper, std::forward<F>(f), elems)) {
                    return true;
                }
                epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
            }
            ipc::yield(k);
        }
        // only one thread/process would touch here at one time
        ct_.store(cur_ct + 1, std::memory_order_release);
        std::forward<F>(f)(&(el->data_));
        // set flag & try update wt
        el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
        return true;
    }

    template <typename W, typename F, typename R, typename E, std::size_t N>
    bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
        auto* el = elems + circ::index_of(cur);
        auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
        if (cur_fl != ~static_cast<flag_t>(cur)) {
            return false; // empty
        }
        ++cur;
        std::forward<F>(f)(&(el->data_));
        for (unsigned k = 0;;) {
            auto cur_rc = el->rc_.load(std::memory_order_acquire);
            if ((cur_rc & rc_mask) == 0) {
                std::forward<R>(out)(true);
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
                return true;
            }
            auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
            bool last_one = false;
            if ((last_one = (nxt_rc & rc_mask) == 0)) {
                el->f_ct_.store(cur + N - 1, std::memory_order_release);
            }
            if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
                std::forward<R>(out)(last_one);
                return true;
            }
            ipc::yield(k);
        }
    }
};

} // namespace ipc
@@ -0,0 +1,58 @@
The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows with the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}.
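
% [editor note] To make the growth rates concrete (assumptions: kernel width $k$,
% positions a distance $n$ apart): a stack of standard convolutions needs on the
% order of $n/k$ layers to connect them, dilated convolutions as in ByteNet need
% on the order of $\log_k n$ layers, while a single self-attention layer connects
% them directly, i.e. in $O(1)$ sequential operations.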

Self-attention, sometimes called intra-attention, is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}.

End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}.

To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution.
In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}.


%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.

%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation.

%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.

%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost.

%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length.

%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs.

%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture.



%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)?

%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence.

%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model.

%\begin{table}[h!]
%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. $n$ represents the sequence length and $d$ represents the channel depth.}
%\label{tab:op_complexities}
%\begin{center}
%\vspace{-5pt}
%\scalebox{0.75}{

%\begin{tabular}{l|c|c|c}
%\hline \hline
%Layer Type & Receptive & Complexity & Sequential \\
% & Field & & Operations \\
%\hline
%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\
%\hline
%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\
%\hline
%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\
%\hline
%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\
%\hline
%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\
%\hline \hline
%\end{tabular}
%}
%\end{center}
%\end{table}
@@ -0,0 +1,18 @@
Recurrent neural networks, long short-term memory \citep{hochreiter1997} and gated recurrent \citep{gruEval14} neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation \citep{sutskever14, bahdanau2014neural, cho2014learning}. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures \citep{wu2016google,luong2015effective,jozefowicz2016exploring}.

Recurrent models typically factor computation along the symbol positions of the input and output sequences. Aligning the positions to steps in computation time, they generate a sequence of hidden states $h_t$, as a function of the previous hidden state $h_{t-1}$ and the input for position $t$. This inherently sequential nature precludes parallelization within training examples, which becomes critical at longer sequence lengths, as memory constraints limit batching across examples.
%\marginpar{not sure if the memory constraints are understandable here}
Recent work has achieved significant improvements in computational efficiency through factorization tricks \citep{Kuchaiev2017Factorization} and conditional computation \citep{shazeer2017outrageously}, while also improving model performance in case of the latter. The fundamental constraint of sequential computation, however, remains.

%\marginpar{@all: there is work on analyzing what attention really does in seq2seq models, couldn't find it right away}

Attention mechanisms have become an integral part of compelling sequence modeling and transduction models in various tasks, allowing modeling of dependencies without regard to their distance in the input or output sequences \citep{bahdanau2014neural, structuredAttentionNetworks}. In all but a few cases \citep{decomposableAttnModel}, however, such attention mechanisms are used in conjunction with a recurrent network.

%\marginpar{not sure if "cross-positional communication" is understandable without explanation}
%\marginpar{insert exact training times and stats for the model that reaches sota earliest, maybe even a single GPU model?}

In this work we propose the Transformer, a model architecture eschewing recurrence and instead relying entirely on an attention mechanism to draw global dependencies between input and output. The Transformer allows for significantly more parallelization and can reach a new state of the art in translation quality after being trained for as little as twelve hours on eight P100 GPUs.
%\marginpar{you removed the constant number of repetitions part. I wrote it because I wanted to make it clear that the model does not only perform attention once, while it's also not recurrent. I thought that might be important to get across early.}

% Just a standard paragraph with citations, rewrite.
%After the seminal papers of \citep{sutskever14}, \citep{bahdanau2014neural}, and \citep{cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation and language modeling with recurrent sequence models. Recent effort \citep{shazeer2017outrageously} has combined the power of conditional computation with sequence models to train very large models for machine translation, pushing SOTA at lower computational cost. Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state encumbers recurrnet models to process multiple inputs at once, and their time complexity is a linear function of the length of the input and output, both during training and inference. [What I want to say here is that although this is fine during decoding, at training time, we are given both input and output and this linear nature does not allow the RNN to process all inputs and outputs simultaneously and haven't been used on datasets that are the of the scale of the web. What's the largest dataset we have ? . Talk about Nividia and possibly other's effors to speed up things, and possibly other efforts that alleviate this, but are still limited by it's comptuational nature]. Rest of the intro: What if you could construct the state based on the actual inputs and outputs, then you could construct them all at once. This has been the foundation of many promising recent efforts, bytenet,facenet (Also talk about quasi rnn here). Now we talk about attention!! Along with cell architectures such as long short-term meory (LSTM) \citep{hochreiter1997}, and gated recurrent units (GRUs) \citep{cho2014learning}, attention has emerged as an essential ingredient in successful sequence models, in particular for machine translation. In recent years, many, if not all, state-of-the-art (SOTA) results in machine translation have been achieved with attention-based sequence models \citep{wu2016google,luong2015effective,jozefowicz2016exploring}. Talk about the neon work on how it played with attention to do self attention! Then talk about what we do.
@@ -0,0 +1,155 @@

\begin{figure}
\centering
\includegraphics[scale=0.6]{Figures/ModalNet-21}
\caption{The Transformer - model architecture.}
\label{fig:model-arch}
\end{figure}

% Although the primary workhorse of our model is attention,
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.

Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.

The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.

\subsection{Encoder and Decoder Stacks}

\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.

\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with the fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.

% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.

\subsection{Attention} \label{sec:attention}
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.

\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}

% \begin{figure}
% \centering
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
% \caption{Scaled Dot-Product Attention.}
% \label{fig:multi-head-att}
% \end{figure}

We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.

In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:

\begin{equation}
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
\end{equation}

The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.

%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.

% Already described in the subsequent section
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.

%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.

While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients\footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
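
% [editor note] Filling in the footnote's computation: with $q_i, k_i$ independent,
% mean $0$ and variance $1$, we get $\mathrm{E}[q \cdot k] = \sum_i \mathrm{E}[q_i]\,\mathrm{E}[k_i] = 0$ and
% $\mathrm{Var}(q \cdot k) = \sum_{i=1}^{d_k} \mathrm{E}[q_i^2]\,\mathrm{E}[k_i^2] = d_k$,
% so the dot products have standard deviation $\sqrt{d_k}$; dividing by $\sqrt{d_k}$
% renormalizes them to unit variance before the softmax.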

%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.

\subsubsection{Multi-Head Attention} \label{sec:multihead}

\begin{figure}
\begin{minipage}[t]{0.5\textwidth}
\centering
Scaled Dot-Product Attention \\
\vspace{0.5cm}
\includegraphics[scale=0.6]{Figures/ModalNet-19}
\end{minipage}
\begin{minipage}[t]{0.5\textwidth}
\centering
Multi-Head Attention \\
\vspace{0.1cm}
\includegraphics[scale=0.6]{Figures/ModalNet-20}
\end{minipage}

% \centering

\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
\label{fig:multi-head-att}
\end{figure}

Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.

Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.

\begin{align*}
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
\end{align*}

where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.
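
% [editor note] A shape check with the values used below ($h=8$, $d_k=d_v=64$,
% $\dmodel=512$, $n$ query positions): each $QW^Q_i \in \mathbb{R}^{n \times 64}$,
% so $\mathrm{head_i} \in \mathbb{R}^{n \times 64}$; concatenating $8$ heads gives
% $\mathbb{R}^{n \times 512}$, and $W^O \in \mathbb{R}^{512 \times 512}$ maps back
% to $\mathbb{R}^{n \times \dmodel}$.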


%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.

In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.

\subsubsection{Applications of Attention in our Model}

The Transformer uses multi-head attention in three different ways:
\begin{itemize}
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.

\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.

\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.

\end{itemize}
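
% [editor note] The decoder-side masking can be written explicitly: with a mask
% $M \in \mathbb{R}^{n \times n}$ where $M_{ij} = 0$ for $j \le i$ and
% $M_{ij} = -\infty$ for $j > i$, masked attention is
% $\mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}} + M)V$, which zeroes the weights on
% all illegal (future) positions.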

\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}

In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.

\begin{equation}
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
\end{equation}

While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
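
% [editor note] Parameter count per layer, from the dimensions above: $W_1 \in
% \mathbb{R}^{512 \times 2048}$ and $W_2 \in \mathbb{R}^{2048 \times 512}$ give
% $2 \times 512 \times 2048 = 2{,}097{,}152$ weights (plus $2048 + 512$ biases);
% the appendix on attention over parameters matches exactly this number.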



%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.

%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.

%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
%\begin{equation*} \label{eq:attention}
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
%\end{equation*}
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.

%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
%\marginpar{}

\subsection{Embeddings and Softmax}
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.


\subsection{Positional Encoding}
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.

In this work, we use sine and cosine functions of different frequencies:

\begin{align*}
PE_{(pos,2i)} = \sin(pos / 10000^{2i/\dmodel}) \\
PE_{(pos,2i+1)} = \cos(pos / 10000^{2i/\dmodel})
\end{align*}

where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
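
% [editor note] The linear-function claim can be made explicit. Writing
% $\omega_i = 1/10000^{2i/\dmodel}$, the angle-addition identities give
% $\begin{pmatrix} \sin(\omega_i(pos+k)) \\ \cos(\omega_i(pos+k)) \end{pmatrix} =
% \begin{pmatrix} \cos(\omega_i k) & \sin(\omega_i k) \\ -\sin(\omega_i k) & \cos(\omega_i k) \end{pmatrix}
% \begin{pmatrix} \sin(\omega_i pos) \\ \cos(\omega_i pos) \end{pmatrix}$,
% a rotation that depends only on the offset $k$, not on $pos$.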

We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
@@ -0,0 +1,45 @@
\pagebreak
\section*{Two Feed-Forward Layers = Attention over Parameters}\label{sec:parameter_attention}

In addition to attention layers, our model contains position-wise feed-forward networks (Section \ref{sec:ffn}), which consist of two linear transformations with a ReLU activation in between. In fact, these networks too can be seen as a form of attention. Compare the formula for such a network with the formula for a simple dot-product attention layer (biases and scaling factors omitted):

\begin{align*}
FFN(x, W_1, W_2) = ReLU(xW_1)W_2 \\
A(q, K, V) = Softmax(qK^T)V
\end{align*}

Based on the similarity of these formulae, the two-layer feed-forward network can be seen as a kind of attention, where the keys and values are the rows of the trainable parameter matrices $W_1$ and $W_2$, and where we use ReLU instead of Softmax in the compatibility function.

%the compatablity function is $compat(q, k_i) = ReLU(q \cdot k_i)$ instead of $Softmax(qK_T)_i$.

Given this similarity, we experimented with replacing the position-wise feed-forward networks with attention layers similar to the ones we use everywhere else in our model. The multi-head-attention-over-parameters sublayer is identical to the multi-head attention described in \ref{sec:multihead}, except that the "keys" and "values" inputs to each attention head are trainable model parameters, as opposed to being linear projections of a previous layer. These parameters are scaled up by a factor of $\sqrt{d_{model}}$ in order to be more similar to activations.

In our first experiment, we replaced each position-wise feed-forward network with a multi-head-attention-over-parameters sublayer with $h_p=8$ heads, key-dimensionality $d_{pk}=64$, and value-dimensionality $d_{pv}=64$, using $n_p=1536$ key-value pairs for each attention head. The sublayer has a total of $2097152$ parameters, including the parameters in the query projection and the output projection. This matches the number of parameters in the position-wise feed-forward network that we replaced. While the theoretical amount of computation is also the same, in practice, the attention version caused the step times to be about 30\% longer.
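
% [editor note] One way to account for the stated total, assuming per-head query
% and output projections as in Section~\ref{sec:multihead}: the keys and values
% contribute $h_p \cdot n_p \cdot (d_{pk} + d_{pv}) = 8 \cdot 1536 \cdot 128 = 1{,}572{,}864$
% parameters, the query projections $h_p \cdot \dmodel \cdot d_{pk} = 262{,}144$, and the
% output projection $h_p \cdot d_{pv} \cdot \dmodel = 262{,}144$, summing to
% $2{,}097{,}152 = 2 \cdot \dmodel \cdot \dff$, the size of the replaced feed-forward network.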

In our second experiment, we used $h_p=16$ heads, and $n_p=512$ key-value pairs for each attention head, again matching the total number of parameters in the base model.

Results for the first experiment were slightly worse than for the base model, and results for the second experiment were slightly better; see Table~\ref{tab:parameter_attention}.

\begin{table}[h]
\caption{Replacing the position-wise feed-forward networks with multihead-attention-over-parameters produces similar results to the base model. All metrics are on the English-to-German translation development set, newstest2013.}
\label{tab:parameter_attention}
\begin{center}
\vspace{-2mm}
%\scalebox{1.0}{
\begin{tabular}{c|cccccc|cccc}
\hline\rule{0pt}{2.0ex}
 & \multirow{2}{*}{$\dmodel$} & \multirow{2}{*}{$\dff$} &
\multirow{2}{*}{$h_p$} & \multirow{2}{*}{$d_{pk}$} & \multirow{2}{*}{$d_{pv}$} &
\multirow{2}{*}{$n_p$} &
PPL & BLEU & params & training\\
 & & & & & & & (dev) & (dev) & $\times10^6$ & time \\
\hline\rule{0pt}{2.0ex}
base & 512 & 2048 & & & & & 4.92 & 25.8 & 65 & 12 hours\\
\hline\rule{0pt}{2.0ex}
AOP$_1$ & 512 & & 8 & 64 & 64 & 1536 & 4.92 & 25.5 & 65 & 16 hours\\
AOP$_2$ & 512 & & 16 & 64 & 64 & 512 & \textbf{4.86} & \textbf{25.9} & 65 & 16 hours \\
\hline
\end{tabular}
%}
\end{center}
\end{table}
@@ -0,0 +1,8 @@
The ancestor of ChatGPT: "Attention Is All You Need"

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin

The original abstract is as follows:
The dominant sequence transduction models are based on complex recurrent or convolutional neural networks in an encoder-decoder configuration. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.

https://arxiv.org/abs/1706.03762
@@ -0,0 +1,2 @@
from stable_baselines3.dqn.dqn import DQN
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy
@@ -0,0 +1,245 @@
from typing import Any, Dict, List, Optional, Tuple, Type, Union

import gym
import numpy as np
import torch as th
from torch.nn import functional as F

from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.preprocessing import maybe_transpose
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy


class DQN(OffPolicyAlgorithm):
    """
    Deep Q-Network (DQN)

    Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
    Default hyperparameters are taken from the nature paper,
    except for the optimizer and learning rate that were taken from Stable Baselines defaults.

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param target_update_interval: update the target network every ``target_update_interval``
        environment steps.
    :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
    :param exploration_initial_eps: initial value of random action probability
    :param exploration_final_eps: final value of random action probability
    :param max_grad_norm: The maximum value for the gradient clipping
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """
|
||||
def __init__(
|
||||
self,
|
||||
policy: Union[str, Type[DQNPolicy]],
|
||||
env: Union[GymEnv, str],
|
||||
learning_rate: Union[float, Schedule] = 1e-4,
|
||||
buffer_size: int = 1000000,
|
||||
learning_starts: int = 50000,
|
||||
batch_size: Optional[int] = 32,
|
||||
tau: float = 1.0,
|
||||
gamma: float = 0.99,
|
||||
train_freq: Union[int, Tuple[int, str]] = 4,
|
||||
gradient_steps: int = 1,
|
||||
optimize_memory_usage: bool = False,
|
||||
target_update_interval: int = 10000,
|
||||
exploration_fraction: float = 0.1,
|
||||
exploration_initial_eps: float = 1.0,
|
||||
exploration_final_eps: float = 0.05,
|
||||
max_grad_norm: float = 10,
|
||||
tensorboard_log: Optional[str] = None,
|
||||
create_eval_env: bool = False,
|
||||
policy_kwargs: Optional[Dict[str, Any]] = None,
|
||||
verbose: int = 0,
|
||||
seed: Optional[int] = None,
|
||||
device: Union[th.device, str] = "auto",
|
||||
_init_setup_model: bool = True,
|
||||
):
|
||||
|
||||
super(DQN, self).__init__(
|
||||
policy,
|
||||
env,
|
||||
DQNPolicy,
|
||||
learning_rate,
|
||||
buffer_size,
|
||||
learning_starts,
|
||||
batch_size,
|
||||
tau,
|
||||
gamma,
|
||||
train_freq,
|
||||
gradient_steps,
|
||||
action_noise=None, # No action noise
|
||||
policy_kwargs=policy_kwargs,
|
||||
tensorboard_log=tensorboard_log,
|
||||
verbose=verbose,
|
||||
device=device,
|
||||
create_eval_env=create_eval_env,
|
||||
seed=seed,
|
||||
sde_support=False,
|
||||
optimize_memory_usage=optimize_memory_usage,
|
||||
supported_action_spaces=(gym.spaces.Discrete,),
|
||||
)
|
||||
|
||||
self.exploration_initial_eps = exploration_initial_eps
|
||||
self.exploration_final_eps = exploration_final_eps
|
||||
self.exploration_fraction = exploration_fraction
|
||||
self.target_update_interval = target_update_interval
|
||||
self.max_grad_norm = max_grad_norm
|
||||
# "epsilon" for the epsilon-greedy exploration
|
||||
self.exploration_rate = 0.0
|
||||
# Linear schedule will be defined in `_setup_model()`
|
||||
self.exploration_schedule = None
|
||||
self.q_net, self.q_net_target = None, None
|
||||
|
||||
if _init_setup_model:
|
||||
self._setup_model()
|
||||
|
||||
def _setup_model(self) -> None:
|
||||
super(DQN, self)._setup_model()
|
||||
self._create_aliases()
|
||||
self.exploration_schedule = get_linear_fn(
|
||||
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
|
||||
)
|
||||
|
||||
def _create_aliases(self) -> None:
|
||||
self.q_net = self.policy.q_net
|
||||
self.q_net_target = self.policy.q_net_target
|
||||
|
||||
def _on_step(self) -> None:
|
||||
"""
|
||||
Update the exploration rate and target network if needed.
|
||||
This method is called in ``collect_rollouts()`` after each step in the environment.
|
||||
"""
|
||||
if self.num_timesteps % self.target_update_interval == 0:
|
||||
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
|
||||
|
||||
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
|
||||
logger.record("rollout/exploration rate", self.exploration_rate)
|
||||
|
||||
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
|
||||
# Update learning rate according to schedule
|
||||
self._update_learning_rate(self.policy.optimizer)
|
||||
|
||||
losses = []
|
||||
for _ in range(gradient_steps):
|
||||
# Sample replay buffer
|
||||
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
|
||||
|
||||
with th.no_grad():
|
||||
# Compute the next Q-values using the target network
|
||||
next_q_values = self.q_net_target(replay_data.next_observations)
|
||||
# Follow greedy policy: use the one with the highest value
|
||||
next_q_values, _ = next_q_values.max(dim=1)
|
||||
# Avoid potential broadcast issue
|
||||
next_q_values = next_q_values.reshape(-1, 1)
|
||||
# 1-step TD target
|
||||
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
|
||||
|
||||
# Get current Q-values estimates
|
||||
current_q_values = self.q_net(replay_data.observations)
|
||||
|
||||
# Retrieve the q-values for the actions from the replay buffer
|
||||
current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
|
||||
|
||||
# Compute Huber loss (less sensitive to outliers)
|
||||
loss = F.smooth_l1_loss(current_q_values, target_q_values)
|
||||
losses.append(loss.item())
|
||||
|
||||
# Optimize the policy
|
||||
self.policy.optimizer.zero_grad()
|
||||
loss.backward()
|
||||
# Clip gradient norm
|
||||
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
|
||||
self.policy.optimizer.step()
|
||||
|
||||
# Increase update counter
|
||||
self._n_updates += gradient_steps
|
||||
|
||||
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
|
||||
logger.record("train/loss", np.mean(losses))
|
||||
|
||||
def predict(
|
||||
self,
|
||||
observation: np.ndarray,
|
||||
state: Optional[np.ndarray] = None,
|
||||
mask: Optional[np.ndarray] = None,
|
||||
deterministic: bool = False,
|
||||
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
|
||||
"""
|
||||
Overrides the base_class predict function to include epsilon-greedy exploration.
|
||||
|
||||
:param observation: the input observation
|
||||
:param state: The last states (can be None, used in recurrent policies)
|
||||
:param mask: The last masks (can be None, used in recurrent policies)
|
||||
:param deterministic: Whether or not to return deterministic actions.
|
||||
:return: the model's action and the next state
|
||||
(used in recurrent policies)
|
||||
"""
|
||||
if not deterministic and np.random.rand() < self.exploration_rate:
|
||||
if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
|
||||
n_batch = observation.shape[0]
|
||||
action = np.array([self.action_space.sample() for _ in range(n_batch)])
|
||||
else:
|
||||
action = np.array(self.action_space.sample())
|
||||
else:
|
||||
action, state = self.policy.predict(observation, state, mask, deterministic)
|
||||
return action, state
|
||||
|
||||
def learn(
|
||||
self,
|
||||
total_timesteps: int,
|
||||
callback: MaybeCallback = None,
|
||||
log_interval: int = 4,
|
||||
eval_env: Optional[GymEnv] = None,
|
||||
eval_freq: int = -1,
|
||||
n_eval_episodes: int = 5,
|
||||
tb_log_name: str = "DQN",
|
||||
eval_log_path: Optional[str] = None,
|
||||
reset_num_timesteps: bool = True,
|
||||
) -> OffPolicyAlgorithm:
|
||||
|
||||
return super(DQN, self).learn(
|
||||
total_timesteps=total_timesteps,
|
||||
callback=callback,
|
||||
log_interval=log_interval,
|
||||
eval_env=eval_env,
|
||||
eval_freq=eval_freq,
|
||||
n_eval_episodes=n_eval_episodes,
|
||||
tb_log_name=tb_log_name,
|
||||
eval_log_path=eval_log_path,
|
||||
reset_num_timesteps=reset_num_timesteps,
|
||||
)
|
||||
|
||||
def _excluded_save_params(self) -> List[str]:
|
||||
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
|
||||
|
||||
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
|
||||
state_dicts = ["policy", "policy.optimizer"]
|
||||
|
||||
return state_dicts, []
|
||||
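The `polyak_update` call in `_on_step` performs the soft target-network update controlled by `tau`; with the default `tau=1.0` it reduces to a hard copy. A minimal sketch of the rule, not the library's actual implementation:

```
import torch as th

def polyak_update_sketch(params, target_params, tau: float) -> None:
    # In-place soft update: target <- tau * online + (1 - tau) * target
    with th.no_grad():
        for param, target_param in zip(params, target_params):
            target_param.data.mul_(1 - tau)
            target_param.data.add_(tau * param.data)
```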
@@ -0,0 +1,237 @@
from typing import Any, Dict, List, Optional, Type

import gym
import torch as th
from torch import nn

from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule


class QNetwork(BasePolicy):
    """
    Action-Value (Q-Value) network for DQN

    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        features_extractor: nn.Module,
        features_dim: int,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        normalize_images: bool = True,
    ):
        super(QNetwork, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
        )

        if net_arch is None:
            net_arch = [64, 64]

        self.net_arch = net_arch
        self.activation_fn = activation_fn
        self.features_extractor = features_extractor
        self.features_dim = features_dim
        self.normalize_images = normalize_images
        action_dim = self.action_space.n  # number of actions
        q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
        self.q_net = nn.Sequential(*q_net)

    def forward(self, obs: th.Tensor) -> th.Tensor:
        """
        Predict the q-values.

        :param obs: Observation
        :return: The estimated Q-Value for each action.
        """
        return self.q_net(self.extract_features(obs))

    def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
        q_values = self.forward(observation)
        # Greedy action
        action = q_values.argmax(dim=1).reshape(-1)
        return action

    def _get_constructor_parameters(self) -> Dict[str, Any]:
        data = super()._get_constructor_parameters()

        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                features_extractor=self.features_extractor,
            )
        )
        return data


class DQNPolicy(BasePolicy):
    """
    Policy class with Q-Value Net and target net for DQN

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        super(DQNPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )

        if net_arch is None:
            if features_extractor_class == FlattenExtractor:
                net_arch = [64, 64]
            else:
                net_arch = []

        self.net_arch = net_arch
        self.activation_fn = activation_fn
        self.normalize_images = normalize_images

        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": self.net_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }

        self.q_net, self.q_net_target = None, None
        self._build(lr_schedule)

    def _build(self, lr_schedule: Schedule) -> None:
        """
        Create the network and the optimizer.

        :param lr_schedule: Learning rate schedule
            lr_schedule(1) is the initial learning rate
        """

        self.q_net = self.make_q_net()
        self.q_net_target = self.make_q_net()
        self.q_net_target.load_state_dict(self.q_net.state_dict())

        # Setup optimizer with initial learning rate
        self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)

    def make_q_net(self) -> QNetwork:
        # Make sure we always have separate networks for features extractors etc.
        net_args = self._update_features_extractor(self.net_args, features_extractor=None)
        return QNetwork(**net_args).to(self.device)

    def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)

    def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        return self.q_net._predict(obs, deterministic=deterministic)

    def _get_constructor_parameters(self) -> Dict[str, Any]:
        data = super()._get_constructor_parameters()

        data.update(
            dict(
                net_arch=self.net_args["net_arch"],
                activation_fn=self.net_args["activation_fn"],
                lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
                optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data


MlpPolicy = DQNPolicy


class CnnPolicy(DQNPolicy):
    """
    Policy class for DQN when using images as input.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        super(CnnPolicy, self).__init__(
            observation_space,
            action_space,
            lr_schedule,
            net_arch,
            activation_fn,
            features_extractor_class,
            features_extractor_kwargs,
            normalize_images,
            optimizer_class,
            optimizer_kwargs,
        )


register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
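Together, the two files above expose the standard stable-baselines3 entry point. A minimal usage sketch (the environment name is just an example):

```
from stable_baselines3 import DQN

# Train a DQN agent on a discrete-action Gym environment
model = DQN("MlpPolicy", "CartPole-v1", verbose=1)
model.learn(total_timesteps=10_000)

# Greedy evaluation step
obs = model.env.reset()
action, _state = model.predict(obs, deterministic=True)
```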
@@ -0,0 +1,2 @@
github stable-baselines3
https://github.com/DLR-RM/stable-baselines3
@@ -0,0 +1,27 @@
"In practice, we found that a high-entropy initial state is more likely to increase the speed of training.
The entropy is calculated by:
$$H = -\sum_{k=1}^{n_k} p(k) \cdot \log p(k), \quad p(k) = \frac{|A_k|}{|\mathcal{A}|}$$
where $H$ is the entropy, $|A_k|$ is the number of agent nodes in the $k$-th cluster, and $|\mathcal{A}|$ is the total number of agents.
To ensure the Cooperation Graph initialization has higher entropy,
we randomly generate multiple initial states,
rank them by their entropy, and then pick the one with maximum $H$."
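A minimal sketch of this max-entropy initialization selection; the cluster-size lists and the candidate set are made-up placeholders:

```
import numpy as np

def cluster_entropy(cluster_sizes):
    # H = -sum_k p(k) log p(k), with p(k) = |A_k| / |A|
    p = np.asarray(cluster_sizes, dtype=float)
    p = p / p.sum()
    p = p[p > 0]  # skip empty clusters to avoid log(0)
    return -np.sum(p * np.log(p))

def pick_max_entropy_init(candidate_inits):
    # candidate_inits: iterable of cluster-size lists, one per random initial state
    return max(candidate_inits, key=cluster_entropy)

# Example: three random assignments of 6 agents into 3 clusters
candidates = [[6, 0, 0], [4, 1, 1], [2, 2, 2]]
print(pick_max_entropy_init(candidates))  # -> [2, 2, 2], the most uniform split
```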
```
FROM ubuntu:latest

RUN apt-get update && \
    apt-get install -y python3 python3-pip && \
    rm -rf /var/lib/apt/lists/*

RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

RUN pip3 install gradio requests[socks] mdtex2html

COPY . /gpt
WORKDIR /gpt

CMD ["python3", "main.py"]
```
194  crazy_functions/下载arxiv论文翻译摘要.py  (normal file)
@@ -0,0 +1,194 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file, get_conf
import re, requests, unicodedata, os
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def download_arxiv_(url_pdf):
    if 'arxiv.org' not in url_pdf:
        if ('.' in url_pdf) and ('/' not in url_pdf):
            new_url = 'https://arxiv.org/abs/'+url_pdf
            print('Download ID:', url_pdf, 'auto-resolved to:', new_url)
            return download_arxiv_(new_url)
        else:
            print('Unrecognized URL!')
            return None
    if 'abs' in url_pdf:
        url_pdf = url_pdf.replace('abs', 'pdf')
        url_pdf = url_pdf + '.pdf'

    url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs')
    title, other_info = get_name(_url_=url_abs)

    paper_id = title.split()[0]  # e.g. '[1712.00559]'
    if '2' in other_info['year']:
        title = other_info['year'] + ' ' + title

    known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI']
    for k in known_conf:
        if k in other_info['comment']:
            title = k + ' ' + title

    download_dir = './gpt_log/arxiv/'
    os.makedirs(download_dir, exist_ok=True)

    # sanitize the title so it can be used as a file name
    title_str = title.replace('?', '?')\
        .replace(':', ':')\
        .replace('\"', '“')\
        .replace('\n', '')\
        .replace('  ', ' ')\
        .replace('  ', ' ')

    requests_pdf_url = url_pdf
    file_path = download_dir+title_str
    # if os.path.exists(file_path):
    #     print('Returning cached file')
    #     return './gpt_log/arxiv/'+title_str

    print('Downloading...')
    proxies, = get_conf('proxies')
    r = requests.get(requests_pdf_url, proxies=proxies)
    with open(file_path, 'wb+') as f:
        f.write(r.content)
    print('Download complete.')

    # print('Download command:', 'aria2c -o \"%s\" %s'%(title_str, url_pdf))
    # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str, url_pdf), shell=True)

    x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors'])  # bib file name, currently unused
    x = x.replace('?', '?')\
        .replace(':', ':')\
        .replace('\"', '“')\
        .replace('\n', '')\
        .replace('  ', ' ')\
        .replace('  ', ' ')
    return './gpt_log/arxiv/'+title_str, other_info


def get_name(_url_):
    import os
    from bs4 import BeautifulSoup
    print('Fetching the paper title!')
    print(_url_)

    # arxiv_recall = {}
    # if os.path.exists('./arxiv_recall.pkl'):
    #     with open('./arxiv_recall.pkl', 'rb') as f:
    #         arxiv_recall = pickle.load(f)

    # if _url_ in arxiv_recall:
    #     print('Found in cache')
    #     return arxiv_recall[_url_]

    proxies, = get_conf('proxies')
    res = requests.get(_url_, proxies=proxies)

    bs = BeautifulSoup(res.text, 'html.parser')
    other_details = {}

    # get year
    try:
        year = bs.find_all(class_='dateline')[0].text
        year = re.search(r'(\d{4})', year, re.M | re.I).group(1)
        other_details['year'] = year
        abstract = bs.find_all(class_='abstract mathjax')[0].text
        other_details['abstract'] = abstract
    except:
        other_details['year'] = ''
        print('Failed to get the year')

    # get author
    try:
        authors = bs.find_all(class_='authors')[0].text
        authors = authors.split('Authors:')[1]
        other_details['authors'] = authors
    except:
        other_details['authors'] = ''
        print('Failed to get the authors')

    # get comment
    try:
        comment = bs.find_all(class_='metatable')[0].text
        real_comment = None
        for item in comment.replace('\n', ' ').split(' '):
            if 'Comments' in item:
                real_comment = item
        if real_comment is not None:
            other_details['comment'] = real_comment
        else:
            other_details['comment'] = ''
    except:
        other_details['comment'] = ''
        print('Failed to get the comment')

    title_str = BeautifulSoup(
        res.text, 'html.parser').find('title').contents[0]
    print('Fetched:', title_str)
    # arxiv_recall[_url_] = (title_str+'.pdf', other_details)
    # with open('./arxiv_recall.pkl', 'wb') as f:
    #     pickle.dump(arxiv_recall, f)

    return title_str+'.pdf', other_details



@CatchException
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):

    CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
    import glob
    import os

    # Basic info: feature description and contributor
    chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        import pdfminer, bs4
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Extract the abstract and download the PDF
    try:
        pdf_path, info = download_arxiv_(txt)
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"下载pdf文件未成功")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Translate the abstract, etc.
    i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}"
    i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}'
    chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
    msg = '正常'
    # ** gpt request **
    # single thread: fetch the paper's meta information
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say,
        inputs_show_user=i_say_show_user,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot, history=[],
        sys_prompt="Your job is to collect information from materials and translate to Chinese.",
    )

    chatbot[-1] = (i_say_show_user, gpt_say)
    history.append(i_say_show_user); history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
    # Write the results to a file
    import shutil
    # reset the file's creation time
    shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
    yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
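A short usage sketch of the downloader above; the arXiv ID is only an example:

```
# Bare IDs are resolved to an abs URL, then rewritten to the PDF URL:
#   '1706.03762' -> https://arxiv.org/abs/1706.03762 -> https://arxiv.org/pdf/1706.03762.pdf
pdf_path, info = download_arxiv_('1706.03762')
print(pdf_path, info['year'], info['authors'])
```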
138  crazy_functions/代码重写为全英文_多线程.py  (normal file)
@@ -0,0 +1,138 @@
import threading
from request_llm.bridge_all import predict_no_ui_long_connection
from toolbox import update_ui
from toolbox import CatchException, write_results_to_file, report_execption
from .crazy_utils import breakdown_txt_to_satisfy_token_limit

def extract_code_block_carefully(txt):
    splitted = txt.split('```')
    n_code_block_seg = len(splitted) - 1
    if n_code_block_seg <= 1: return txt
    # otherwise strip one leading ``` and one trailing ```
    txt_out = '```'.join(splitted[1:-1])
    return txt_out



def break_txt_into_half_at_some_linebreak(txt):
    lines = txt.split('\n')
    n_lines = len(lines)
    pre = lines[:(n_lines//2)]
    post = lines[(n_lines//2):]
    return "\n".join(pre), "\n".join(post)


@CatchException
def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
    # Step 1: clear the history to avoid input overflow
    history = []

    # Step 2: try to import dependencies; if any are missing, suggest how to install them
    try:
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Step 3: collect the files
    import time, glob, os, shutil, re
    os.makedirs('gpt_log/generated_english_version', exist_ok=True)
    os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
    # file_manifest = ['./toolbox.py']
    i_say_show_user_buffer = []

    # Step 4: display something so the UI does not feel stuck
    for index, fp in enumerate(file_manifest):
        # if 'test_project' in fp: continue
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_show_user = f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
        i_say_show_user_buffer.append(i_say_show_user)
        chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Step 5: truncation under the token limit
    MAX_TOKEN = 3000
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))

    # Step 6: the worker function
    mutable_return = [None for _ in file_manifest]
    observe_window = [[""] for _ in file_manifest]
    def thread_worker(fp, index):
        if index > 10:
            time.sleep(60)
            print('OpenAI limits free users to 20 requests per minute; lowering the request rate.')
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
        try:
            gpt_say = ""
            # break the code file into fragments
            file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
            for file_content_partial in file_content_breakdown:
                i_say = i_say_template(fp, file_content_partial)
                # ** gpt request **
                gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
                gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
                gpt_say += gpt_say_partial
            mutable_return[index] = gpt_say
        except ConnectionAbortedError as token_exceed_err:
            print('At least one thread failed due to token overflow:', token_exceed_err)
        except Exception as e:
            print('At least one thread failed unexpectedly:', e)

    # Step 7: start all worker threads at once
    handles = [threading.Thread(target=thread_worker, args=(fp, index)) for index, fp in enumerate(file_manifest)]
    for h in handles:
        h.daemon = True
        h.start()
    chatbot.append(('开始了吗?', f'多线程操作已经开始'))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Step 8: poll the threads until all of them finish
    cnt = 0
    while True:
        cnt += 1
        time.sleep(0.2)
        th_alive = [h.is_alive() for h in handles]
        if not any(th_alive): break
        # nicer UI feedback
        observe_win = []
        for thread_index, alive in enumerate(th_alive):
            observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
        stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
        stat_str = ''.join(stat)
        chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1)))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Step 9: write the results to files
    for index, h in enumerate(handles):
        h.join()  # not strictly needed here; all threads have already finished
        fp = file_manifest[index]
        gpt_say = mutable_return[index]
        i_say_show_user = i_say_show_user_buffer[index]

        where_to_relocate = f'gpt_log/generated_english_version/{fp}'
        if gpt_say is not None:
            with open(where_to_relocate, 'w+', encoding='utf-8') as f:
                f.write(gpt_say)
        else:  # failed
            shutil.copyfile(file_manifest[index], where_to_relocate)
        chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
        history.append(i_say_show_user); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        time.sleep(1)

    # Step 10: back up a report file
    res = write_results_to_file(history)
    chatbot.append(("生成一份任务执行报告", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
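The helper `breakdown_txt_to_satisfy_token_limit` used in Step 6 can be pictured as recursive halving at a line break until every fragment fits the token budget. A sketch under that assumption (tiktoken's `encoding_for_model` is the real API; `breakdown_sketch` is not the project's actual helper):

```
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))

def split_in_half(txt):
    # same idea as break_txt_into_half_at_some_linebreak above
    lines = txt.split('\n')
    return '\n'.join(lines[:len(lines)//2]), '\n'.join(lines[len(lines)//2:])

def breakdown_sketch(txt, limit):
    # recursively halve at a line break until every fragment fits the budget;
    # a single over-long line cannot be split further and is returned as-is
    if get_token_fn(txt) <= limit or '\n' not in txt:
        return [txt]
    pre, post = split_in_half(txt)
    return breakdown_sketch(pre, limit) + breakdown_sketch(post, limit)
```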
42  crazy_functions/对话历史存档.py  (normal file)
@@ -0,0 +1,42 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def write_chat_to_file(chatbot, file_name=None):
    """
    Write the chat history to a file in Markdown format. If no file name is given,
    generate one from the current time.
    """
    import os
    import time
    if file_name is None:
        file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
    os.makedirs('./gpt_log/', exist_ok=True)
    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
        for i, contents in enumerate(chatbot):
            for content in contents:
                try:  # the trigger for this bug is unknown; this is a temporary workaround
                    if type(content) != str: content = str(content)
                except:
                    continue
                f.write(content)
                f.write('\n\n')
            f.write('<hr color="red"> \n\n')

    res = '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
    print(res)
    return res

@CatchException
def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             text the user typed into the input field, e.g. a passage to translate or a path containing files to process
    llm_kwargs      GPT model parameters such as temperature and top_p; usually passed through unchanged
    plugin_kwargs   plugin parameters; currently unused
    chatbot         handle to the chat display box, used to show output to the user
    history         chat history (context)
    system_prompt   the silent system prompt for GPT
    web_port        the port the software is currently running on
    """

    chatbot.append(("保存当前对话", f"[Local Message] {write_chat_to_file(chatbot)}"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI promptly, since a GPT request would take a while
127  crazy_functions/总结word文档.py  (normal file)
@@ -0,0 +1,127 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, os
    # pip install python-docx : for the docx format, cross-platform
    # pip install pywin32     : for the doc format, Windows only
    for index, fp in enumerate(file_manifest):
        if fp.split(".")[-1] == "docx":
            from docx import Document
            doc = Document(fp)
            file_content = "\n".join([para.text for para in doc.paragraphs])
        else:
            import win32com.client
            word = win32com.client.Dispatch("Word.Application")
            word.visible = False
            # open the file
            print('fp', os.getcwd())
            doc = word.Documents.Open(os.getcwd() + '/' + fp)
            # file_content = doc.Content.Text
            doc = word.ActiveDocument
            file_content = doc.Range().Text
            doc.Close()
            word.Quit()

        print(file_content)
        # file names under private_upload are often garbled after unzipping (rar and 7z are fine),
        # so only the document content is analyzed and the file name is not passed to the model
        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
        from request_llm.bridge_all import model_info
        max_token = model_info[llm_kwargs['llm_model']]['max_token']
        TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=file_content,
            get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
            limit=TOKEN_LIMIT_PER_FRAGMENT
        )
        this_paper_history = []
        for i, paper_frag in enumerate(paper_fragments):
            i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'
            i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。'
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.extend([i_say_show_user, gpt_say])
            this_paper_history.extend([i_say_show_user, gpt_say])

        # all fragments of this document have been summarized;
        # if the document was split, also produce an overall summary
        if len(paper_fragments) > 1:
            i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。"
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=this_paper_history,
                sys_prompt="总结文章。"
            )

            history.extend([i_say, gpt_say])
            this_paper_history.extend([i_say, gpt_say])

        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    res = write_results_to_file(history)
    chatbot.append(("所有文件都总结完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


@CatchException
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # Basic info: feature description and contributor
    chatbot.append([
        "函数插件功能?",
        "批量总结Word文档。函数插件贡献者: JasonGuo1"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        from docx import Document
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Check the input; if none was given, exit immediately
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Build the list of files to process
    if txt.endswith('.docx') or txt.endswith('.doc'):
        file_manifest = [txt]
    else:
        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \
                        [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)]

    # If no file was found
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或.doc文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Start the task
    yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
186  crazy_functions/批量Markdown翻译.py  (normal file)
@@ -0,0 +1,186 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = False

class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split overly long texts into fragments.
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")

        print('Segmentation: done')


def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
    import time, os, re
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    # <-------- read the Markdown files ---------->
    pfg = PaperFileGroup()

    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        # record the file content
        pfg.file_paths.append(fp)
        pfg.file_contents.append(file_content)

    # <-------- split Markdown files that are too long ---------->
    pfg.run_file_split(max_token_limit=1500)
    n_split = len(pfg.sp_file_contents)

    # <-------- start the multi-threaded translation ---------->
    if language == 'en->zh':
        inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
    elif language == 'zh->en':
        inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
                        f"\n\n{frag}" for frag in pfg.sp_file_contents]
        inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
        sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # the maximum parallelism allowed by OpenAI
        scroller_max_len=80
    )

    # <-------- collect the results and exit ---------->
    create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
    history = gpt_response_collection
    chatbot.append((f"{fp}完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


def get_files_from_everything(txt):
    import glob, os

    success = True
    if txt.startswith('http'):
        # a remote file on the web
        txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/")
        txt = txt.replace("/blob/", "/")
        import requests
        from toolbox import get_conf
        proxies, = get_conf('proxies')
        r = requests.get(txt, proxies=proxies)
        with open('./gpt_log/temp.md', 'wb+') as f: f.write(r.content)
        project_folder = './gpt_log/'
        file_manifest = ['./gpt_log/temp.md']
    elif txt.endswith('.md'):
        # a single local file
        file_manifest = [txt]
        project_folder = os.path.dirname(txt)
    elif os.path.exists(txt):
        # a local path; search it recursively
        project_folder = txt
        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
    else:
        success = False

    return success, file_manifest, project_folder


@CatchException
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: feature description and contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        import tiktoken
        import glob, os
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow

    success, file_manifest, project_folder = get_files_from_everything(txt)

    if not success:
        # nothing to process
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')


@CatchException
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: feature description and contributor
    chatbot.append([
        "函数插件功能?",
        "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        import tiktoken
        import glob, os
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    history = []  # clear the history to avoid input overflow
    success, file_manifest, project_folder = get_files_from_everything(txt)
    if not success:
        # nothing to process
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
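The URL rewrite in `get_files_from_everything` maps GitHub's HTML viewer to the raw file host, e.g. (hypothetical repository path):

```
url = "https://github.com/user/repo/blob/main/README.md"
raw = url.replace("https://github.com/", "https://raw.githubusercontent.com/").replace("/blob/", "/")
print(raw)  # https://raw.githubusercontent.com/user/repo/main/README.md
```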
166  crazy_functions/批量总结PDF文档.py  (normal file)
@@ -0,0 +1,166 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
import re
import unicodedata
fast_debug = False
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def is_paragraph_break(match):
    """
    Decide, from the given regex match, whether a line break marks a paragraph boundary.
    If the character before the break ends a sentence (period, exclamation mark, question mark)
    and the next character is uppercase, the break is more likely a paragraph boundary.
    The length of the preceding text is also used to check whether the paragraph is long enough.
    """
    prev_char, next_char = match.groups()

    # sentence-ending punctuation
    sentence_endings = ".!?"

    # minimum paragraph length threshold
    min_paragraph_length = 140

    if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length:
        return "\n\n"
    else:
        return " "

def normalize_text(text):
    """
    Normalize the text by converting ligatures and other special symbols
    to their basic forms, e.g. the ligature "fi" becomes "f" plus "i".
    """
    # normalize the text, decomposing ligatures
    normalized_text = unicodedata.normalize("NFKD", text)

    # drop other special characters
    cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text)

    return cleaned_text

def clean_text(raw_text):
    """
    Clean and format the raw text extracted from a PDF.
    1. Normalize the raw text.
    2. Join words hyphenated across lines, e.g. "Espe-\ncially" becomes "Especially".
    3. Decide heuristically whether each line break is a paragraph boundary and replace it accordingly.
    """
    # normalize the text
    normalized_text = normalize_text(raw_text)

    # join words hyphenated across line breaks
    text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text)

    # find the remaining line breaks by the characters around them
    newlines = re.compile(r'(\S)\n(\S)')

    # replace each line break with a space or a paragraph separator, by heuristic
    final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text)

    return final_text.strip()

def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os, fitz
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with fitz.open(fp) as doc:
            file_content = ""
            for page in doc:
                file_content += page.get_text()
            file_content = clean_text(file_content)
            print(file_content)

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )  # with a timeout countdown

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )  # with a timeout countdown

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI


@CatchException
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # Basic info: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档。函数插件贡献者: ValeriaWong,Eralien"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import dependencies; if any are missing, suggest how to install them
    try:
        import fitz
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Check the input; if none was given, exit immediately
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Build the list of files to process
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]  # + \
        # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
        # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
        # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]

    # If no file was found
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Start the task
    yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
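A quick usage illustration of the `clean_text` heuristics above on a fabricated fragment:

```
raw = "Espe-\ncially for long lines, hyphenated\nwords are rejoined and single breaks\nbecome spaces."
print(clean_text(raw))
# -> "Especially for long lines, hyphenated words are rejoined and single breaks become spaces."
```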
160  crazy_functions/批量总结PDF文档pdfminer.py  (normal file)
@@ -0,0 +1,160 @@
from toolbox import update_ui
|
||||
from toolbox import CatchException, report_execption, write_results_to_file
|
||||
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
||||
|
||||
fast_debug = False
|
||||
|
||||
def readPdf(pdfPath):
|
||||
"""
|
||||
读取pdf文件,返回文本内容
|
||||
"""
|
||||
import pdfminer
|
||||
from pdfminer.pdfparser import PDFParser
|
||||
from pdfminer.pdfdocument import PDFDocument
|
||||
from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
|
||||
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
|
||||
from pdfminer.pdfdevice import PDFDevice
|
||||
from pdfminer.layout import LAParams
|
||||
from pdfminer.converter import PDFPageAggregator
|
||||
|
||||
fp = open(pdfPath, 'rb')
|
||||
|
||||
# Create a PDF parser object associated with the file object
|
||||
parser = PDFParser(fp)
|
||||
|
||||
# Create a PDF document object that stores the document structure.
|
||||
# Password for initialization as 2nd parameter
|
||||
document = PDFDocument(parser)
|
||||
# Check if the document allows text extraction. If not, abort.
|
||||
if not document.is_extractable:
|
||||
raise PDFTextExtractionNotAllowed
|
||||
|
||||
# Create a PDF resource manager object that stores shared resources.
|
||||
rsrcmgr = PDFResourceManager()
|
||||
|
||||
# Create a PDF device object.
|
||||
# device = PDFDevice(rsrcmgr)
|
||||
|
||||
# BEGIN LAYOUT ANALYSIS.
|
||||
# Set parameters for analysis.
|
||||
    laparams = LAParams(
        char_margin=10.0,
        line_margin=0.2,
        boxes_flow=0.2,
        all_texts=False,
    )
    # Create a PDF page aggregator object.
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    # Create a PDF interpreter object.
    interpreter = PDFPageInterpreter(rsrcmgr, device)

    # Loop over all pages in the document
    outTextList = []
    for page in PDFPage.create_pages(document):
        # Read the page into a layout object
        interpreter.process_page(page)
        layout = device.get_result()
        for obj in layout._objs:
            if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
                # print(obj.get_text())
                outTextList.append(obj.get_text())

    return outTextList


def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os
    from bs4 import BeautifulSoup
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        if ".tex" in fp:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
        if ".pdf" in fp.lower():
            file_content = readPdf(fp)
            file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt="总结文章。"
            )  # with timeout countdown
            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say,
            inputs_show_user=i_say,
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history=history,
            sys_prompt="总结文章。"
        )  # with timeout countdown
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI


@CatchException
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os

    # Basic info: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the dependencies; if any are missing, suggest how to install them
    try:
        import pdfminer, bs4
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
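For reference, a minimal standalone sketch of the same pdfminer extraction, useful for testing the LAParams tuning above outside the plugin. This is an assumption-laden sketch, not part of the diff: it assumes the pdfminer.six package, and 'sample.pdf' is a placeholder path.

# Hypothetical usage sketch (not part of the diff): extract text with pdfminer.six's high-level API.
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams

# char_margin / line_margin / boxes_flow mirror the LAParams tuning in readPdf above.
laparams = LAParams(char_margin=10.0, line_margin=0.2, boxes_flow=0.2, all_texts=False)
text = extract_text('sample.pdf', laparams=laparams)  # 'sample.pdf' is a placeholder path
print(text[:500])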
crazy_functions/批量翻译PDF文档_多线程.py (131 lines, regular file)
@@ -0,0 +1,131 @@
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
from .crazy_utils import read_and_clean_pdf_text
from colorful import *

@CatchException
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
    import glob
    import os

    # Basic info: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the dependencies; if any are missing, suggest how to install them
    try:
        import fitz
        import tiktoken
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Check the input parameter; if no input is given, exit directly
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "":
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Collect the list of files to process
    file_manifest = [f for f in glob.glob(
        f'{project_folder}/**/*.pdf', recursive=True)]

    # If no file was found
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Start the actual task
    yield from 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt)


def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt):
    import os
    import tiktoken
    TOKEN_LIMIT_PER_FRAGMENT = 1280
    generated_conclusion_files = []
    for index, fp in enumerate(file_manifest):

        # Read the PDF file
        file_content, page_one = read_and_clean_pdf_text(fp)

        # Recursively split the PDF
        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
        page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
            txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)

        # For better results, strip everything after the Introduction (if present)
        paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

        # Single thread: extract the paper's meta information
        paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=f"以下是一篇学术论文的基础信息,请从中提取出“标题”、“收录会议或期刊”、“作者”、“摘要”、“编号”、“作者邮箱”这六个部分。请用markdown格式输出,最后用中文翻译摘要部分。请提取:{paper_meta}",
            inputs_show_user=f"请从{fp}中提取出“标题”、“收录会议或期刊”等基本信息。",
            llm_kwargs=llm_kwargs,
            chatbot=chatbot, history=[],
            sys_prompt="Your job is to collect information from materials.",
        )

        # Multiple threads: translate
        gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
            inputs_array=[
                f"你需要翻译以下内容:\n{frag}" for frag in paper_fragments],
            inputs_show_user_array=[f"\n---\n 原文: \n\n {frag.replace('#', '')} \n---\n 翻译:\n " for frag in paper_fragments],
            llm_kwargs=llm_kwargs,
            chatbot=chatbot,
            history_array=[[paper_meta] for _ in paper_fragments],
            sys_prompt_array=[
                "请你作为一个学术翻译,负责把学术论文准确翻译成中文。注意文章中的每一句话都要翻译。" for _ in paper_fragments],
            # max_workers=5  # the maximum level of parallelism allowed by OpenAI
        )

        # Format the report: even entries hold the source fragment, odd entries hold the translation
        for i, k in enumerate(gpt_response_collection):
            if i % 2 == 0:
                gpt_response_collection[i] = f"\n\n---\n\n ## 原文[{i//2}/{len(gpt_response_collection)//2}]: \n\n {paper_fragments[i//2].replace('#', '')} \n\n---\n\n ## 翻译[{i//2}/{len(gpt_response_collection)//2}]:\n "
        final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
        final.extend(gpt_response_collection)
        create_report_file_name = f"{os.path.basename(fp)}.trans.md"
        res = write_results_to_file(final, file_name=create_report_file_name)

        # Update the UI
        generated_conclusion_files.append(f'./gpt_log/{create_report_file_name}')
        chatbot.append((f"{fp}完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Prepare the files for download
    import shutil
    for pdf_path in generated_conclusion_files:
        # Rename the file
        rename_file = f'./gpt_log/总结论文-{os.path.basename(pdf_path)}'
        if os.path.exists(rename_file):
            os.remove(rename_file)
        shutil.copyfile(pdf_path, rename_file)
        if os.path.exists(pdf_path):
            os.remove(pdf_path)
    chatbot.append(("给出输出文件清单", str(generated_conclusion_files)))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
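The fragment limit above is enforced by token count, not character count. A hypothetical standalone sketch of the same counting logic, assuming the tiktoken package (the plugin itself pulls the tokenizer from request_llm.bridge_all instead):

# Hypothetical sketch (not part of the diff): count tokens the way get_token_num does above.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))

# fragments are then split until each stays under TOKEN_LIMIT_PER_FRAGMENT
print(get_token_num("hello world"))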
crazy_functions/理解PDF文档内容.py (112 lines, regular file)
@@ -0,0 +1,112 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption
from .crazy_utils import read_and_clean_pdf_text
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import tiktoken
    print('begin analysis on:', file_name)

    ############################## <Step 0: split the PDF> ##################################
    # Recursively split the PDF. Each fragment (ideally a complete section such as the
    # introduction or the experiments, split further only when necessary)
    # must be shorter than 2500 tokens
    file_content, page_one = read_and_clean_pdf_text(file_name)  # (try to) split the PDF by section

    TOKEN_LIMIT_PER_FRAGMENT = 2500

    from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
    from request_llm.bridge_all import model_info
    enc = model_info["gpt-3.5-turbo"]['tokenizer']
    def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
    paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=file_content, get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT)
    page_one_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
        txt=str(page_one), get_token_fn=get_token_num, limit=TOKEN_LIMIT_PER_FRAGMENT//4)
    # For better results, strip everything after the Introduction (if present)
    paper_meta = page_one_fragments[0].split('introduction')[0].split('Introduction')[0].split('INTRODUCTION')[0]

    ############################## <Step 1: extract high-value information from the abstract into the history> ##################################
    final_results = []
    final_results.append(paper_meta)

    ############################## <Step 2: iterate through the whole paper, distilling its content> ##################################
    i_say_show_user = f'首先你在英文语境下通读整篇论文。'; gpt_say = "[Local Message] 收到。"    # user-facing prompt
    chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[])  # update the UI

    iteration_results = []
    last_iteration_result = paper_meta  # the initial value is the abstract
    MAX_WORD_TOTAL = 4096
    n_fragment = len(paper_fragments)
    if n_fragment >= 20: print('文章极长,不能达到预期效果')
    for i in range(n_fragment):
        NUM_OF_WORD = MAX_WORD_TOTAL // n_fragment
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i]}"
        i_say_show_user = f"[{i+1}/{n_fragment}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {paper_fragments[i][:200]}"
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user,  # i_say = the question actually sent to the LLM, i_say_show_user = the question shown to the user
                                                                           llm_kwargs, chatbot,
                                                                           history=["The main idea of the previous section is?", last_iteration_result],  # iterate on the previous result
                                                                           sys_prompt="Extract the main idea of this section."  # prompt
                                                                           )
        iteration_results.append(gpt_say)
        last_iteration_result = gpt_say

    ############################## <Step 3: organize the history> ##################################
    final_results.extend(iteration_results)
    final_results.append(f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。')
    # The next two lines are shown in the UI only and have no actual effect
    i_say_show_user = f'接下来,你是一名专业的学术教授,利用以上信息,使用中文回答我的问题。'; gpt_say = "[Local Message] 收到。"
    chatbot.append([i_say_show_user, gpt_say])

    ############################## <Step 4: set a token cap to prevent token overflow in the answer> ##################################
    from .crazy_utils import input_clipping
    _, final_results = input_clipping("", final_results, max_token_limit=3200)
    yield from update_ui(chatbot=chatbot, history=final_results)  # note that the history is replaced here


@CatchException
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    import glob, os

    # Basic info: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "理解PDF论文内容,并且将结合上下文内容,进行学术解答。函数插件贡献者: Hanzoe, binary-husky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the dependencies; if any are missing, suggest how to install them
    try:
        import fitz
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []

    # Check the input parameter; if no input is given, exit directly
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "":
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Collect the list of files to process
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    # If no file was found
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    txt = file_manifest[0]
    # Start the actual task
    yield from 解析PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
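Step 2 above is a rolling summary: each fragment is condensed with the previous digest as context, so the final history stays small. A dependency-free sketch of the pattern; `summarize` stands in for the GPT request and is an assumption:

# Hypothetical sketch (not part of the diff) of the rolling-summary loop in Step 2.
def rolling_summary(fragments, seed, summarize):
    last = seed                 # start from the paper meta / abstract
    results = []
    for frag in fragments:
        # condition each step on the previous digest, as the history argument does above
        last = summarize(frag, context=last)
        results.append(last)
    return results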
crazy_functions/生成函数注释.py (54 lines, regular file)
@@ -0,0 +1,54 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False

def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, os
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()

        i_say = f'请对下面的程序文件做一个概述,并对文件中的所有函数生成注释,使用markdown表格输出结果,文件名是{os.path.relpath(fp, project_folder)},文件内容是 ```{file_content}```'
        i_say_show_user = f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述,并对文件中的所有函数生成注释: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)  # with timeout countdown

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
            if not fast_debug: time.sleep(2)

    if not fast_debug:
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI


@CatchException
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]

    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.py或.cpp文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
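The manifest-building idiom above (one recursive glob per extension, concatenated) recurs throughout these plugins. A hypothetical condensed sketch; './my_project' is a placeholder path:

# Hypothetical sketch (not part of the diff): build a file manifest with recursive glob.
import glob
project_folder = './my_project'   # placeholder path
file_manifest = [f for ext in ('py', 'cpp')
                 for f in glob.glob(f'{project_folder}/**/*.{ext}', recursive=True)]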
crazy_functions/联网的ChatGPT.py (102 lines, regular file)
@@ -0,0 +1,102 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
import requests
from bs4 import BeautifulSoup
from request_llm.bridge_all import model_info

def google(query, proxies):
    query = query  # replace with the keyword you want to search for
    url = f"https://www.google.com/search?q={query}"
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'}
    response = requests.get(url, headers=headers, proxies=proxies)
    soup = BeautifulSoup(response.content, 'html.parser')
    results = []
    for g in soup.find_all('div', class_='g'):
        anchors = g.find_all('a')
        if anchors:
            link = anchors[0]['href']
            if link.startswith('/url?q='):
                link = link[7:]
            if not link.startswith('http'):
                continue
            title = g.find('h3').text
            item = {'title': title, 'link': link}
            results.append(item)

    for r in results:
        print(r['link'])
    return results

def scrape_text(url, proxies) -> str:
    """Scrape text from a webpage

    Args:
        url (str): The URL to scrape text from

    Returns:
        str: The scraped text
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36',
        'Content-Type': 'text/plain',
    }
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=8)
        if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding
    except:
        return "无法连接到该网页"
    soup = BeautifulSoup(response.text, "html.parser")
    for script in soup(["script", "style"]):
        script.extract()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = "\n".join(chunk for chunk in chunks if chunk)
    return text

@CatchException
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             the text typed by the user in the input field, e.g. a passage to translate or a path containing files to process
    llm_kwargs      parameters of the LLM, such as temperature and top_p, usually passed through as-is
    plugin_kwargs   parameters of the plugin, currently unused
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt for the LLM
    web_port        the port the application is currently running on
    """
    history = []    # clear the history to avoid input overflow
    chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
                    "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI right away, since the GPT request takes a while

    # ------------- < Step 1: scrape the search engine results > -------------
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    urls = google(txt, proxies)
    history = []

    # ------------- < Step 2: visit the webpages one by one > -------------
    max_search_result = 5   # the maximum number of webpages to include
    for index, url in enumerate(urls[:max_search_result]):
        res = scrape_text(url['link'], proxies)
        history.extend([f"第{index}份搜索结果:", res])
        chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"])
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI right away, since the GPT request takes a while

    # ------------- < Step 3: let ChatGPT synthesize > -------------
    i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}"
    i_say, history = input_clipping(    # clip the input, starting from the longest entries, to avoid blowing the token budget
        inputs=i_say,
        history=history,
        max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4
    )
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say, inputs_show_user=i_say,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。"
    )
    chatbot[-1] = (i_say, gpt_say)
    history.append(i_say); history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
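The scrape_text pipeline above boils down to fetch, strip scripts and styles, and keep the visible text. A hypothetical condensed sketch with no proxy; 'https://example.com' is a placeholder URL:

# Hypothetical sketch (not part of the diff): the scrape_text pipeline, condensed.
import requests
from bs4 import BeautifulSoup

resp = requests.get('https://example.com', timeout=8)  # placeholder URL
soup = BeautifulSoup(resp.text, 'html.parser')
for tag in soup(['script', 'style']):
    tag.extract()  # drop non-visible content before extracting text
print('\n'.join(line.strip() for line in soup.get_text().splitlines() if line.strip()))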
crazy_functions/解析JupyterNotebook.py (145 lines, regular file)
@@ -0,0 +1,145 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
fast_debug = True


class PaperFileGroup():
    def __init__(self):
        self.file_paths = []
        self.file_contents = []
        self.sp_file_contents = []
        self.sp_file_index = []
        self.sp_file_tag = []

        # count_token
        from request_llm.bridge_all import model_info
        enc = model_info["gpt-3.5-turbo"]['tokenizer']
        def get_token_num(txt): return len(
            enc.encode(txt, disallowed_special=()))
        self.get_token_num = get_token_num

    def run_file_split(self, max_token_limit=1900):
        """
        Split long texts apart
        """
        for index, file_content in enumerate(self.file_contents):
            if self.get_token_num(file_content) < max_token_limit:
                self.sp_file_contents.append(file_content)
                self.sp_file_index.append(index)
                self.sp_file_tag.append(self.file_paths[index])
            else:
                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
                    file_content, self.get_token_num, max_token_limit)
                for j, segment in enumerate(segments):
                    self.sp_file_contents.append(segment)
                    self.sp_file_index.append(index)
                    self.sp_file_tag.append(
                        self.file_paths[index] + f".part-{j}.txt")


def parseNotebook(filename, enable_markdown=1):
    import json

    CodeBlocks = []
    with open(filename, 'r', encoding='utf-8', errors='replace') as f:
        notebook = json.load(f)
    for cell in notebook['cells']:
        if cell['cell_type'] == 'code' and cell['source']:
            # remove blank lines
            cell['source'] = [line for line in cell['source'] if line.strip() != '']
            CodeBlocks.append("".join(cell['source']))
        elif enable_markdown and cell['cell_type'] == 'markdown' and cell['source']:
            cell['source'] = [line for line in cell['source'] if line.strip() != '']
            CodeBlocks.append("Markdown:" + "".join(cell['source']))

    Code = ""
    for idx, code in enumerate(CodeBlocks):
        Code += f"This is {idx+1}th code block: \n"
        Code += code + "\n"

    return Code


def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    enable_markdown = plugin_kwargs.get("advanced_arg", "1")
    try:
        enable_markdown = int(enable_markdown)
    except ValueError:
        enable_markdown = 1

    pfg = PaperFileGroup()

    for fp in file_manifest:
        file_content = parseNotebook(fp, enable_markdown=enable_markdown)
        pfg.file_paths.append(fp)
        pfg.file_contents.append(file_content)

    # <-------- split overly long .ipynb files ---------->
    pfg.run_file_split(max_token_limit=1024)
    n_split = len(pfg.sp_file_contents)

    inputs_array = [r"This is a Jupyter Notebook file, tell me about Each Block in Chinese. Focus Just On Code." +
                    r"If a block starts with `Markdown` which means it's a markdown block in ipynb. " +
                    r"Start a new line for a block and block num use Chinese." +
                    f"\n\n{frag}" for frag in pfg.sp_file_contents]
    inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag]
    sys_prompt_array = ["You are a professional programmer."] * n_split

    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array=inputs_array,
        inputs_show_user_array=inputs_show_user_array,
        llm_kwargs=llm_kwargs,
        chatbot=chatbot,
        history_array=[[""] for _ in range(n_split)],
        sys_prompt_array=sys_prompt_array,
        # max_workers=5,  # the maximum level of parallelism allowed by OpenAI
        scroller_max_len=80
    )

    # <-------- organize the result and exit ---------->
    block_result = " \n".join(gpt_response_collection)
    chatbot.append(("解析的结果如下", block_result))
    history.extend(["解析的结果如下", block_result])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # <-------- write to file and exit ---------->
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

@CatchException
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    chatbot.append([
        "函数插件功能?",
        "对IPynb文件进行解析。Contributor: codycjy."])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    history = []    # clear the history
    import glob
    import os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "":
            txt = '空空如也的输入栏'
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    if txt.endswith('.ipynb'):
        file_manifest = [txt]
    else:
        file_manifest = [f for f in glob.glob(
            f'{project_folder}/**/*.ipynb', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
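parseNotebook works because an .ipynb file is plain JSON with a top-level 'cells' list. A hypothetical sketch of the structure it relies on; 'demo.ipynb' is a placeholder filename:

# Hypothetical sketch (not part of the diff): the .ipynb JSON layout parseNotebook reads.
import json
with open('demo.ipynb', encoding='utf-8') as f:   # placeholder filename
    nb = json.load(f)
for cell in nb['cells']:
    # each cell has a 'cell_type' ('code' or 'markdown') and a 'source' list of lines
    print(cell['cell_type'], ''.join(cell['source'])[:60])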
crazy_functions/解析项目源代码.py (310 lines, regular file)
@@ -0,0 +1,310 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import input_clipping

def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import os, copy
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
    from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
    msg = '正常'
    inputs_array = []
    inputs_show_user_array = []
    history_array = []
    sys_prompt_array = []
    report_part_1 = []

    assert len(file_manifest) <= 512, "源文件太多(超过512个), 请缩减输入文件的数量。或者,您也可以选择删除此行警告,并修改代码拆分file_manifest列表,从而实现分批次处理。"
    ############################## <Step 1: analyze file by file, multi-threaded> ##################################
    for index, fp in enumerate(file_manifest):
        # read the file
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()
        prefix = "接下来请你逐文件分析下面的工程" if index == 0 else ""
        i_say = prefix + f'请对下面的程序文件做一个概述,文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}'
        # load the request payload
        inputs_array.append(i_say)
        inputs_show_user_array.append(i_say_show_user)
        history_array.append([])
        sys_prompt_array.append("你是一个程序架构分析师,正在分析一个源代码项目。你的回答必须简单明了。")

    # All files read; spawn one request thread per source file and send them to chatgpt for analysis
    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
        inputs_array = inputs_array,
        inputs_show_user_array = inputs_show_user_array,
        history_array = history_array,
        sys_prompt_array = sys_prompt_array,
        llm_kwargs = llm_kwargs,
        chatbot = chatbot,
        show_user_at_complete = True
    )

    # All files analyzed; write the results to a file and prepare the project-level summary
    report_part_1 = copy.deepcopy(gpt_response_collection)
    history_to_return = report_part_1
    res = write_results_to_file(report_part_1)
    chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
    yield from update_ui(chatbot=chatbot, history=history_to_return)  # refresh the UI

    ############################## <Step 2: summarize, single-threaded, grouped + iterative> ##################################
    batchsize = 16  # 16 files per group
    report_part_2 = []
    previous_iteration_files = []
    last_iteration_result = ""
    while True:
        if len(file_manifest) == 0: break
        this_iteration_file_manifest = file_manifest[:batchsize]
        this_iteration_gpt_response_collection = gpt_response_collection[:batchsize*2]
        file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)]
        # replace "请对下面的程序文件做一个概述" with the compact "文件名:{all_file[index]}"
        for index, content in enumerate(this_iteration_gpt_response_collection):
            if index % 2 == 0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}"  # keep only the file name to save tokens
        previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
        previous_iteration_files_string = ', '.join(previous_iteration_files)
        current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
        i_say = f'用一张Markdown表格简要描述以下文件的功能:{previous_iteration_files_string}。根据以上分析,用一句话概括程序的整体功能。'
        inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
        this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
        this_iteration_history.append(last_iteration_result)
        # clip the input
        inputs, this_iteration_history_feed = input_clipping(inputs=i_say, history=this_iteration_history, max_token_limit=2560)
        result = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=inputs, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
            history=this_iteration_history_feed,  # the analysis from previous iterations
            sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。")
        report_part_2.extend([i_say, result])
        last_iteration_result = result

        file_manifest = file_manifest[batchsize:]
        gpt_response_collection = gpt_response_collection[batchsize*2:]

    ############################## <END> ##################################
    history_to_return.extend(report_part_2)
    res = write_results_to_file(history_to_return)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history_to_return)  # refresh the UI


@CatchException
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob
    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
                    [f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
    project_folder = './'
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何python文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何python文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h头文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h/.hpp/.cpp/.c文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Rect项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何React文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.mod', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何lua文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \
                    [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何CSharp文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)


@CatchException
def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    txt_pattern = plugin_kwargs.get("advanced_arg")
    txt_pattern = txt_pattern.replace(",", ",")
    # the patterns to match (e.g.: *.c, *.cpp, *.py, config.toml)
    pattern_include = [_.lstrip(" ,").rstrip(" ,") for _ in txt_pattern.split(",") if _ != "" and not _.strip().startswith("^")]
    if not pattern_include: pattern_include = ["*"]  # match everything if no pattern is given
    # the file suffixes to exclude from matching (e.g.: ^*.c, ^*.cpp, ^*.py)
    pattern_except_suffix = [_.lstrip(" ^*.,").rstrip(" ,") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^*.")]
    pattern_except_suffix += ['zip', 'rar', '7z', 'tar', 'gz']  # avoid parsing compressed archives
    # the file names to exclude from matching (e.g.: ^README.md)
    pattern_except_name = [_.lstrip(" ^*,").rstrip(" ,").replace(".", r"\.") for _ in txt_pattern.split(" ") if _ != "" and _.strip().startswith("^") and not _.strip().startswith("^*.")]
    # build the regular expression
    pattern_except = r'/[^/]+\.(' + "|".join(pattern_except_suffix) + ')$'
    pattern_except += '|/(' + "|".join(pattern_except_name) + ')$' if pattern_except_name != [] else ''

    history.clear()
    import glob, os, re
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    # If an archive was uploaded, locate the extracted folder first so that the archive itself is not parsed
    maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
    if len(maybe_dir) > 0 and maybe_dir[0].endswith('.extract'):
        extract_folder_path = maybe_dir[0]
    else:
        extract_folder_path = project_folder
    # Search the uploaded (non-archive) files and the extracted files according to the given patterns
    file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \
                     os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
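To make the include/exclude handling in 解析任意code项目 concrete, here is a hypothetical sketch (not part of the diff) of how an advanced_arg splits into include globs and an exclusion regex; the literal argument string is invented for illustration:

# Hypothetical sketch: parse "*.py, ^*.md, ^README.md" the way the plugin above does.
import re
arg = "*.py, ^*.md, ^README.md"
include = [p.strip(' ,') for p in arg.split(',') if p.strip() and not p.strip().startswith('^')]
except_suffix = [p.strip(' ^*.,') for p in arg.split(' ') if p.strip().startswith('^*.')]
pattern_except = r'/[^/]+\.(' + '|'.join(except_suffix) + r')$'
print(include)                                          # ['*.py']
print(bool(re.search(pattern_except, '/repo/notes.md')))  # True: .md files are excluded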
crazy_functions/询问多个大语言模型.py (59 lines, regular file)
@@ -0,0 +1,59 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
@CatchException
def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             the text typed by the user in the input field, e.g. a passage to translate or a path containing files to process
    llm_kwargs      parameters of the LLM, such as temperature and top_p, usually passed through as-is
    plugin_kwargs   parameters of the plugin, usually passed through as-is
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt for the LLM
    web_port        the port the application is currently running on
    """
    history = []    # clear the history to avoid input overflow
    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI right away, since the GPT request takes a while

    # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo'  # any number of LLM backends, separated by '&'
    llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo'   # any number of LLM backends, separated by '&'
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=txt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt=system_prompt,
        retry_times_at_unknown_error=0
    )

    history.append(txt)
    history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI


@CatchException
def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             the text typed by the user in the input field, e.g. a passage to translate or a path containing files to process
    llm_kwargs      parameters of the LLM, such as temperature and top_p, usually passed through as-is
    plugin_kwargs   parameters of the plugin, usually passed through as-is
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt for the LLM
    web_port        the port the application is currently running on
    """
    history = []    # clear the history to avoid input overflow
    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI right away, since the GPT request takes a while

    # any number of LLM backends, separated by '&', e.g. 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo'
    llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo')
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=txt, inputs_show_user=txt,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
        sys_prompt=system_prompt,
        retry_times_at_unknown_error=0
    )

    history.append(txt)
    history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
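The '&'-joined model string is the whole dispatch mechanism here: one request fans out to every backend named in the string. A hypothetical sketch of how such a string might be assembled (not part of the diff; the model names must already be registered in request_llm):

# Hypothetical sketch: build the multi-backend model string used by 同时问询 above.
models = ['chatglm', 'gpt-3.5-turbo']         # assumed to be registered backends
llm_kwargs = {'llm_model': '&'.join(models)}  # -> 'chatglm&gpt-3.5-turbo'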
crazy_functions/读文章写摘要.py (67 lines, regular file)
@@ -0,0 +1,67 @@
from toolbox import update_ui
from toolbox import CatchException, report_execption, write_results_to_file
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
fast_debug = False


def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    import time, glob, os
    print('begin analysis on:', file_manifest)
    for index, fp in enumerate(file_manifest):
        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
            file_content = f.read()

        prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index == 0 else ""
        i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
        i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

        if not fast_debug:
            msg = '正常'
            # ** gpt request **
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)  # with timeout countdown

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.append(i_say_show_user); history.append(gpt_say)
            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
            if not fast_debug: time.sleep(2)

    all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
    i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
    chatbot.append((i_say, "[Local Message] waiting gpt response."))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if not fast_debug:
        msg = '正常'
        # ** gpt request **
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)  # with timeout countdown

        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
        res = write_results_to_file(history)
        chatbot.append(("完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI


@CatchException
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    history = []    # clear the history to avoid input overflow
    import glob, os
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return
    yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
crazy_functions/谷歌检索小助手.py (108 lines, regular file)
@@ -0,0 +1,108 @@
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from toolbox import CatchException, report_execption, write_results_to_file
from toolbox import update_ui

def get_meta_information(url, chatbot, history):
    import requests
    import arxiv
    import difflib
    from bs4 import BeautifulSoup
    from toolbox import get_conf
    proxies, = get_conf('proxies')
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36',
    }
    # send the GET request
    response = requests.get(url, proxies=proxies, headers=headers)

    # parse the page content
    soup = BeautifulSoup(response.text, "html.parser")

    def string_similar(s1, s2):
        return difflib.SequenceMatcher(None, s1, s2).quick_ratio()

    profile = []
    # collect the title and authors of every article
    for result in soup.select(".gs_ri"):
        title = result.a.text.replace('\n', ' ').replace('  ', ' ')
        author = result.select_one(".gs_a").text
        try:
            citation = result.select_one(".gs_fl > a[href*='cites']").text  # the citation count is the text of the link, take it directly
        except:
            citation = 'cited by 0'
        abstract = result.select_one(".gs_rs").text.strip()  # the abstract is the text inside .gs_rs; strip leading/trailing whitespace
        search = arxiv.Search(
            query = title,
            max_results = 1,
            sort_by = arxiv.SortCriterion.Relevance,
        )
        paper = next(search.results())
        if string_similar(title, paper.title) > 0.90:  # same paper
            abstract = paper.summary.replace('\n', ' ')
            is_paper_in_arxiv = True
        else:  # different paper, keep the Google Scholar abstract
            is_paper_in_arxiv = False
        print(title)
        print(author)
        print(citation)
        profile.append({
            'title': title,
            'author': author,
            'citation': citation,
            'abstract': abstract,
            'is_paper_in_arxiv': is_paper_in_arxiv,
        })

        chatbot[-1] = [chatbot[-1][0], title + f'\n\n是否在arxiv中(不在arxiv中无法获取完整摘要):{is_paper_in_arxiv}\n\n' + abstract]
        yield from update_ui(chatbot=chatbot, history=[])  # refresh the UI
    return profile

@CatchException
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    # Basic info: feature description and contributors
    chatbot.append([
        "函数插件功能?",
        "分析用户提供的谷歌学术(google scholar)搜索页面中,出现的所有文章: binary-husky,插件初始化中..."])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # Try to import the dependencies; if any are missing, suggest how to install them
    try:
        import arxiv
        import math
        from bs4 import BeautifulSoup
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # Clear the history to avoid input overflow
    history = []
    meta_paper_info_list = yield from get_meta_information(txt, chatbot, history)
    batchsize = 5
    for batch in range(math.ceil(len(meta_paper_info_list)/batchsize)):
        if len(meta_paper_info_list[:batchsize]) > 0:
            i_say = "下面是一些学术文献的数据,提取出以下内容:" + \
                    "1、英文题目;2、中文题目翻译;3、作者;4、arxiv公开(is_paper_in_arxiv);5、引用数量(cite);6、中文摘要翻译。" + \
                    f"以下是信息源:{str(meta_paper_info_list[:batchsize])}"

            inputs_show_user = f"请分析此页面中出现的所有文章:{txt},这是第{batch+1}批"
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say, inputs_show_user=inputs_show_user,
                llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
                sys_prompt="你是一个学术翻译,请从数据中提取信息。你必须使用Markdown表格。你必须逐个文献进行处理。"
            )

            history.extend([f"第{batch+1}批", gpt_say])
            meta_paper_info_list = meta_paper_info_list[batchsize:]

    chatbot.append(["状态?",
                    "已经全部完成,您可以试试让AI写一个Related Works,例如您可以继续输入Write a \"Related Works\" section about \"你搜索的研究领域\" for me."])
    msg = '正常'
    yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
    res = write_results_to_file(history)
    chatbot.append(("完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
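The arxiv lookup in get_meta_information above can be exercised on its own. A hypothetical sketch (not part of the diff), assuming the arxiv package's 1.x API, where search.results() yields Result objects; the query string is an example:

# Hypothetical sketch: fetch the closest arxiv match for a title, as the plugin above does.
import arxiv
search = arxiv.Search(query="Attention Is All You Need", max_results=1,
                      sort_by=arxiv.SortCriterion.Relevance)
paper = next(search.results())
print(paper.title)
print(paper.summary[:100])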
crazy_functions/高级功能函数模板.py (29 lines, regular file)
@@ -0,0 +1,29 @@
from toolbox import CatchException, update_ui
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
import datetime
@CatchException
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    txt             the text typed by the user in the input field, e.g. a passage to translate or a path containing files to process
    llm_kwargs      parameters of the LLM, such as temperature and top_p, usually passed through as-is
    plugin_kwargs   parameters of the plugin, currently unused
    chatbot         handle of the chat display box, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   the silent system prompt for the LLM
    web_port        the port the application is currently running on
    """
    history = []    # clear the history to avoid input overflow
    chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!"))
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI right away, since the GPT request takes a while
    for i in range(5):
        currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
        currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day
        i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。'
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            inputs=i_say, inputs_show_user=i_say,
            llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
            sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/?<PUT_YOUR_QUERY_HERE>)。"
        )
        chatbot[-1] = (i_say, gpt_say)
        history.append(i_say); history.append(gpt_say)
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
docs/Dockerfile+ChatGLM (59 lines, regular file)
@@ -0,0 +1,59 @@
# How to build: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# How to run (1) run directly (using GPU 0): docker run --rm -it --net=host --gpus="0" gpt-academic
# How to run (2) enter the container first to make adjustments: docker run --rm -it --net=host --gpus="0" gpt-academic bash

# Build from the NVIDIA base image so that the GPU can be used (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
ARG useProxyNetwork=''
RUN apt-get update
RUN apt-get install -y curl proxychains
RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing

# Configure the proxy network (used while building the Docker image)
# # comment out below if you do not need a proxy network - delete from this line downward
RUN $useProxyNetwork curl cip.cc
RUN sed -i '$ d' /etc/proxychains.conf
RUN sed -i '$ d' /etc/proxychains.conf
RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
ARG useProxyNetwork=proxychains
# # comment out above if you do not need a proxy network - delete from this line upward


# use python3 as the system default python
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

# Clone the repository
WORKDIR /gpt
RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git
WORKDIR /gpt/chatgpt_academic
RUN $useProxyNetwork python3 -m pip install -r requirements.txt
RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113

# Warm up the ChatGLM weights (optional step, not strictly necessary)
RUN echo ' \n\
from transformers import AutoModel, AutoTokenizer \n\
chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\
chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py
RUN python3 -u warm_up_chatglm.py

# Disable the layer cache here to make sure the latest code is pulled
ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
RUN $useProxyNetwork git pull

# Warm up the Tiktoken module
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Configure the proxy and API-KEY for chatgpt-academic (optional step, not strictly necessary)
# Multiple API keys may be provided at once; OpenAI keys and api2d keys can coexist, separated by an English comma, e.g. API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
# LLM_MODEL selects the initial model
# LOCAL_MODEL_DEVICE selects the device on which local models such as chatglm run; the options are cpu and cuda
RUN echo ' \n\
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
USE_PROXY = True \n\
LLM_MODEL = "chatglm" \n\
LOCAL_MODEL_DEVICE = "cuda" \n\
proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py

# Launch
CMD ["python3", "-u", "main.py"]
docs/README_EN.md · regular file · 291 lines
@@ -0,0 +1,291 @@
|
||||
> **Note**
|
||||
>
|
||||
> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct.
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > ChatGPT Academic Optimization
|
||||
|
||||
**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a [README in English](docs/README_EN.md) translated by this project itself.**
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> 1. Please note that only **functions with red color** support reading files; some functions are located in the **dropdown menu** of the plugin area. Additionally, any new plugin PR is welcomed and handled with the **highest priority**!
|
||||
>
|
||||
> 2. The functionality of each file in this project is detailed in the self-translation report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the project. With the iteration of the version, you can also click on the relevant function plugins at any time to call GPT to regenerate the self-analysis report of the project. The FAQ summary is in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) section.
|
||||
>
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
Function | Description
|
||||
--- | ---
|
||||
One-Click Polish | Supports one-click polishing and finding grammar errors in academic papers.
|
||||
One-Key Translation Between Chinese and English | One-click translation between Chinese and English.
|
||||
One-Key Code Interpretation | Can correctly display and interpret code.
|
||||
[Custom Shortcut Keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
|
||||
[Configure Proxy Server](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring proxy servers.
|
||||
Modular Design | Supports custom high-order function plugins and [function plugins], and plugins support [hot updates](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||
[Self-programming Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] [One-click read](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) the source code of this project
|
||||
[Program Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] One click analyzes the project tree of other Python/C/C++/Java/Lua/... projects
|
||||
Read the Paper | [Function Plugin] One-click interpretation of the full text of latex paper and generation of abstracts
|
||||
Latex Full Text Translation, Proofreading | [Function Plugin] One-click translation or proofreading of latex papers.
|
||||
Batch Comment Generation | [Function Plugin] One-click batch generation of function comments
|
||||
Chat Analysis Report Generation | [Function Plugin] After running, an automatic summary report will be generated
|
||||
[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function Plugin] Enter the arxiv article url to translate the abstract and download the PDF with one click
|
||||
[Full-text Translation Function of PDF Paper](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function Plugin] Extract the title & abstract of the PDF paper + translate the full text (multithreading)
|
||||
[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function Plugin] Given any Google Scholar search page URL, let gpt help you choose interesting articles.
|
||||
Formula / Picture / Table Display | Can display both the tex form and the rendered form of formulas at the same time; supports formula and code highlighting
|
||||
Multithreaded Function Plugin Support | Supports multi-threaded calling chatgpt, one-click processing of massive text or programs
|
||||
Start Dark Gradio [Theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Add ```/?__dark-theme=true``` at the end of the browser url to switch to dark theme
|
||||
[Multiple LLM Models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | It must feel nice to be served by GPT3.5, GPT4, and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time!
|
||||
Huggingface [Online Experience](https://huggingface.co/spaces/qingxu98/gpt-academic) (no proxy required) | After logging in to huggingface, copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
|
||||
... | ...
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
- New interface (switch between "left-right layout" and "up-down layout" by modifying the LAYOUT option in config.py)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>
|
||||
|
||||
|
||||
- All buttons are dynamically generated by reading functional.py, so custom functions can be added at will, freeing the clipboard
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Proofreading / correcting
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- If the output contains formulas, it will be displayed in both the tex form and the rendering form at the same time, which is convenient for copying and reading
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Don't want to read the project code? Just take the whole project to chatgpt
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Multiple major language model mixing calls (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
Mixed calls of multiple major language models are also available in the [huggingface beta version](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Installation-Method 1: Run directly (Windows, Linux or MacOS)
|
||||
|
||||
1. Download project
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
```
|
||||
|
||||
2. Configure API_KEY and proxy settings
|
||||
|
||||
|
||||
In `config.py`, configure the overseas Proxy and OpenAI API KEY as follows:
|
||||
```
|
||||
1. If you are in China, you need to set up an overseas proxy to use the OpenAI API smoothly. Please read config.py carefully for setup details (1. Modify USE_PROXY to True; 2. Modify proxies according to the instructions).
|
||||
2. Configure the OpenAI API KEY. You need to register and obtain an API KEY on the OpenAI website. Once you get the API KEY, you can configure it in the config.py file.
|
||||
3. Issues related to proxy networks (network timeouts, proxy failures) are summarized at https://github.com/binary-husky/chatgpt_academic/issues/1
|
||||
```
|
||||
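As a concrete illustration (the key and port below are placeholders, not working values), the proxy-related lines in `config.py` end up looking roughly like this:
```python
# Placeholder values: substitute your own OpenAI key and your local proxy port
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
USE_PROXY = True
proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", }
```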
(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists, and uses any same-named settings found there to override those in `config.py`. If you understand this reading logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and transferring (copying) the settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer.)
|
||||
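The override described above can be pictured with a minimal sketch (an illustration of the precedence only, not the project's actual loader code):
```python
import importlib

def read_single_conf(name):
    # Hypothetical helper: prefer config_private.py when it exists and
    # defines the attribute; otherwise fall back to the default in config.py.
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)
    except ImportError:
        pass
    return getattr(importlib.import_module("config"), name)

API_KEY = read_single_conf("API_KEY")  # reads from config_private.py if defined there
```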
|
||||
|
||||
3. Install dependencies
|
||||
```sh
|
||||
# (Option One) Recommended
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (Option Two) If you use anaconda, the steps are similar:
|
||||
# (Option Two.1) conda create -n gptac_venv python=3.11
|
||||
# (Option Two.2) conda activate gptac_venv
|
||||
# (Option Two.3) python -m pip install -r requirements.txt
|
||||
|
||||
# Note: Use official pip source or Ali pip source. Other pip sources (such as some university pips) may have problems, and temporary replacement methods are as follows:
|
||||
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
```
|
||||
|
||||
If you need to support Tsinghua ChatGLM, you need to install more dependencies (if you are not familiar with python or your computer configuration is not good, we recommend not to try):
|
||||
```sh
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
```
|
||||
|
||||
4. Run
|
||||
```sh
|
||||
python main.py
|
||||
```
|
||||
|
||||
5. Test function plugins
|
||||
```
|
||||
- Test Python project analysis
|
||||
In the input area, enter `./crazy_functions/test_project/python/dqn`, and then click "Analyze the entire Python project"
|
||||
- Test self-code interpretation
|
||||
Click "[Multithreading Demo] Interpretation of This Project Itself (Source Code Interpretation)"
|
||||
- Test experimental function template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions.
|
||||
Click "[Function Plugin Template Demo] Today in History"
|
||||
- There are more functions to choose from in the function plugin area drop-down menu.
|
||||
```
|
||||
|
||||
## Installation-Method 2: Use Docker (Linux)
|
||||
|
||||
1. ChatGPT only (recommended for most people)
|
||||
``` sh
|
||||
# download project
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
# configure overseas Proxy and OpenAI API KEY
|
||||
Edit config.py with any text editor
|
||||
# Install
|
||||
docker build -t gpt-academic .
|
||||
# Run
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
|
||||
# Test function plug-in
|
||||
## Test function plugin template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions.
|
||||
Click "[Function Plugin Template Demo] Today in History"
|
||||
## Test Abstract Writing for Latex Projects
|
||||
Enter ./crazy_functions/test_project/latex/attention in the input area, and then click "Read Tex Paper and Write Abstract"
|
||||
## Test Python Project Analysis
|
||||
Enter ./crazy_functions/test_project/python/dqn in the input area and click "Analyze the entire Python project."
|
||||
|
||||
More functions are available in the function plugin area drop-down menu.
|
||||
```
|
||||
|
||||
2. ChatGPT+ChatGLM (requires strong familiarity with docker + strong computer configuration)
|
||||
|
||||
``` sh
|
||||
# Modify dockerfile
|
||||
cd docs && nano Dockerfile+ChatGLM
|
||||
# How to build | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs)
|
||||
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
|
||||
# How to run | 如何运行 (1) 直接运行:
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic
|
||||
# How to run | 如何运行 (2) 我想运行之前进容器做一些调整:
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic bash
|
||||
```
|
||||
|
||||
|
||||
## Installation-Method 3: Other Deployment Methods
|
||||
|
||||
1. Remote Cloud Server Deployment
|
||||
Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
2. Use WSL2 (Windows Subsystem for Linux)
|
||||
Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
|
||||
|
||||
## Installation-Proxy Configuration
|
||||
### Method 1: Conventional method
|
||||
[Configure Proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)
|
||||
|
||||
### Method 2: Step-by-step tutorial for newcomers
|
||||
[Step-by-step tutorial for newcomers](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
|
||||
|
||||
---
|
||||
|
||||
## Customizing Convenient Buttons (Customizing Academic Shortcuts)
|
||||
Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has already been added successfully and is visible, the prefix and suffix support hot modification and take effect without restarting the program.) For example:
|
||||
```
|
||||
"Super English to Chinese translation": {
|
||||
# Prefix, which will be added before your input. For example, to describe your requirements, such as translation, code interpretation, polishing, etc.
|
||||
"Prefix": "Please translate the following content into Chinese and use a markdown table to interpret the proprietary terms in the text one by one:\n\n",
|
||||
|
||||
# Suffix, which will be added after your input. For example, combined with the prefix, you can put your input content in quotes.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Some Function Displays
|
||||
|
||||
### Image Display:
|
||||
|
||||
|
||||
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
|
||||
</div>
|
||||
|
||||
### If a program can understand and analyze itself:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
|
||||
</div>
|
||||
|
||||
### Analysis of any Python/Cpp project:
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
|
||||
</div>
|
||||
|
||||
### One-click reading comprehension and summary generation of Latex papers
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
|
||||
</div>
|
||||
|
||||
### Automatic report generation
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||
</div>
|
||||
|
||||
### Modular functional design
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||
</div>
|
||||
|
||||
### Source code translation to English
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
|
||||
</div>
|
||||
|
||||
## Todo and version planning:
|
||||
- version 3.2+ (todo): Function plugin supports more parameter interfaces
|
||||
- version 3.1: Support for querying multiple GPT models at the same time! Support for api2d, and for load balancing across multiple API keys
|
||||
- version 3.0: Support for chatglm and other small llms
|
||||
- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins
|
||||
- version 2.5: Self-updating, solves the problem of text being too long and token overflowing when summarizing large project source code
|
||||
- version 2.4: (1) Added PDF full text translation function; (2) Added function to switch input area position; (3) Added vertical layout option; (4) Multi-threaded function plugin optimization.
|
||||
- version 2.3: Enhanced multi-threaded interactivity
|
||||
- version 2.2: Function plugin supports hot reloading
|
||||
- version 2.1: Foldable layout
|
||||
- version 2.0: Introduction of modular function plugins
|
||||
- version 1.0: Basic functions
|
||||
|
||||
## Reference and learning
|
||||
|
||||
```
|
||||
The code design of this project has referenced many other excellent projects, including:
|
||||
|
||||
# Reference project 1: Borrowed many tips from ChuanhuChatGPT
|
||||
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||
|
||||
# Reference project 2: Tsinghua ChatGLM-6B:
|
||||
https://github.com/THUDM/ChatGLM-6B
|
||||
```
|
||||
|
||||
docs/README_FR.md · regular file · 296 lines
@@ -0,0 +1,296 @@
|
||||
> **Note**
|
||||
>
|
||||
> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut-être pas correct à 100%.
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > ChatGPT Optimisation Académique
|
||||
|
||||
**Si vous aimez ce projet, donnez-lui une étoile ; si vous avez inventé des raccourcis académiques plus utiles ou des plugins fonctionnels, n'hésitez pas à ouvrir une issue ou une pull request. Nous avons également un fichier README en [anglais|](docs/README_EN.md)[japonais|](docs/README_JP.md)[russe|](docs/README_RS.md)[français](docs/README_FR.md) traduit par ce projet lui-même.**
|
||||
|
||||
> **Note**
|
||||
>
|
||||
> 1. Veuillez noter que seuls les plugins de fonction signalés en **rouge** sont capables de lire les fichiers, certains plugins se trouvent dans le **menu déroulant** de la section plugin. Nous sommes également les bienvenus avec la plus haute priorité pour traiter et accepter tout nouveau PR de plugin!
|
||||
>
|
||||
> 2. Chaque fichier dans ce projet est expliqué en détail dans l'auto-analyse [self_analysis.md](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins fonctionnels pertinents pour appeler GPT et générer un rapport d'auto-analyse projet mis à jour. Les questions fréquemment posées sont résumées dans le [wiki](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
|
||||
>
|
||||
|
||||
<div align="center">
|
||||
|
||||
Fonctionnalité | Description
|
||||
--- | ---
|
||||
Polissage en un clic | Prend en charge la correction en un clic et la recherche d'erreurs de syntaxe dans les documents de recherche.
|
||||
Traduction Chinois-Anglais en un clic | Une touche pour traduire la partie chinoise en anglais ou celle anglaise en chinois.
|
||||
Explication de code en un clic | Affiche et explique correctement le code.
|
||||
[Raccourcis clavier personnalisables](https://www.bilibili.com/video/BV14s4y1E7jN) | Prend en charge les raccourcis clavier personnalisables.
|
||||
[Configuration du serveur proxy](https://www.bilibili.com/video/BV1rc411W7Dr) | Prend en charge la configuration du serveur proxy.
|
||||
Conception modulaire | Prend en charge la personnalisation des plugins de fonctions et des [plugins] de fonctions hiérarchiques personnalisés, et les plugins prennent en charge [la mise à jour à chaud](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
||||
[Auto-analyse du programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] [Lire en un clic](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) le code source de ce projet.
|
||||
[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] En un clic, les projets Python/C/C++/Java/Lua/... peuvent être analysés.
|
||||
Lire le document de recherche | [Plugins] Lisez le résumé de l'article en latex et générer un résumé.
|
||||
Traduction et polissage de l'article complet en LaTeX | [Plugins] Une touche pour traduire ou corriger en LaTeX
|
||||
Génération Commentaire de fonction en vrac | [Plugins] Lisez en un clic les fonctions et générez des commentaires de fonction.
|
||||
Rapport d'analyse automatique des chats générés | [Plugins] Génère un rapport de synthèse après l'exécution.
|
||||
[Assistant arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugins] Entrez l'url de l'article arxiv pour traduire le résumé + télécharger le PDF en un clic
|
||||
[Traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugins] Extraire le titre et le résumé de l'article PDF + Traduire le texte entier (multithread)
|
||||
[Aide à la recherche Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Plugins] Donnez à GPT l'URL de n'importe quelle page de recherche Google Scholar pour vous aider à sélectionner des articles intéressants
|
||||
Affichage de formules/images/tableaux | Affiche simultanément la forme TeX et la forme rendue des formules ; prend en charge la mise en évidence des formules et du code
|
||||
Prise en charge des plugins multithread | Prise en charge de l'appel multithread de chatgpt, traitement en masse de texte ou de programmes en un clic
|
||||
Activer le thème Gradio sombre [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) au démarrage | Ajoutez ```/?__dark-theme=true``` à l'URL du navigateur pour basculer vers le thème sombre
|
||||
[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [prise en charge de l'interface API2D](https://api2d.com/) | Comment cela serait-il de se faire servir par GPT3.5, GPT4 et la [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B) en même temps?
|
||||
Expérience en ligne sur huggingface (sans proxy) | Après vous être connecté à huggingface, copiez [cet espace](https://huggingface.co/spaces/qingxu98/gpt-academic)
|
||||
... | ...
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
- Nouvelle interface (modifiable en modifiant l'option de mise en page dans config.py pour basculer entre les mises en page gauche-droite et haut-bas)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>
|
||||
|
||||
|
||||
- Tous les boutons sont générés dynamiquement en lisant functional.py, les utilisateurs peuvent ajouter librement des fonctions personnalisées pour libérer le presse-papiers.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Correction/amélioration
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- Si la sortie contient des formules, elles seront affichées à la fois sous forme TeX et sous forme rendue pour faciliter la copie et la lecture.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Pas envie de lire le code du projet ? Donnez simplement l'ensemble du projet à chatgpt.
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- Utilisation combinée de plusieurs modèles de langage sophistiqués (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
Utilisation combinée de plusieurs modèles de langage sophistiqués en version de test [huggingface](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (la version huggingface ne prend pas en charge Chatglm).
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Installation - Méthode 1 : Exécution directe (Windows, Linux or MacOS)
|
||||
|
||||
1. Téléchargez le projet
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
```
|
||||
|
||||
2. Configuration de l'API_KEY et des paramètres de proxy
|
||||
|
||||
Dans `config.py`, configurez les paramètres de proxy et de clé d'API OpenAI, comme indiqué ci-dessous
|
||||
```
|
||||
1. Si vous êtes en Chine, vous devez configurer un proxy étranger pour utiliser l'API OpenAI en toute transparence. Pour ce faire, veuillez lire attentivement le fichier config.py (1. Modifiez l'option USE_PROXY ; 2. Modifiez les paramètres de proxies comme indiqué dans les instructions).
|
||||
2. Configurez votre clé API OpenAI. Vous devez vous inscrire sur le site web d'OpenAI pour obtenir une clé API. Une fois que vous avez votre clé API, vous pouvez la configurer dans le fichier config.py.
|
||||
3. Tous les problèmes liés aux réseaux de proxy (temps d'attente, non-fonctionnement des proxies) sont résumés dans https://github.com/binary-husky/chatgpt_academic/issues/1.
|
||||
```
|
||||
(Remarque : le programme vérifie d'abord s'il existe un fichier de configuration privé nommé `config_private.py`, et utilise les configurations de celui-ci à la place de celles du fichier `config.py`. Par conséquent, si vous comprenez notre logique de lecture de configuration, nous vous recommandons fortement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de celui-ci dans `config_private.py`. `config_private.py` n'est pas contrôlé par git et rend vos informations personnelles plus sûres.)
|
||||
|
||||
3. Installation des dépendances
|
||||
```sh
|
||||
# (Option 1) Recommandé
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
# (Option 2) Si vous utilisez anaconda, les étapes sont similaires :
|
||||
# (Option 2.1) conda create -n gptac_venv python=3.11
|
||||
# (Option 2.2) conda activate gptac_venv
|
||||
# (Option 2.3) python -m pip install -r requirements.txt
|
||||
|
||||
# note : Utilisez la source pip officielle ou la source pip Alibaba. D'autres sources (comme celles des universités) pourraient poser problème. Pour utiliser temporairement une autre source, utilisez :
|
||||
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
```
|
||||
|
||||
Si vous avez besoin de soutenir ChatGLM de Tsinghua, vous devez installer plus de dépendances (si vous n'êtes pas familier avec Python ou que votre ordinateur n'est pas assez performant, nous vous recommandons de ne pas essayer) :
|
||||
```sh
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
```
|
||||
|
||||
4. Exécution
|
||||
```sh
|
||||
python main.py
|
||||
```
|
||||
|
||||
5. Tester les plugins de fonctions
|
||||
```
|
||||
- Test de l'analyse d'un projet Python
|
||||
Dans la zone de saisie, entrez `./crazy_functions/test_project/python/dqn`, puis cliquez sur "Parse Entire Python Project"
|
||||
- Test d'auto-lecture du code
|
||||
Cliquez sur "[Démo multi-thread] Parser ce projet lui-même (auto-traduction de la source)"
|
||||
- Test du modèle de fonctionnalité expérimentale (exige une réponse de l'IA à ce qui est arrivé aujourd'hui dans l'histoire). Vous pouvez utiliser cette fonctionnalité comme modèle pour des fonctions plus complexes.
|
||||
Cliquez sur "[Démo modèle de plugin de fonction] Histoire du Jour"
|
||||
- Le menu déroulant de la zone de plugin de fonctionnalité contient plus de fonctionnalités à sélectionner.
|
||||
```
|
||||
|
||||
## Installation - Méthode 2 : Utilisation de docker (Linux)
|
||||
|
||||
|
||||
|
||||
|
||||
1. ChatGPT seul (recommandé pour la plupart des gens)
|
||||
``` sh
|
||||
# Télécharger le projet
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
# Configurer le proxy outre-mer et la clé API OpenAI
|
||||
Modifier le fichier config.py avec n'importe quel éditeur de texte
|
||||
# Installer
|
||||
docker build -t gpt-academic .
|
||||
# Exécuter
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
|
||||
# Tester les modules de fonction
|
||||
## Tester la fonction modèle des modules (requiert la réponse de GPT à "qu'est-ce qui s'est passé dans l'histoire aujourd'hui ?"), vous pouvez utiliser cette fonction en tant que modèle pour implémenter des fonctions plus complexes.
|
||||
Cliquez sur "[Exemple de modèle de module] Histoire d'aujourd'hui"
|
||||
## Tester le résumé écrit pour le projet LaTeX
|
||||
Dans la zone de saisie, tapez ./crazy_functions/test_project/latex/attention, puis cliquez sur "Lire le résumé de l'article de recherche LaTeX"
|
||||
## Tester l'analyse du projet Python
|
||||
Dans la zone de saisie, tapez ./crazy_functions/test_project/python/dqn, puis cliquez sur "Analyser l'ensemble du projet Python"
|
||||
|
||||
D'autres fonctions sont disponibles dans la liste déroulante des modules de fonction.
|
||||
```
|
||||
|
||||
2. ChatGPT+ChatGLM (nécessite une grande connaissance de docker et une configuration informatique suffisamment puissante)
|
||||
``` sh
|
||||
# Modifier le dockerfile
|
||||
cd docs && nano Dockerfile+ChatGLM
|
||||
# Comment construire | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs)
|
||||
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
|
||||
# Comment exécuter | 如何运行 (1) Directement exécuter :
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic
|
||||
# Comment exécuter | 如何运行 (2) Je veux effectuer quelques ajustements dans le conteneur avant de lancer :
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic bash
|
||||
```
|
||||
|
||||
## Installation - Méthode 3 : Autres méthodes de déploiement
|
||||
|
||||
1. Déploiement sur un cloud serveur distant
|
||||
Veuillez consulter le [wiki de déploiement-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
2. Utilisation de WSL2 (Windows Subsystem for Linux)
|
||||
Veuillez consulter le [wiki de déploiement-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
|
||||
|
||||
## Installation - Configuration du proxy
|
||||
### Méthode 1 : Méthode conventionnelle
|
||||
[Configuration du proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)
|
||||
|
||||
### Méthode 2 : Tutoriel pour débutant pur
|
||||
[Tutoriel pour débutant pur](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Personnalisation des nouveaux boutons pratiques (personnalisation des raccourcis académiques)
|
||||
Ouvrez le fichier `core_functional.py` avec n'importe quel éditeur de texte, ajoutez les éléments suivants, puis redémarrez le programme. (Si le bouton a déjà été ajouté avec succès et est visible, le préfixe et le suffixe pris en charge peuvent être modifiés à chaud sans avoir besoin de redémarrer le programme.)
|
||||
Par exemple:
|
||||
```
|
||||
"Traduction Français-Chinois": {
|
||||
# Préfixe, qui sera ajouté avant votre saisie. Par exemple, pour décrire votre demande, telle que la traduction, le débogage de code, l'amélioration, etc.
|
||||
"Prefix": "Veuillez traduire le contenu ci-dessous en chinois, puis expliquer chaque terme propre mentionné dans un tableau Markdown :\n\n",
|
||||
|
||||
# Suffixe, qui sera ajouté après votre saisie. Par exemple, en combinaison avec un préfixe, vous pouvez mettre le contenu de votre saisie entre guillemets.
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
|
||||
## Présentation de certaines fonctionnalités
|
||||
|
||||
### Affichage des images:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
|
||||
</div>
|
||||
|
||||
|
||||
### Si un programme peut comprendre et décomposer lui-même :
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
|
||||
</div>
|
||||
|
||||
|
||||
### Analyse de tout projet Python/Cpp quelconque :
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
|
||||
</div>
|
||||
|
||||
### Lecture et résumé générés automatiquement pour les articles en Latex
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
|
||||
</div>
|
||||
|
||||
### Génération de rapports automatique
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||
</div>
|
||||
|
||||
### Conception de fonctionnalités modulaires
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||
</div>
|
||||
|
||||
|
||||
### Traduction de code source en anglais
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
|
||||
</div>
|
||||
|
||||
## À faire et planification de version :
|
||||
- version 3.2+ (à faire) : Prise en charge de plus de paramètres d'interface de plugin de fonction
|
||||
- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Prise en charge de l'API2d, prise en charge de la répartition de charge de plusieurs clés API
|
||||
- version 3.0 : Prise en charge de chatglm et d'autres petits llm
|
||||
- version 2.6 : Réorganisation de la structure du plugin, amélioration de l'interactivité, ajout de plus de plugins
|
||||
- version 2.5 : Mise à jour automatique, résolution du problème de dépassement de jeton et de texte trop long lors de la compilation du code source complet
|
||||
- version 2.4 : (1) Ajout de la fonctionnalité de traduction intégrale de PDF ; (2) Ajout d'une fonctionnalité de changement de position de zone de saisie ; (3) Ajout d'une option de disposition verticale ; (4) Optimisation du plugin de fonction multi-thread.
|
||||
- version 2.3 : Amélioration de l'interactivité multi-thread
|
||||
- version 2.2 : Prise en charge du rechargement à chaud du plugin de fonction
|
||||
- version 2.1 : Mise en page pliable
|
||||
- version 2.0 : Introduction du plugin de fonction modulaire
|
||||
- version 1.0 : Fonctionnalité de base
|
||||
|
||||
## Références et apprentissage
|
||||
|
||||
```
|
||||
De nombreux designs d'autres projets exceptionnels ont été utilisés pour référence dans le code, notamment :
|
||||
|
||||
# Projet 1 : De nombreuses astuces ont été empruntées à ChuanhuChatGPT
|
||||
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||
|
||||
# Projet 2 : ChatGLM-6B de Tsinghua :
|
||||
https://github.com/THUDM/ChatGLM-6B
|
||||
```
|
||||
|
||||
docs/README_JP.md · regular file · 302 lines
@@ -0,0 +1,302 @@
|
||||
> **Note**
|
||||
>
|
||||
> このReadmeファイルは、このプロジェクトのmarkdown翻訳プラグインによって自動的に生成されたもので、100%正確ではない可能性があります。
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > ChatGPT 学術最適化
|
||||
|
||||
**このプロジェクトが好きだったら、スターをつけてください。もし、より使いやすい学術用のショートカットキーまたはファンクションプラグインを発明した場合は、issueを発行するかpull requestを作成してください。また、このプロジェクト自体によって翻訳されたREADMEは[英語説明書|](docs/README_EN.md)[日本語説明書|](docs/README_JP.md)[ロシア語説明書|](docs/README_RS.md)[フランス語説明書](docs/README_FR.md)もあります。**
|
||||
|
||||
> **注意事項**
|
||||
>
|
||||
> 1. **赤色**のラベルが付いているファンクションプラグイン(ボタン)のみファイルを読み込めます。一部のプラグインはプラグインエリアのドロップダウンメニューにあります。新しいプラグインのPRを歓迎いたします!
|
||||
>
|
||||
> 2. このプロジェクトの各ファイルの機能は`self_analysis.md`(自己解析レポート)で詳しく説明されています。バージョンが追加されると、関連するファンクションプラグインをクリックして、GPTを呼び出して自己解析レポートを再生成することができます。一般的な質問は`wiki`にまとめられています。(`https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98`)
|
||||
|
||||
|
||||
<div align="center">
|
||||
|
||||
機能 | 説明
|
||||
--- | ---
|
||||
ワンクリック整形 | 論文の文法エラーを一括で正確に修正できます。
|
||||
ワンクリック中英翻訳 | 中国語と英語の相互翻訳にワンクリックで対応します。
|
||||
ワンクリックコード説明 | コードの正しい表示と説明が可能です。
|
||||
[カスタムショートカットキー](https://www.bilibili.com/video/BV14s4y1E7jN) | カスタムショートカットキーをサポートします。
|
||||
[プロキシサーバーの設定](https://www.bilibili.com/video/BV1rc411W7Dr) | プロキシサーバーの設定をサポートします。
|
||||
モジュラーデザイン | カスタム高階関数プラグインと[関数プラグイン]、プラグイン[ホット更新]のサポートが可能です。詳細は[こちら](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||
[自己プログラム解析](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン][ワンクリック理解](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A)このプロジェクトのソースコード
|
||||
[プログラム解析機能](https://www.bilibili.com/video/BV1cj411A7VW) | [関数プラグイン] ワンクリックで別のPython/C/C++/Java/Lua/...プロジェクトツリーを解析できます。
|
||||
論文読解 | [関数プラグイン] LaTeX論文の全文をワンクリックで解読し、要約を生成します。
|
||||
LaTeX全文翻訳、整形 | [関数プラグイン] ワンクリックでLaTeX論文を翻訳または整形できます。
|
||||
注釈生成 | [関数プラグイン] ワンクリックで関数の注釈を大量に生成できます。
|
||||
チャット分析レポート生成 | [関数プラグイン] 実行後、まとめレポートを自動生成します。
|
||||
[arxivヘルパー](https://www.bilibili.com/video/BV1LM4y1279X) | [関数プラグイン] 入力したarxivの記事URLで要約をワンクリック翻訳+PDFダウンロードができます。
|
||||
[PDF論文全文翻訳機能](https://www.bilibili.com/video/BV1KT411x7Wn) | [関数プラグイン] PDF論文タイトルと要約を抽出し、全文を翻訳します(マルチスレッド)。
|
||||
[Google Scholar Integratorヘルパー](https://www.bilibili.com/video/BV19L411U7ia) | [関数プラグイン] 任意のGoogle Scholar検索ページURLを指定すると、gptが興味深い記事を選択します。
|
||||
数式/画像/テーブル表示 | 数式のTex形式とレンダリング形式を同時に表示できます。数式、コードのハイライトをサポートしています。
|
||||
マルチスレッド関数プラグインサポート | ChatGPTをマルチスレッドで呼び出すことができ、大量のテキストやプログラムを簡単に処理できます。
|
||||
ダークグラジオ[テーマ](https://github.com/binary-husky/chatgpt_academic/issues/173)の起動 | 「/?__dark-theme=true」というURLをブラウザに追加することで、ダークテーマに切り替えることができます。
|
||||
[多数のLLMモデル](https://www.bilibili.com/video/BV1wT411p7yf)をサポート、[API2D](https://api2d.com/)インターフェースをサポート | GPT3.5、GPT4、[清華ChatGLM](https://github.com/THUDM/ChatGLM-6B)による同時サポートは、とても素晴らしいですね!
|
||||
huggingface[オンライン版](https://huggingface.co/spaces/qingxu98/gpt-academic)(プロキシ不要) | huggingfaceにログイン後、[このスペース](https://huggingface.co/spaces/qingxu98/gpt-academic)をコピーしてください。
|
||||
...... | ......
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
- 新しいインターフェース(config.pyのLAYOUTオプションを変更するだけで、「左右レイアウト」と「上下レイアウト」を切り替えることができます)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
||||
</div>
|
||||
|
||||
|
||||
- すべてのボタンは、functional.pyを読み込んで動的に生成されます。カスタム機能を自由に追加して、クリップボードを解放します
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- 校正/修正
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
|
||||
</div>
|
||||
|
||||
- 出力に数式が含まれている場合、TeX形式とレンダリング形式の両方が表示され、コピーと読み取りが容易になります
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
|
||||
</div>
|
||||
|
||||
- プロジェクトのコードを見るのが面倒?chatgptに整備されたプロジェクトを直接与えましょう
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
|
||||
</div>
|
||||
|
||||
- 多数の大規模言語モデルの混合呼び出し(ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
|
||||
</div>
|
||||
|
||||
多数の大規模言語モデルの混合呼び出し[huggingfaceテスト版](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta)(huggingface版はchatglmをサポートしていません)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## インストール方法1:直接実行(Windows、LinuxまたはMacOS)
|
||||
|
||||
1. プロジェクトをダウンロードします。
|
||||
```sh
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
```
|
||||
|
||||
2. API_KEYとプロキシ設定を構成する
|
||||
|
||||
`config.py`で、海外のProxyとOpenAI API KEYを構成して説明します。
|
||||
```
|
||||
1.あなたが中国にいる場合、OpenAI APIをスムーズに使用するには海外プロキシを設定する必要があります。構成の詳細については、config.py(1.その中のUSE_PROXYをTrueに変更し、2.手順に従ってプロキシを変更する)を詳細に読んでください。
|
||||
2. OpenAI API KEYを構成する。OpenAIのウェブサイトでAPI KEYを取得してください。一旦API KEYを手に入れると、config.pyファイルで設定するだけです。
|
||||
3.プロキシネットワークに関連する問題(ネットワークタイムアウト、プロキシが動作しない)をhttps://github.com/binary-husky/chatgpt_academic/issues/1にまとめました。
|
||||
```
|
||||
(P.S. プログラム実行時にconfig.pyの隣にconfig_private.pyという名前のプライバシー設定ファイルを作成し、同じ名前の設定を上書きするconfig_private.pyが存在するかどうかを優先的に確認します。そのため、私たちの構成読み取りロジックを理解できる場合は、config.pyの隣にconfig_private.pyという名前の新しい設定ファイルを作成し、その中のconfig.pyから設定を移動してください。config_private.pyはgitで保守されていないため、プライバシー情報をより安全にすることができます。)
|
||||
|
||||
3. 依存関係をインストールします。
|
||||
```sh
|
||||
# (選択肢1) 推奨
|
||||
python -m pip install -r requirements.txt
|
||||
|
||||
|
||||
# (選択肢2) もしAnacondaを使用する場合、手順は同様です:
|
||||
# (選択肢2.1) conda create -n gptac_venv python=3.11
|
||||
# (選択肢2.2) conda activate gptac_venv
|
||||
# (選択肢2.3) python -m pip install -r requirements.txt
|
||||
|
||||
# 注: 公式のpipソースまたはAlibabaのpipソースを使用してください。 別のpipソース(例:一部の大学のpip)は問題が発生する可能性があります。 一時的なソースの切り替え方法:
|
||||
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
|
||||
```
|
||||
|
||||
もしあなたが清華ChatGLMをサポートする必要がある場合、さらに多くの依存関係をインストールする必要があります(Pythonに慣れない方やコンピューターの設定が十分でない方は、試みないことをお勧めします):
|
||||
```sh
|
||||
python -m pip install -r request_llm/requirements_chatglm.txt
|
||||
```
|
||||
|
||||
4. 実行
|
||||
```sh
|
||||
python main.py
|
||||
```
|
||||
|
||||
5. 関数プラグインのテスト
|
||||
```
|
||||
- Pythonプロジェクト分析のテスト
|
||||
入力欄に `./crazy_functions/test_project/python/dqn` と入力し、「Pythonプロジェクト全体の解析」をクリックします。
|
||||
- 自己コード解読のテスト
|
||||
「[マルチスレッドデモ] このプロジェクト自体を解析します(ソースを翻訳して解読します)」をクリックします。
|
||||
- 実験的な機能テンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。
|
||||
「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。
|
||||
- 関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。
|
||||
```
|
||||
|
||||
## インストール方法2:Dockerを使用する(Linux)
|
||||
|
||||
1. ChatGPTのみ(大多数の人にお勧めです)
|
||||
``` sh
|
||||
# プロジェクトのダウンロード
|
||||
git clone https://github.com/binary-husky/chatgpt_academic.git
|
||||
cd chatgpt_academic
|
||||
# 海外プロキシとOpenAI API KEYの設定
|
||||
config.pyを任意のテキストエディタで編集する
|
||||
# インストール
|
||||
docker build -t gpt-academic .
|
||||
# 実行
|
||||
docker run --rm -it --net=host gpt-academic
|
||||
|
||||
# 関数プラグインのテスト
|
||||
## 関数プラグインテンプレート関数のテスト(GPTが「今日の歴史」に何が起こったかを回答することが求められます)。この関数をテンプレートとして使用して、より複雑な機能を実装できます。
|
||||
「[関数プラグインテンプレートデモ] 今日の歴史」をクリックします。
|
||||
## Latexプロジェクトの要約を書くテスト
|
||||
入力欄に./crazy_functions/test_project/latex/attentionと入力し、「テックス論文を読んで要約を書く」をクリックします。
|
||||
## Pythonプロジェクト分析のテスト
|
||||
入力欄に./crazy_functions/test_project/python/dqnと入力し、[Pythonプロジェクトの全解析]をクリックします。
|
||||
|
||||
関数プラグインエリアのドロップダウンメニューには他にも選択肢があります。
|
||||
```
|
||||
|
||||
2. ChatGPT + ChatGLM(Dockerに非常に詳しい人+十分なコンピューター設定が必要)
|
||||
|
||||
|
||||
|
||||
```sh
|
||||
# Dockerfileの編集
|
||||
cd docs && nano Dockerfile+ChatGLM
|
||||
# ビルド方法
|
||||
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
|
||||
# 実行方法 (1) 直接実行:
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic
|
||||
# 実行方法 (2) コンテナに入って調整する:
|
||||
docker run --rm -it --net=host --gpus=all gpt-academic bash
|
||||
```
|
||||
|
||||
## インストール方法3:その他のデプロイ方法
|
||||
|
||||
1. クラウドサーバーデプロイ
|
||||
[デプロイwiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
|
||||
|
||||
2. WSL2を使用 (Windows Subsystem for Linux)
|
||||
[デプロイwiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
|
||||
|
||||
|
||||
## インストール-プロキシ設定
|
||||
1. 通常の方法
|
||||
[プロキシを設定する](https://github.com/binary-husky/chatgpt_academic/issues/1)
|
||||
|
||||
2. 初心者向けチュートリアル
|
||||
[初心者向けチュートリアル](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## カスタムボタンの追加(学術ショートカットキー)
|
||||
|
||||
`core_functional.py`を任意のテキストエディタで開き、以下のエントリーを追加し、プログラムを再起動してください。(ボタンが追加されて表示される場合、前置詞と後置詞はホット編集がサポートされているため、プログラムを再起動せずに即座に有効になります。)
|
||||
|
||||
例:
|
||||
```
|
||||
"超级英译中": {
|
||||
# 前置詞 - あなたの要求を説明するために使用されます。翻訳、コードの説明、編集など。
|
||||
"Prefix": "以下のコンテンツを中国語に翻訳して、マークダウンテーブルを使用して専門用語を説明してください。\n\n",
|
||||
|
||||
# 後置詞 - プレフィックスと共に使用すると、入力内容を引用符で囲むことができます。
|
||||
"Suffix": "",
|
||||
},
|
||||
```
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
|
||||
</div>
|
||||
|
||||
|
||||
---
|
||||
|
||||
## いくつかの機能の例
|
||||
|
||||
### 画像表示:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
|
||||
</div>
|
||||
|
||||
|
||||
### プログラムが自己解析できる場合:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
|
||||
</div>
|
||||
|
||||
### 他のPython/Cppプロジェクトの解析:
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
|
||||
</div>
|
||||
|
||||
### Latex論文の一括読解と要約生成
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
|
||||
</div>
|
||||
|
||||
### 自動報告生成
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
|
||||
</div>
|
||||
|
||||
### モジュール化された機能デザイン
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
|
||||
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
|
||||
</div>
|
||||
|
||||
|
||||
### ソースコードの英語翻訳
|
||||
|
||||
<div align="center">
|
||||
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
|
||||
</div>
|
||||
|
||||
## Todo およびバージョン計画:
|
||||
- version 3.2+ (todo): 関数プラグインがより多くのパラメーターインターフェースをサポートするようになります。
|
||||
- version 3.1: 複数のgptモデルを同時にクエリし、api2dをサポートし、複数のapikeyの負荷分散をサポートします。
|
||||
- version 3.0: chatglmおよび他の小型llmのサポート
|
||||
- version 2.6: プラグイン構造を再構成し、相互作用性を高め、より多くのプラグインを追加しました。
|
||||
- version 2.5: 自己更新。総括的な大規模プロジェクトのソースコードをまとめた場合、テキストが長すぎる、トークンがオーバーフローする問題を解決します。
|
||||
- version 2.4: (1)PDF全文翻訳機能を追加。(2)入力エリアの位置を切り替える機能を追加。(3)垂直レイアウトオプションを追加。(4)マルチスレッド関数プラグインの最適化。
|
||||
- version 2.3: 多スレッドの相互作用性を向上させました。
|
||||
- version 2.2: 関数プラグインでホットリロードをサポート
|
||||
- version 2.1: 折りたたみ式レイアウト
|
||||
- version 2.0: モジュール化された関数プラグインを導入
|
||||
- version 1.0: 基本機能
|
||||
|
||||
## 参考および学習
|
||||
|
||||
|
||||
|
||||
|
||||
```
|
||||
多くの優秀なプロジェクトの設計を参考にしています。主なものは以下の通りです:
|
||||
|
||||
# 参考プロジェクト1:ChuanhuChatGPTから多くのテクニックを借用
|
||||
https://github.com/GaiZhenbiao/ChuanhuChatGPT
|
||||
|
||||
# 参考プロジェクト2:清華ChatGLM-6B:
|
||||
https://github.com/THUDM/ChatGLM-6B
|
||||
```
|
||||
|
||||
docs/README_RS.md · regular file · 291 lines
@@ -0,0 +1,291 @@
|
||||
> **Note**
|
||||
>
|
||||
> Этот README-файл автоматически сгенерирован плагином перевода markdown этого проекта и может быть не на 100% точным.
|
||||
>
|
||||
|
||||
# <img src="logo.png" width="40" > ChatGPT Academic Optimization
|
||||
|
||||
**Если вам понравился этот проект, пожалуйста, поставьте ему звезду. Если вы придумали более полезные академические ярлыки или функциональные плагины, не стесняйтесь открыть issue или pull request. У нас также есть [README на английском языке](docs/README_EN.md), переведённый этим же проектом.**
|
||||
|
||||
> **Примечание**
|
||||
>
|
||||
> 1. Пожалуйста, обратите внимание, что только функциональные плагины (кнопки) с **красным цветом** могут читать файлы; некоторые из них находятся в **выпадающем меню** плагинов. Кроме того, мы приветствуем и обрабатываем любые новые плагины с **наивысшим приоритетом**!
|
||||
>
|
||||
> 2. Функции каждого файла в этом проекте подробно описаны в собственном анализе [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) . При повторных итерациях вы также можете вызывать обновленный отчет функций проекта, щелкнув соответствующий функциональный плагин GPT. Часто задаваемые вопросы собраны в [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) .
|
||||
|
||||
<div align="center">
|
||||
|
||||
Функция | Описание
|
||||
--- | ---
|
||||
Редактирование одним кликом | Поддержка редактирования одним кликом, поиск грамматических ошибок в академических статьях
|
||||
Переключение языков "Английский-Китайский" одним кликом | Одним кликом переключайте языки "Английский-Китайский"
|
||||
Разъяснение программного кода одним кликом | Вы можете правильно отобразить и объяснить программный код.
|
||||
[Настраиваемые сочетания клавиш](https://www.bilibili.com/video/BV14s4y1E7jN) | Поддержка настраиваемых сочетаний клавиш
|
||||
[Настройка сервера-прокси](https://www.bilibili.com/video/BV1rc411W7Dr) | Поддержка настройки сервера-прокси
|
||||
Модульный дизайн | Поддержка настраиваемых функциональных плагинов высших порядков и функциональных плагинов, поддерживающих [горячее обновление](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
|
||||
[Автоанализ программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] [Прочтение в один клик](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) кода программы проекта
|
||||
[Анализ программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] Один клик для проанализирования дерева других проектов Python/C/C++/Java/Lua/...
|
||||
Чтение статей| [Функциональный плагин] Одним кликом прочитайте весь латех (LaTex) текст статьи и сгенерируйте краткое описание
|
||||
Перевод и редактирование всех статей из LaTex | [Функциональный плагин] Перевод или редактирование LaTex-статьи всего одним нажатием кнопки
|
||||
Генерация комментариев в пакетном режиме | [Функциональный плагин] Одним кликом сгенерируйте комментарии к функциям в пакетном режиме
|
||||
Генерация отчетов пакета CHAT | [Функциональный плагин] Автоматически создавайте сводные отчеты после выполнения
|
||||
[Помощник по arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Функциональный плагин] Введите URL статьи arxiv, чтобы легко перевести резюме и загрузить PDF-файл
|
||||
[Перевод полного текста статьи в формате PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Функциональный плагин] Извлеките заголовок статьи, резюме и переведите весь текст статьи (многопоточно)
|
||||
[Помощник интеграции Google Scholar](https://www.bilibili.com/video/BV19L411U7ia) | [Функциональный плагин] Дайте GPT выбрать для вас интересные статьи на любой странице поиска Google Scholar.
|
||||
Отображение формул/изображений/таблиц | Одновременно отображаются tex-форма и рендер-форма формул; поддерживается подсветка формул и кода
|
||||
Поддержка функциональных плагинов многопоточности | Поддержка многопоточной работы с плагинами, обрабатывайте огромные объемы текста или программы одним кликом
|
||||
Запуск тёмной темы gradio [подробнее](https://github.com/binary-husky/chatgpt_academic/issues/173) | Добавьте /?__dark-theme=true в конец URL браузера, чтобы переключиться на тёмную тему.
|
||||
[Поддержка нескольких моделей LLM](https://www.bilibili.com/video/BV1wT411p7yf), поддержка API2D | Находиться между GPT3.5, GPT4 и [清华ChatGLM](https://github.com/THUDM/ChatGLM-6B) должно быть очень приятно, не так ли?
|
||||
Альтернатива huggingface без использования научной сети [Онлайн-эксперимент](https://huggingface.co/spaces/qingxu98/gpt-academic) | Войдите в систему, скопируйте пространство [этот пространственный URL](https://huggingface.co/spaces/qingxu98/gpt-academic)
|
||||
…… | ……
|
||||
|
||||
|
||||
</div>

- New interface (change the LAYOUT option in config.py to switch between the "left-right layout" and the "top-down layout")

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
</div>

- All buttons are generated dynamically by reading functional.py, so custom functionality can be added freely, which frees up the clipboard.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Polishing/proofreading

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are shown in both TeX and rendered form at the same time, which makes them easy to copy and to read.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Too lazy to read the project's code? Just show the whole thing to chatgpt.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Mixed use of multiple large language models (ChatGLM + OpenAI GPT-3.5 + [API2D](https://api2d.com/) GPT-4)

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

A multi-LLM mix is also available in the [huggingface beta](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm).

---

## Installation - Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
```

2. Configure the API_KEY and the proxy settings

In `config.py`, configure the overseas proxy and your OpenAI API KEY, as explained below:
```
1. If you are in mainland China, you need to set up an overseas proxy to use the OpenAI API. Read config.py carefully for the details (1. change USE_PROXY to True; 2. fill in the proxy settings as instructed).
2. Configure the OpenAI API KEY. Register on the OpenAI website, obtain an API KEY, and set it in config.py.
3. Issues related to proxies and networking (network timeouts, proxy failures) are collected here: https://github.com/binary-husky/chatgpt_academic/issues/1
```
(Note: on startup the program checks for a private configuration file named `config_private.py` and lets its settings override the ones with the same names in `config.py`. If you understand this loading logic, we strongly recommend creating a `config_private.py` next to `config.py` and moving (copying) your settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer.)
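
For illustration, a minimal `config_private.py` might override just the sensitive entries; the variable names follow `config.py`, but the key below is a placeholder and the proxy address is only an example:

```
# config_private.py - entries here override the ones with the same names in config.py.
# This file is ignored by git, so secrets stay on your machine.
API_KEY = "sk-..."        # placeholder: put your real OpenAI API key here
USE_PROXY = True
proxies = {
    # example proxy endpoints; adjust protocol/host/port to your own setup
    "http":  "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
```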

3. Install the dependencies
```sh
# (Option 1) Recommended
python -m pip install -r requirements.txt

# (Option 2) If you use anaconda, the steps are similar:
# (Step 2.1) conda create -n gptac_venv python=3.11
# (Step 2.2) conda activate gptac_venv
# (Step 2.3) python -m pip install -r requirements.txt

# Note: use the official pip source or the Aliyun pip source; other pip mirrors may cause problems. To switch the source temporarily:
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
```

If you need support for Tsinghua ChatGLM, install the additional dependencies (prerequisites: comfortable with Python, and a machine with a reasonably strong configuration):
```sh
python -m pip install -r request_llm/requirements_chatglm.txt
```

4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Test Python project analysis
    Enter `./crazy_functions/test_project/python/dqn` in the input area, then click "Parse the entire Python project"
- Test the project's self-analysis
    Click "[Multi-threading demo] Parse this project itself (source-code self-explanation)"
- Test the template plugin (it asks GPT what happened in history on today's date; use this function as a template for implementing more complex functions)
    Click "[Template plugin demo] Today in history"
- More functions are available from the dropdown menu in the bottom panel
```

## Installation - Method 2: Using Docker (Linux)

1. ChatGPT only (recommended for most users):
``` sh
# Download the project
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
# Configure the overseas proxy and the OpenAI API KEY
# (edit config.py in any text editor)
# Build
docker build -t gpt-academic .
# Run
docker run --rm -it --net=host gpt-academic

# Test the function plugins
## Test the template plugin (it asks GPT what happened in history on today's date); use this function as a template for more complex functions
Click "[Template plugin demo] Today in history"
## Test the LaTeX abstract summary
Enter ./crazy_functions/test_project/latex/attention in the input area, then click "Read the LaTeX paper and write its abstract"
## Test Python project analysis
Enter ./crazy_functions/test_project/python/dqn in the input area, then click "Parse the entire Python project"

More function plugins are available from the dropdown menu at the bottom.
```

2. ChatGPT + ChatGLM (requires solid Docker knowledge and sufficiently powerful hardware):

``` sh
# Edit the Dockerfile
cd docs && nano Dockerfile+ChatGLM
# Build (Dockerfile+ChatGLM lives under docs, so cd docs first)
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# Run (variant: enter the container and adjust settings before launching)
docker run --rm -it --net=host --gpus=all gpt-academic bash
```

## Installation - Method 3: Other deployment options

1. Deployment on a remote cloud server
Please visit [Deploy Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

2. Using WSL2 (Windows Subsystem for Linux)
Please visit [Deploy Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)

## Installation - Proxy configuration
### Method 1: The regular way
[Configure the proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)

### Method 2: Beginner's guide
[Beginner's guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)

---

## Adding convenient custom buttons (customizing shortcut buttons for academic work)

Open `core_functional.py` in any text editor, add an entry as shown below, and restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix can be hot-edited and take effect without restarting.)

For example:
```
"Super En-Ru": {
    # Prefix, prepended to your input, e.g. to describe your request: translation, code explanation, polishing, and so on
    "Prefix": "Please translate the following passage into Russian, then create a step-by-step markdown table explaining every specialized term that appears in the text:\n\n",

    # Suffix, appended to your input, e.g. together with the prefix it can wrap your input in quotation marks
    "Suffix": "",
},
```
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

---

## Demonstration of selected capabilities

### Image display:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
</div>

### When the program can understand and take apart itself:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
</div>

### Analysis of other Python/C++ projects:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
</div>

### One-click comprehension and abstract generation for LaTeX papers

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
</div>

### Automatic report generation

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

### Modular plugin design

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>

### Source-code translation into English

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
</div>

## Todo and version plan:

- version 3.2+ (todo): function plugins support richer parameter interfaces
- version 3.1: support querying multiple GPT models at the same time! Support api2d; support load-balancing across multiple API keys.
- version 3.0: support for chatglm and other small LLMs
- version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
- version 2.5: self-updating; fixed the over-long-text and token-overflow problems when translating the source code of a whole project
- version 2.4: (1) added full-PDF translation; (2) added the option to reposition the input area; (3) added the vertical layout option; (4) optimized the multi-threaded function plugins.
- version 2.3: improved multi-threaded interactivity
- version 2.2: function plugins support hot reload
- version 2.1: collapsible layout
- version 2.0: modular function-plugin design
- version 1.0: base functionality

## References and learning

```
The code borrows many good designs from other excellent projects, including:

# Project 1: many tricks borrowed from ChuanhuChatGPT
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 2: THUDM's ChatGLM-6B
https://github.com/THUDM/ChatGLM-6B
```

docs/WithFastapi.md (regular file, 43 lines)
@@ -0,0 +1,43 @@

# Running with fastapi

We currently support fastapi in order to solve the sub-path deployment issue.

1. Change the CUSTOM_PATH setting in `config.py`

``` sh
nano config.py
```
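
As a hedged example, the setting might then look like this (the path value is illustrative; "/" means no sub-path):

```
# in config.py
CUSTOM_PATH = "/gpt_academic/"   # illustrative sub-path; "/" disables sub-path serving
```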

2. Edit main.py

```diff
    auto_opentab_delay()
-   demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
+   demo.queue(concurrency_count=CONCURRENT_COUNT)

-   # Run under a sub-path if required
-   # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
-   # if CUSTOM_PATH != "/":
-   #     from toolbox import run_gradio_in_subpath
-   #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
-   # else:
-   #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

+   # Run under a sub-path if required
+   CUSTOM_PATH, = get_conf('CUSTOM_PATH')
+   if CUSTOM_PATH != "/":
+       from toolbox import run_gradio_in_subpath
+       run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
+   else:
+       demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

if __name__ == "__main__":
    main()
```

3. Go!

``` sh
python main.py
```

docs/demo.jpg (binary file, not shown; 262 KiB)
docs/demo2.jpg (binary file, not shown; 264 KiB)
docs/logo.png (binary file, not shown; 11 KiB)

docs/self_analysis.md (regular file, 256 lines)
@@ -0,0 +1,256 @@

# chatgpt-academic project self-analysis report
(Author's note: every analysis below was generated in one click by this project itself calling ChatGPT; if anything is inaccurate, blame GPT 😄)

## Summarize the program's overall function and architecture, then organize each file's function into a markdown table.

Overall summary:

This program is a scientific-paper assistant built on natural language processing and machine learning. Its main features include a chatbot, batch PDF summarization, batch PDF translation, function-comment generation, and project source-code analysis. It serves a web UI built with Gradio and integrates proxy support and auto-updating, which improves the user experience.

File function table:

| File | Function |
| --- | --- |
| check_proxy.py | Checks that the proxy is configured correctly and reachable |
| colorful.py | Constants for preset colors, used by several UI elements |
| config.py | Class for global configuration |
| config_private.py | Companion configuration file to config.py, used for changing private information |
| core_functional.py | Contains TextFunctional classes and basic utility functions |
| crazy_functional.py | Contains many advanced and experimental feature functions |
| main.py | Main entry point; contains the GUI main window and the main UI management |
| theme.py | Colors for preset themes |
| toolbox.py | Provides useful utility functions |
| crazy_functions\crazy_utils.py | Helper functions used to implement the advanced features |
| crazy_functions\Latex全文润色.py | Full-text polishing and formatting of LaTeX files |
| crazy_functions\Latex全文翻译.py | Translation of LaTeX file contents |
| crazy_functions\_\_init\_\_.py | Imports the feature functions from crazy_functional.py |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads papers from Arxiv and extracts the key information |
| crazy_functions\代码重写为全英文_多线程.py | Rewrites Chinese Python files into all-English ones |
| crazy_functions\总结word文档.py | Extracts the key content of Word files to generate summaries |
| crazy_functions\批量Markdown翻译.py | Batch-translates Markdown files |
| crazy_functions\批量总结PDF文档.py | Batch-extracts summaries from PDF files |
| crazy_functions\批量总结PDF文档pdfminer.py | Batch-extracts summaries from PDF files (pdfminer backend) |
| crazy_functions\批量翻译PDF文档_多线程.py | Batch-translates PDF files |
| crazy_functions\理解PDF文档内容.py | Batch-analyzes PDF files and extracts summaries |
| crazy_functions\生成函数注释.py | Auto-generates comments for the functions in Python files |
| crazy_functions\解析项目源代码.py | Parses and analyzes the source code of a given project |
| crazy_functions\询问多个大语言模型.py | Sends the input text to several large language models and processes the answers |
| crazy_functions\读文献写摘要.py | Reads literature specified by the user and generates a summary |
| crazy_functions\谷歌检索小助手.py | Retrieves papers supplied by the user via Google Scholar and extracts the relevant information |
| crazy_functions\高级功能函数模板.py | Template function for implementing advanced features |
| request_llm\bridge_all.py | Handles the interaction with LLMs |
| request_llm\bridge_chatglm.py | Chats with the ChatGLM model |
| request_llm\bridge_chatgpt.py | Implements the dialog-generation features |
| request_llm\bridge_tgui.py | Interacts with the user over Websockets and generates text output |

## [0/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\check_proxy.py

This file consists mainly of four functions: check_proxy, backup_and_download, patch_and_restart and auto_update. check_proxy checks whether the proxy is usable; backup_and_download performs the backup and download steps of the one-click update; patch_and_restart is the key step of the one-click update protocol, overwriting the files and restarting; auto_update queries the latest version, asks for the user's consent, and performs the one-click update automatically. The file mainly uses the requests, json, shutil, zipfile, distutils and subprocess standard-library modules, plus the project's own toolbox and colorful modules.
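
As a rough sketch of the proxy check (the geo-IP endpoint here is an assumption for illustration, not necessarily the one the project queries):

```
import requests

def check_proxy(proxies):
    # Query a geo-IP service through the proxy; if it answers, report the egress location.
    try:
        resp = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4)
        return f"proxy OK, egress location: {resp.json().get('country_name', 'unknown')}"
    except Exception:
        return "proxy unreachable or misconfigured"
```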

## [1/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\colorful.py

This program file implements print functions with colored output. When the system is Linux, coloring is skipped directly; otherwise the colorama library provides the color output. Both dark and bright color variants are offered, along with aliases for the print functions. When the output is not a terminal, all print functions are redefined as plain prints so that redirected output avoids garbled logs.

## [2/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\config.py

This is a configuration file whose main job is to provide the API key and similar information and to tune the experience, e.g. the dialog height and layout. It also holds other settings, such as the number of parallel threads and the retry limit.

## [3/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\config_private.py

This Python file, config_private.py, configures the API_KEY and proxy information. API_KEY is a private key used to access protected APIs. Setting USE_PROXY to True enables the proxy, and the proxies variable holds the proxy network's address and protocol. To use the file, fill in the correct API_KEY and proxy details.

## [4/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\core_functional.py

This file is a Python module named "core_functional.py". It defines a dictionary holding the configuration of the core features, such as English academic polishing, Chinese academic polishing and grammar checking. Each feature has a prelude and a postscript: the prelude describes the feature's task and requirements, the postscript adds extra information. Some features also define dedicated handler functions and button colors.

## [5/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functional.py

This is a Python program file named crazy_functional.py. It imports the HotReload helper from the toolbox and defines get_crazy_functions(). That function assembles three groups of plugins: a first group that is finished, a second group that is tested but still a step short of perfect, and a third group that is not yet fully tested. Each plugin has a name, a button color, a function, and a flag saying whether it goes into the dropdown menu. The plugins provide many features, including function-comment generation, project source-code analysis, batch PDF translation, Google Scholar retrieval, PDF comprehension, and full-text LaTeX polishing and translation. The third group may still contain some bugs.
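
A minimal sketch of one registry entry in the shape described above (the plugin name and wrapped function are illustrative, not the project's full list):

```
from toolbox import HotReload
from crazy_functions.高级功能函数模板 import 高阶功能模板函数

def get_crazy_functions():
    return {
        "历史上的今天": {
            "Color": "stop",                          # button color
            "AsButton": True,                         # False would move it into the dropdown
            "Function": HotReload(高阶功能模板函数),   # wrapper enabling hot reload
        },
    }
```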

## [6/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\main.py

This Python script implements an interactive chatbot. It uses the Gradio framework to build a web interface with an embedded text input box and the other controls for talking to the bot: submit, reset, stop and clear buttons, checkboxes and sliders. It also contains classes, functions and some program-analysis tooling. The file is clearly structured and richly commented, with plenty of technical detail, so developers can easily build on it, modify it, extend it and integrate it.

## [7/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\theme.py

theme.py adjusts Gradio's global styling: theme colors, fonts, shadows, borders, gradients and so on. It also adds some advanced CSS, e.g. table cell backgrounds and borders, and chat-bubble radii, maximum widths and shadows. If CODE_HIGHLIGHT is True, code highlighting is styled as well.

## [8/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\toolbox.py

This source file, `toolbox.py`, contains a collection of utility functions and decorators for developing and debugging the chat bot, including reassembling input arguments, catching exceptions inside functions and recording them in the history, and generating Markdown-format chat-record reports. It also contains several functions related to Markdown text conversion.
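
A hedged sketch of the exception-capturing decorator mentioned above (the real `toolbox.CatchException` differs in details such as trace formatting):

```
import functools, traceback

def CatchException(f):
    # Wrap a generator-style plugin so that any crash is appended to the chat
    # history instead of killing the UI thread.
    @functools.wraps(f)
    def decorated(txt, top_p, temperature, chatbot, history, *args, **kwargs):
        try:
            yield from f(txt, top_p, temperature, chatbot, history, *args, **kwargs)
        except Exception:
            tb_str = traceback.format_exc()
            chatbot.append((txt, "[Local Message] plugin raised an exception:\n" + tb_str))
            yield chatbot, history, "exception recorded in history"
    return decorated
```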

## [9/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\crazy_utils.py

This Python file, `crazy_utils.py`, contains two functions:

- `input_clipping(inputs, history, max_token_limit)`: takes a string inputs, a list history and an integer max_token_limit. Using the `tiktoken`, `numpy` and `toolbox` modules, it clips the input text and the history down to the given maximum token count, avoiding the performance problems caused by over-long inputs. If inputs is shorter than half of max_token_limit, only the history is clipped; otherwise both the input and the history are clipped.
- `request_gpt_model_in_new_thread_with_ui_alive(inputs, inputs_show_user, llm_kwargs, chatbot, history, sys_prompt, refresh_interval=0.2, handle_token_exceed=True, retry_times_at_unknown_error=2)`: takes the parameters listed (some are lists, the others scalars or handles). It keeps the dialog window refreshed while running `predict_no_ui_long_connection` in a new thread, sending the input to the GPT model and collecting the result; if the subtask fails it returns the corresponding error message, otherwise it returns the result.
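
A rough sketch of the clipping logic described above, assuming a tiktoken encoder (the real `input_clipping` clips more carefully):

```
import tiktoken

def input_clipping(inputs, history, max_token_limit):
    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
    n_tokens = lambda s: len(enc.encode(s, disallowed_special=()))
    # If the input itself is short, only the history needs to shrink.
    clip_input_too = n_tokens(inputs) >= max_token_limit // 2
    while history and n_tokens(inputs) + sum(map(n_tokens, history)) > max_token_limit:
        history.pop(0)                       # drop the oldest history entry first
    if clip_input_too and n_tokens(inputs) > max_token_limit:
        ids = enc.encode(inputs, disallowed_special=())[:max_token_limit]
        inputs = enc.decode(ids)             # hard-truncate the input as a last resort
    return inputs, history
```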

## [10/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\Latex全文润色.py

This file, "crazy_functions\Latex全文润色.py", contains the functions Latex英文润色 (English polishing) and Latex中文润色 (Chinese polishing) plus several helpers. They polish a LaTeX project; the main multi-file polishing routine calls the helpers to read and process the files of the LaTeX project. The functions use multi-threading and a language model for the natural-language work, simplifying and formatting the files to meet academic standards. Comments are stripped beforehand and can be looked up inside the function.

## [11/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\Latex全文翻译.py

This file provides `Latex英译中` for translating a whole LaTeX project into Chinese and `Latex中译英` for translating Chinese into English. Both functions try to import the tiktoken dependency and, if it cannot be imported, prompt the user to install it. `Latex英译中` splits the project's files, strips comments, and then runs the translation in multiple threads; `Latex中译英` does the same in the opposite direction. The file also contains a few other helper functions.

## [12/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\__init__.py

This is the Python package `crazy_functions`. Its `__init__.py` defines the following functions:

- `crazy_addition(a, b)`: adds two numbers and returns the result.
- `crazy_multiplication(a, b)`: multiplies two numbers and returns the result.
- `crazy_subtraction(a, b)`: subtracts two numbers and returns the result.
- `crazy_division(a, b)`: divides two numbers and returns the result.
- `crazy_factorial(n)`: computes the factorial of `n` and returns the result.

As the names suggest, these functions may have unusual or unconventional implementations, which is presumably why the package is named `crazy_functions`: a hint that the implementations can be a little "crazy".

## [13/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\下载arxiv论文翻译摘要.py

This program implements the function plugin "download arxiv paper and translate abstract", authored by binary-husky. Given the URL of an arxiv paper, it extracts the abstract, downloads the PDF, translates the abstract into Chinese, and writes the translation to a file. It uses Python libraries such as requests, pdfminer and beautifulsoup4. The entry point is the function of the same name, which relies on the custom helpers download_arxiv_ and get_name, as well as other helpers and variables such as update_ui, CatchException, report_exception and get_conf.

## [14/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\代码重写为全英文_多线程.py

This file is a multi-threaded Python script containing several functions and third-party API requests. Its main job is to take all Python files in a given folder and convert every piece of Chinese in them into English, emitting the converted code. The important steps are:

1. Clear the history to avoid input overflow
2. Try to import the dependencies; suggest installation commands if any are missing
3. Collect the files
4. Show placeholder content so the UI does not feel stalled
5. Truncate and pre-process under the token limit
6. Issue the Chinese-to-English conversion requests from multiple threads
7. Start all thread tasks simultaneously
8. Poll the threads until all of them finish
9. Write the results to files
10. Back up the original file

## [15/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\总结word文档.py

This Python file, "总结word文档.py", imports the toolbox and crazy_utils modules and can parse both docx- and doc-format files. Its 解析docx function applies natural-language processing to the file contents and produces Chinese and English summaries of the document's sections; it uses the docx module and win32com.client to parse docx and doc files respectively, and calls request_gpt_model_in_new_thread_with_ui_alive to query the GPT model. The file also implements 总结word文档, which summarizes Word documents in batch.

## [16/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量Markdown翻译.py

This file implements batch Markdown translation: it translates the Markdown files of a source project into a target language (currently Chinese↔English). It has three main parts: the PaperFileGroup class handles splitting long texts; 多文件翻译 is the main routine, calling request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency to translate in multiple threads and emit the results; Markdown英译中 and Markdown中译外 are the entry points for each direction, resolving the project path and invoking the translator. The implementation depends on tiktoken among other libraries.

## [17/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档.py

"批量总结PDF文档" is a Python script with several functions. clean_text cleans and normalizes the raw text extracted from a PDF: it converts ligatures back to their base form and uses a heuristic rule to decide whether each newline separates paragraphs, replacing it accordingly. 解析PDF takes a list of PDF files, parses each one, extracts the text, cleans it with clean_text, then sends the user a question containing the article's summary information and waits for the answer. Finally, the main function 批量总结PDF文档 calls 解析PDF to process the PDF files in batch.
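
A hedged sketch of the two cleaning steps described above (the real `clean_text` applies more heuristics than shown):

```
import unicodedata

def clean_text(raw_text):
    # Step 1: NFKC normalization folds ligatures such as "ﬁ" back to base letters ("fi").
    text = unicodedata.normalize("NFKC", raw_text)
    merged = []
    for line in text.splitlines():
        line = line.strip()
        # Step 2 (heuristic): a line ending with '.' likely ends a paragraph;
        # otherwise treat the newline as a soft wrap inside a sentence.
        merged.append(line + ("\n" if line.endswith(".") else " "))
    return "".join(merged)
```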

## [18/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量总结PDF文档pdfminer.py

This Python module defines a batch PDF-summarization function. The function accepts some parameters and then tries to import the pdfminer and beautifulsoup4 libraries. It reads the contents of PDF or TeX files, analyzes them, and produces natural-language summaries with the GPT model. The file also contains a helper, readPdf, for reading the content of a PDF file.

## [19/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\批量翻译PDF文档_多线程.py

This Python script, crazy_functions\批量翻译PDF文档_多线程.py, provides 批量翻译PDF文档, which translates PDF files in batch and produces report files. It uses several modules and helpers (toolbox, crazy_utils, update_ui, ...), Python's exception handling and multi-threading, plus text-processing helpers and third-party libraries such as fitz and tiktoken. While running, it validates the parameters, reads and cleans the PDF text, splits the PDF recursively, extracts the article's meta information, translates in multiple threads, formats the report, updates the UI, and writes the report files.

## [20/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\理解PDF文档内容.py

This Python program, "理解PDF文档内容.py", parses the content of a PDF file in five steps: step 0 splits the PDF; step 1 extracts the high-value information from the abstract into the history; step 2 iterates over the whole article, distilling the key information; step 3 reorganizes the history; step 4 applies a token ceiling so the answer does not overflow the token limit. It relies on modules and libraries such as toolbox, tiktoken and pymupdf.

## [21/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\生成函数注释.py

"生成函数注释" is a function decorated with @CatchException so exceptions are captured. Given file paths, parameters and the chatbot handle, it generates comments for the functions in multiple Python or C++ files, using helpers from the toolbox and crazy_utils modules. It reads each specified file in turn, interacts through the chatbot to request the comment text, and emits the generated comments together with the original content in a markdown table. Finally it returns a string indicating whether the task finished. A companion function, "批量生成函数注释", drives it over many files in batch.

## [22/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\解析项目源代码.py

This file analyzes the source code of a project. The functions `解析项目本身`, `解析一个Python项目`, `解析一个C项目的头文件`, `解析一个C项目`, `解析一个Java项目` and `解析一个Rect项目` each handle one project type. `解析源代码新` analyzes each source file and aggregates the results, with grouping and iterative processing to improve efficiency. Finally, `write_results_to_file` writes all results to a file. Along the way, `request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency` and `request_gpt_model_in_new_thread_with_ui_alive` issue the requests and collect the responses, and `update_ui` refreshes the interface in real time.
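
For illustration, each per-language entry point can be a thin wrapper that collects files and hands them to the shared analyzer, roughly like this (the glob pattern is illustrative):

```
import glob

def 解析一个Python项目(txt, *args, **kwargs):
    # Collect every .py file under the project root, then delegate to the shared analyzer.
    file_manifest = glob.glob(f"{txt}/**/*.py", recursive=True)
    yield from 解析源代码新(file_manifest, txt, *args, **kwargs)
```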

## [23/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\询问多个大语言模型.py

This Python program, "crazy_functions\询问多个大语言模型.py", queries several large language models at once: it takes the user's input text and the model parameters, sends requests to the ChatGPT and ChatGLM models, shows the conversation in the chat box, and refreshes the interface.

## [24/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\读文章写摘要.py

This Python module, "读文章写摘要.py", mainly contains two functions: "解析Paper" and "读文章写摘要". 解析Paper takes file paths and parameters, prints each file's content in turn, and asks the GPT model to summarize the file. 读文章写摘要 takes a piece of text plus parameters, feeds it and all its .tex files one by one to 解析Paper, and has the GPT model produce Chinese and English abstracts of the article. The file also imports utilities for exception handling, reporting and file writing.

## [25/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\谷歌检索小助手.py

This file contains a function named `get_meta_information` and a decorated plugin function named `谷歌检索小助手`, which scrape article metadata from Google Scholar and analyze the information on all articles of a user-provided search page. The file uses many third-party libraries, such as requests, arxiv and BeautifulSoup. Inside `get_meta_information` there is also a helper, `string_similar`, for comparing string similarity.
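
The similarity helper plausibly wraps difflib, e.g. as follows (an assumption; only the helper's purpose is documented above):

```
import difflib

def string_similar(s1: str, s2: str) -> float:
    # Ratio in [0, 1]; 1.0 means the strings are identical.
    return difflib.SequenceMatcher(None, s1, s2).ratio()
```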

## [26/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\crazy_functions\高级功能函数模板.py

This Python module contains a function named "高阶功能模板函数" (the advanced-feature template function). It takes several parameters, including the input text, GPT model parameters, plugin parameters, the chat display box and the chat history. Based on the input text, it asks the GPT model to generate a set of questions, waits for the answers (in Markdown format), appends the answers to the chat history, and updates the chat display. It also includes exception handling and threading logic, imports the CatchException and update_ui functions from another module, and calls the custom helper request_gpt_model_in_new_thread_with_ui_alive.

## [27/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_all.py

This file handles the interaction with LLMs. It contains two functions: predict_no_ui_long_connection handles long-text output and can be called from multiple threads; predict handles the basic conversation features. The file imports the methods defined in the other files and dispatches to one of them depending on the arguments it receives. It also contains decorators and the logic for managing multiple threads.
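
A minimal sketch of the dispatch pattern described above (not the project's exact code; the route table is illustrative):

```
from request_llm.bridge_chatgpt import predict as chatgpt_ui
from request_llm.bridge_chatglm import predict as chatglm_ui

MODEL_ROUTES = {"gpt-3.5-turbo": chatgpt_ui, "chatglm": chatglm_ui}

def predict(inputs, llm_kwargs, *args, **kwargs):
    # Pick the backend by model name, then stream its output unchanged.
    method = MODEL_ROUTES[llm_kwargs["llm_model"]]
    yield from method(inputs, llm_kwargs, *args, **kwargs)
```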

## [28/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_chatglm.py

This file implements chatting with the ChatGLM model. After initialization, the GetGLMHandle class loads and runs the ChatGLM model. predict_no_ui_long_connection serves multi-threaded chat, while predict serves single-threaded chat; the difference is that the former does not update the UI and the latter does. The file also imports other modules and libraries, such as transformers, time and importlib, and uses a multiprocessing Pipe.

## [29/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_chatgpt.py

This file powers dialog generation and mainly contains three functions: predict, predict_no_ui and predict_no_ui_long_connection. predict serves normal conversation with full interactivity but no multi-threading; predict_no_ui is called by the advanced experimental feature modules, takes simple arguments, can run in parallel threads, and makes complex feature logic easy to implement; predict_no_ui_long_connection fixes predict_no_ui's tendency to drop the connection when processing long documents, and likewise supports multi-threading. The file also contains constants and utilities for assembling the message, selecting the LLM model, building the HTTP request, sending it and receiving the response. It requires a config file holding sensitive information such as the proxy URL and the API key.

## [30/31] Please give an overview of the following program file: H:\chatgpt_academic_resolve\request_llm\bridge_tgui.py

This file implements a Websocket-based text-generation service and chat feature with three functions: `run()`, `predict()` and `predict_no_ui_long_connection()`. `run()` connects to the Websocket service and produces the generated text; `predict()` feeds the user's input to the generator, shows the dialog history in the UI, and keeps updating both the UI and the generated output; `predict_no_ui_long_connection()` is similar to `predict()` but has no UI and returns a single generated text after some time. The program pulls in several Python modules for this, e.g. `asyncio`, `websockets` and `json`.
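
A hedged sketch of the Websocket round-trip described above (the endpoint URI and message framing are assumptions, not the project's actual protocol):

```
import asyncio
import websockets  # third-party: pip install websockets

async def generate(prompt, uri="ws://localhost:7865"):
    result = ""
    async with websockets.connect(uri) as ws:
        await ws.send(prompt)                 # hand the prompt to the server
        try:
            while True:
                result += await ws.recv()     # accumulate streamed fragments
        except websockets.ConnectionClosed:
            pass                              # server closed = generation done
    return result

# print(asyncio.run(generate("Hello")))
```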

## Based on the analysis above, summarize the program's overall function and architecture again. Then organize each file's function into a markdown table (including check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py).

Program overview: the program is a chatbot that the user talks to through a web interface. It offers a rich set of features, such as text polishing, translation, code rewriting and online lookup, and supports multi-threaded processing. Users interact through the web interface provided by the Gradio framework, and the program also ships debugging aids, such as the toolbox module, that make development and debugging easier.

The table below summarizes each file's function:

| File | Function |
| --- | --- |
| check_proxy.py | Checks whether the proxy is usable |
| colorful.py | Module for colored text output when printing |
| config.py | Holds the program's various settings, such as the parallel thread count and retry limits |
| config_private.py | File for configuring the API_KEY and proxy information |
| core_functional.py | Module containing the concrete text-processing features |
| crazy_functional.py | Module collecting the plugin functions, offering many text-processing features |
| main.py | Module containing the chatbot main program |
| theme.py | Module for adjusting the global styling |
| toolbox.py | Utility functions and decorators for developing and debugging the chat bot |
| crazy_functions\crazy_utils.py | Helper functions, such as text clipping and message capture |
| crazy_functions\Latex全文润色.py | Feature module that polishes LaTeX projects |
| crazy_functions\Latex全文翻译.py | Feature module that translates LaTeX projects |
| crazy_functions\__init__.py | Defines some quirky math functions, etc. |
| crazy_functions\下载arxiv论文翻译摘要.py | Feature module that downloads Arxiv papers and translates their abstracts |
| crazy_functions\代码重写为全英文_多线程.py | Feature module that rewrites all Chinese in Python programs into English |
| crazy_functions\总结word文档.py | Feature module that parses docx and doc files and produces Chinese and English summaries of their sections |

## Based on the analysis above, summarize the program's overall function and architecture again. Then organize each file's function into a markdown table (including check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, theme.py, toolbox.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py, crazy_functions\代码重写为全英文_多线程.py, crazy_functions\总结word文档.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py, crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_tgui.py).

Based on the analysis above, the program as a whole is a text-processing and generation tool integrating many useful utilities and features for different scenarios, including but not limited to dialog generation, text summarization, batch PDF processing, code translation and practical helpers. The main Python modules include "toolbox.py", "config.py", "core_functional.py" and "crazy_functional.py", with many third-party libraries and modules used to implement the individual features. Each file's function is listed below:

| File | Function |
| --- | --- |
| check_proxy.py | Checks that the proxy is configured correctly and reachable |
| colorful.py | Constants for preset colors, used by several UI elements |
| config.py | Class for global configuration |
| config_private.py | Companion configuration file to config.py, used for changing private information |
| core_functional.py | Contains TextFunctional classes and basic utility functions |
| crazy_functional.py | Contains many advanced and experimental feature functions |
| main.py | Main entry point; contains the GUI main window and the main UI management |
| theme.py | Colors for preset themes |
| toolbox.py | Provides useful utility functions |
| crazy_functions\crazy_utils.py | Helper functions used to implement the advanced features |
| crazy_functions\Latex全文润色.py | Full-text polishing and formatting of LaTeX files |
| crazy_functions\Latex全文翻译.py | Translation of LaTeX file contents |
| crazy_functions\_\_init\_\_.py | Imports the feature functions from crazy_functional.py |
| crazy_functions\下载arxiv论文翻译摘要.py | Downloads papers from Arxiv and extracts the key information |
| crazy_functions\代码重写为全英文_多线程.py | Rewrites Chinese Python files into all-English ones |
| crazy_functions\总结word文档.py | Extracts the key content of Word files to generate summaries |
| crazy_functions\批量Markdown翻译.py | Batch-translates Markdown files |
| crazy_functions\批量总结PDF文档.py | Batch-extracts summaries from PDF files |
| crazy_functions\批量总结PDF文档pdfminer.py | Batch-extracts summaries from PDF files (pdfminer backend) |
| crazy_functions\批量翻译PDF文档_多线程.py | Batch-translates PDF files |
| crazy_functions\理解PDF文档内容.py | Batch-analyzes PDF files and extracts summaries |
| crazy_functions\生成函数注释.py | Auto-generates comments for the functions in Python files |
| crazy_functions\解析项目源代码.py | Parses and analyzes the source code of a given project |
| crazy_functions\询问多个大语言模型.py | Sends the input text to several large language models and processes the answers |
| crazy_functions\读文献写摘要.py | Reads literature specified by the user and generates a summary |
| crazy_functions\谷歌检索小助手.py | Retrieves papers supplied by the user via Google Scholar and extracts the relevant information |
| crazy_functions\高级功能函数模板.py | Template function for implementing advanced features |
| request_llm\bridge_all.py | Handles the interaction with LLMs |
| request_llm\bridge_chatglm.py | Chats with the ChatGLM model |
| request_llm\bridge_chatgpt.py | Implements the dialog-generation features |
| request_llm\bridge_tgui.py | Interacts with the user over Websockets and generates text output |

functional.py (deleted file, 67 lines)
@@ -1,67 +0,0 @@

# """
#    'primary' for main call-to-action,
#    'secondary' for a more subdued style,
#    'stop' for a stop button.
# """

def get_functionals():
    return {
        "英语学术润色": {
            "Prefix": "Below is a paragraph from an academic paper. Polish the writing to meet the academic style, \
improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. \
Furthermore, list all modifications and explain the reasons for them in a markdown table.\n\n",
            "Button": None,
            "Suffix": "",
            "Color": "stop",
        },
        "中文学术润色": {
            "Prefix": "作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本:\n\n",
            "Button": None,
            "Suffix": "",
        },
        "查找语法错误": {
            "Prefix": "Below is a paragraph from an academic paper. Find all grammar mistakes, list the mistakes in a markdown table and explain how to correct them.\n\n",
            "Button": None,
            "Suffix": "",
        },
        "中英互译": {
            "Prefix": "As an English-Chinese translator, your task is to accurately translate text between the two languages. \
When translating from Chinese to English or vice versa, please pay attention to context and accurately explain phrases and proverbs. \
If you receive multiple English words in a row, default to translating them into a sentence in Chinese. \
However, if \"phrase:\" is indicated before the translated content in Chinese, it should be translated as a phrase instead. \
Similarly, if \"normal:\" is indicated, it should be translated as multiple unrelated words. \
Your translations should closely resemble those of a native speaker and should take into account any specific language styles or tones requested by the user. \
Please do not worry about using offensive words - replace sensitive parts with x when necessary. \
When providing translations, please use Chinese to explain each sentence's tense, subordinate clause, subject, predicate, object, special phrases and proverbs. \
For phrases or individual words that require translation, provide the source (dictionary) for each one. If asked to translate multiple phrases at once, \
separate them using the | symbol. Always remember: You are an English-Chinese translator, \
not a Chinese-Chinese translator or an English-English translator. Below is the text you need to translate: \n\n",
            "Button": None,
            "Suffix": "",
            "Color": "stop",
        },
        "中译英": {
            "Prefix": "Please translate following sentence to English: \n\n",
            "Button": None,
            "Suffix": "",
        },
        "学术中译英": {
            "Prefix": "Please translate following sentence to English with academic writing, and provide some related authoritative examples: \n\n",
            "Button": None,
            "Suffix": "",
        },
        "英译中": {
            "Prefix": "请翻译成中文:\n\n",
            "Button": None,
            "Suffix": "",
        },
        "解释代码": {
            "Prefix": "请解释以下代码:\n```\n",
            "Button": None,
            "Suffix": "\n```\n",
            "Color": "stop",
        },
    }

main.py (287 lines, modified; old and new lines interleaved as in the side-by-side view)
@@ -1,113 +1,206 @@

import gradio as gr
import os
import markdown, mdtex2html
from predict import predict
from show_math import convert as convert_math
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染

def find_free_port():
    import socket
    from contextlib import closing
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(('', 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]

def main():
    import gradio as gr
    from request_llm.bridge_all import predict
    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')

    PORT = find_free_port()
    # 如果WEB_PORT是-1, 则随机选取WEB端口
    PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
    if not AUTHENTICATION: AUTHENTICATION = None

    initial_prompt = "Serve me as a writing and programming assistant."
    title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
    from check_proxy import get_current_version
    initial_prompt = "Serve me as a writing and programming assistant."
    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
    description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""

    import logging
    os.makedirs('gpt_log', exist_ok=True)
    logging.basicConfig(filename='gpt_log/predict.log', level=logging.INFO)
    # 问询记录, python 版本建议3.9+(越新越好)
    import logging
    os.makedirs("gpt_log", exist_ok=True)
    try: logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
    except: logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
    print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")

    # 一些普通功能模块
    from core_functional import get_core_functions
    functional = get_core_functions()

    from functional import get_functionals
    functional = get_functionals()
    def reset_textbox(): return gr.update(value='')
    # 高级函数插件
    from crazy_functional import get_crazy_functions
    crazy_fns = get_crazy_functions()

    def text_divide_paragraph(text):
        if '```' in text:
            # careful input
            return text
        else:
            # wtf input
            lines = text.split("\n")
            for i, line in enumerate(lines):
                if i != 0: lines[i] = "<p>" + lines[i].replace(" ", "&nbsp;") + "</p>"
            text = "".join(lines)
            return text
    # 处理markdown文本格式的转变
    gr.Chatbot.postprocess = format_io

    def markdown_convertion(txt):
        if ('$' in txt) and ('```' not in txt):
            math_config = {'mdx_math': {'enable_dollar_delimiter': True}}
            return markdown.markdown(txt, extensions=['fenced_code', 'tables']) + '<br><br>' + \
                markdown.markdown(convert_math(txt, splitParagraphs=False), extensions=['fenced_code', 'tables'])
        else:
            return markdown.markdown(txt, extensions=['fenced_code', 'tables'])
    # 做一些外观色彩上的调整
    from theme import adjust_theme, advanced_css
    set_theme = adjust_theme()

    # math_config = {'mdx_math': {'enable_dollar_delimiter': True}}
    # markdown.markdown(txt, extensions=['fenced_code', 'tables', 'mdx_math'], extension_configs=math_config)
    # 代理与自动更新
    from check_proxy import check_proxy, auto_update, warm_up_modules
    proxy_info = check_proxy(proxies)

    gr_L1 = lambda: gr.Row().style()
    gr_L2 = lambda scale: gr.Column(scale=scale)
    if LAYOUT == "TOP-DOWN":
        gr_L1 = lambda: DummyWith()
        gr_L2 = lambda scale: gr.Row()
        CHATBOT_HEIGHT /= 2

    def format_io(self, y):
        if y is None:
            return []
        i_ask, gpt_reply = y[-1]
    cancel_handles = []
    with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
        gr.HTML(title_html)
        cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
        with gr_L1():
            with gr_L2(scale=2):
                chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}")
                chatbot.style(height=CHATBOT_HEIGHT)
                history = gr.State([])
            with gr_L2(scale=1):
                with gr.Accordion("输入区", open=True) as area_input_primary:
                    with gr.Row():
                        txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                    with gr.Row():
                        submitBtn = gr.Button("提交", variant="primary")
                    with gr.Row():
                        resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
                        stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                        clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
                    with gr.Row():
                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                    with gr.Row():
                        for k in functional:
                            variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                            functional[k]["Button"] = gr.Button(k, variant=variant)
                with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
                    with gr.Row():
                        gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
                    with gr.Row():
                        for k in crazy_fns:
                            if not crazy_fns[k].get("AsButton", True): continue
                            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
                            crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
                            crazy_fns[k]["Button"].style(size="sm")
                    with gr.Row():
                        with gr.Accordion("更多函数插件", open=True):
                            dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
                            with gr.Row():
                                dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
                            with gr.Row():
                                plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
                                                                 placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
                            with gr.Row():
                                switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
                    with gr.Row():
                        with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
                            file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
                    system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                    top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                    temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                    max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",)
                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)

        i_ask = text_divide_paragraph(i_ask) # 输入部分太自由,预处理一波
                gr.Markdown(description)
                with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
                    with gr.Row():
                        txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
                    with gr.Row():
                        submitBtn2 = gr.Button("提交", variant="primary")
                    with gr.Row():
                        resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
                        stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
                        clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
        # 功能区显示开关与功能区的互动
        def fn_area_visibility(a):
            ret = {}
            ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
            ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
            ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
            ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
            ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
            ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
            ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
            if "底部输入区" in a: ret.update({txt: gr.update(value="")})
            return ret
        checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
        # 整理反复出现的控件句柄组合
        input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
        output_combo = [cookies, chatbot, history, status]
        predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
        # 提交按钮、重置按钮
        cancel_handles.append(txt.submit(**predict_args))
        cancel_handles.append(txt2.submit(**predict_args))
        cancel_handles.append(submitBtn.click(**predict_args))
        cancel_handles.append(submitBtn2.click(**predict_args))
        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
        clearBtn.click(lambda: ("",""), None, [txt, txt2])
        clearBtn2.click(lambda: ("",""), None, [txt, txt2])
        # 基础功能区的回调函数注册
        for k in functional:
            click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
            cancel_handles.append(click_handle)
        # 文件上传区,接收文件后与chatbot的互动
        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
        # 函数插件-固定按钮区
        for k in crazy_fns:
            if not crazy_fns[k].get("AsButton", True): continue
            click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
            click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
            cancel_handles.append(click_handle)
        # 函数插件-下拉菜单与随变按钮的互动
        def on_dropdown_changed(k):
            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
            ret = {switchy_bt: gr.update(value=k, variant=variant)}
            if crazy_fns[k].get("AdvancedArgs", False): # 是否唤起高级插件参数区
                ret.update({plugin_advanced_arg: gr.update(visible=True, label=f"插件[{k}]的高级参数说明:" + crazy_fns[k].get("ArgsReminder", [f"没有提供高级参数功能说明"]))})
            else:
                ret.update({plugin_advanced_arg: gr.update(visible=False, label=f"插件[{k}]不需要高级参数。")})
            return ret
        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt, plugin_advanced_arg] )
        def on_md_dropdown_changed(k):
            return {chatbot: gr.update(label="当前模型:"+k)}
        md_dropdown.select(on_md_dropdown_changed, [md_dropdown], [chatbot] )
        # 随变按钮的回调函数注册
        def route(k, *args, **kwargs):
            if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
            yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
        click_handle = switchy_bt.click(route, [switchy_bt, *input_combo, gr.State(PORT)], output_combo)
        click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
        cancel_handles.append(click_handle)
        # 终止按钮的回调函数注册
        stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
        stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)

        y[-1] = (
            None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code','tables']),
            None if gpt_reply is None else markdown_convertion(gpt_reply)
        )
        return y
    gr.Chatbot.postprocess = format_io
    # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
    def auto_opentab_delay():
        import threading, webbrowser, time
        print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
        print(f"\t(亮色主题): http://localhost:{PORT}")
        print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
        def open():
            time.sleep(2) # 打开浏览器
            webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
        threading.Thread(target=open, name="open-browser", daemon=True).start()
        threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
        threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()

with gr.Blocks() as demo:
    gr.HTML(title_html)
    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot()
            chatbot.style(height=700)
            chatbot.style()
            history = gr.State([])
            TRUE = gr.State(True)
            FALSE = gr.State(False)
        with gr.Column(scale=1):
            with gr.Row():
                with gr.Column(scale=12):
                    txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
                with gr.Column(scale=1):
                    submitBtn = gr.Button("Ask", variant="primary")
            with gr.Row():
                for k in functional:
                    variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                    functional[k]["Button"] = gr.Button(k, variant=variant)
    auto_opentab_delay()
    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

            statusDisplay = gr.Markdown("status: ready")
            systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt).style(container=True)
            #inputs, top_p, temperature, top_k, repetition_penalty
            with gr.Accordion("arguments", open=False):
                top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
    # 如果需要在二级路径下运行
    # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
    # if CUSTOM_PATH != "/":
    #     from toolbox import run_gradio_in_subpath
    #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
    # else:
    #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")

    txt.submit(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
    submitBtn.click(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
    # submitBtn.click(reset_textbox, [], [txt])
    for k in functional:
        functional[k]["Button"].click(predict,
            [txt, top_p, temperature, chatbot, history, systemPromptTxt, FALSE, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)

print(f"URL http://localhost:{PORT}")
demo.title = "ChatGPT 学术优化"

def auto_opentab_delay():
    import threading, webbrowser, time
    def open(): time.sleep(2)
    webbrowser.open_new_tab(f'http://localhost:{PORT}')
    t = threading.Thread(target=open)
    t.daemon = True; t.start()

auto_opentab_delay()
demo.queue().launch(server_name="0.0.0.0", share=True, server_port=PORT)
if __name__ == "__main__":
    main()

predict.py (deleted file, 134 lines)
@@ -1,134 +0,0 @@

import json
import gradio as gr
import logging
import traceback
import requests
import importlib
import os

if os.path.exists('config_private.py'):
    # 放自己的秘密如API和代理网址
    from config_private import proxies, API_URL, API_KEY
else:
    from config import proxies, API_URL, API_KEY


def compose_system(system_prompt):
    return {"role": "system", "content": system_prompt}


def compose_user(user_input):
    return {"role": "user", "content": user_input}


def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', retry=False,
            stream=True, additional_fn=None):

    if additional_fn is not None:
        import functional
        importlib.reload(functional)
        functional = functional.get_functionals()
        inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"]

    if stream:
        raw_input = inputs
        logging.info(f'[raw_input] {raw_input}')
        chatbot.append((inputs, ""))
        yield chatbot, history, "Waiting"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {API_KEY}"
    }

    chat_counter = len(history) // 2

    print(f"chat_counter - {chat_counter}")

    messages = [compose_system(system_prompt)]
    if chat_counter:
        for index in range(0, 2*chat_counter, 2):
            d1 = {}
            d1["role"] = "user"
            d1["content"] = history[index]
            d2 = {}
            d2["role"] = "assistant"
            d2["content"] = history[index+1]
            if d1["content"] != "":
                if d2["content"] != "" or retry:
                    messages.append(d1)
                    messages.append(d2)
            else:
                messages[-1]['content'] = d2['content']
    if retry and chat_counter:
        messages.pop()
    else:
        temp3 = {}
        temp3["role"] = "user"
        temp3["content"] = inputs
        messages.append(temp3)
        chat_counter += 1
    # messages
    payload = {
        "model": "gpt-3.5-turbo",
        # "model": "gpt-4",
        "messages": messages,
        "temperature": temperature,  # 1.0,
        "top_p": top_p,  # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    history.append(inputs)

    try:
        # make a POST request to the API endpoint using the requests.post method, passing in stream=True
        response = requests.post(API_URL, headers=headers, proxies=proxies,
                                 json=payload, stream=True, timeout=15)
    except:
        chatbot[-1] = ((chatbot[-1][0], 'Requests Timeout, Network Error.'))
        yield chatbot, history, "Requests Timeout"
        raise TimeoutError

    token_counter = 0
    partial_words = ""

    counter = 0
    if stream:
        stream_response = response.iter_lines()
        while True:
            chunk = next(stream_response)
            # print(chunk)

            if chunk == b'data: [DONE]':
                break

            if counter == 0:
                counter += 1
                continue
            counter += 1
            # check whether each line is non-empty
            if chunk:
                # decode each line as response data is in bytes
                try:
                    if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                        logging.info(f'[response] {chatbot[-1][-1]}')
                        break
                except Exception as e:
                    traceback.print_exc()

                chunkjson = json.loads(chunk.decode()[6:])
                status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                partial_words = partial_words + \
                    json.loads(chunk.decode()[6:])[
                        'choices'][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                chatbot[-1] = (history[-2], history[-1])
                token_counter += 1
                yield chatbot, history, status_text

request_llm/README.md  (54 lines, new regular file)
@@ -0,0 +1,54 @@
# How to use other large language models

## ChatGLM

- Install the dependencies: `pip install -r request_llm/requirements_chatglm.txt`
- Change the configuration: in config.py, set the value of LLM_MODEL to "chatglm"

``` sh
LLM_MODEL = "chatglm"
```
- Run!
``` sh
python main.py
```


---
## Text-Generation-UI (TGUI, under debugging, not usable yet)

### 1. Deploy TGUI
``` sh
# 1 Clone the repository
git clone https://github.com/oobabooga/text-generation-webui.git
# 2 Change directory
cd text-generation-webui
# 3 The latest code of this repository has issues; roll back to a commit from a few weeks ago
git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d
# 4 Install the extra dependencies of text-generation
pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers
# 5 Download a model
python download-model.py facebook/galactica-1.3b
# Other options include facebook/opt-1.3b
#                       facebook/galactica-1.3b
#                       facebook/galactica-6.7b
#                       facebook/galactica-120b
#                       facebook/pygmalion-1.3b etc.
# See https://github.com/oobabooga/text-generation-webui for details

# 6 Start text-generation
python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b
```

### 2. Edit config.py

``` sh
# LLM_MODEL format: tgui:[model]@[ws address]:[ws port] ; the port must match the one given above
LLM_MODEL = "tgui:galactica-1.3b@localhost:7865"
```

### 3. Run!
``` sh
cd chatgpt-academic
python main.py
```
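As a reading aid, here is a minimal sketch of how the `tgui:[model]@[addr]:[port]` string above can be parsed. The helper name `parse_tgui_model` is hypothetical and not part of the repository; `request_llm/bridge_tgui.py` further below performs the same `@` / `:` split inline.

```python
# Minimal sketch (hypothetical helper, not repository code) of parsing the
# "tgui:[model]@[addr]:[port]" LLM_MODEL format described above.
def parse_tgui_model(llm_model: str):
    assert llm_model.startswith("tgui:"), "expected the 'tgui:' prefix"
    model_at_addr = llm_model[len("tgui:"):]      # e.g. "galactica-1.3b@localhost:7865"
    model_name, addr_port = model_at_addr.split('@')
    addr, port = addr_port.split(':')
    return model_name, addr, int(port)

# parse_tgui_model("tgui:galactica-1.3b@localhost:7865")
# -> ("galactica-1.3b", "localhost", 7865)
```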
request_llm/bridge_all.py  (240 lines, new regular file)
@@ -0,0 +1,240 @@
"""
This file mainly contains 2 functions, which form the common interface of all LLMs.
They call the lower-level LLM models in turn and handle details such as multi-model parallelism.

Function without multi-threading capability: used in normal conversation, with full interactive features, not thread-safe
    1. predict(...)

Function with multi-threading capability: called inside function plugins, flexible and concise
    2. predict_no_ui_long_connection(...)
"""
import tiktoken
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
from toolbox import get_conf, trimmed_format_exc

from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui

from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui

from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
from .bridge_newbing import predict as newbing_ui

# from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
# from .bridge_tgui import predict as tgui_ui

colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']

class LazyloadTiktoken(object):
    def __init__(self, model):
        self.model = model

    @staticmethod
    @lru_cache(maxsize=128)
    def get_encoder(model):
        print('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数')
        tmp = tiktoken.encoding_for_model(model)
        print('加载tokenizer完毕')
        return tmp

    def encode(self, *args, **kwargs):
        encoder = self.get_encoder(self.model)
        return encoder.encode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        encoder = self.get_encoder(self.model)
        return encoder.decode(*args, **kwargs)

# Endpoint redirection
API_URL_REDIRECT, = get_conf("API_URL_REDIRECT")
openai_endpoint = "https://api.openai.com/v1/chat/completions"
api2d_endpoint = "https://openai.api2d.net/v1/chat/completions"
newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
# Compatibility with the old version of the configuration
try:
    API_URL, = get_conf("API_URL")
    if API_URL != "https://api.openai.com/v1/chat/completions":
        openai_endpoint = API_URL
        print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
except:
    pass
# New version of the configuration
if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint]
if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint]
if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint]


# Get the tokenizers
tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
tokenizer_gpt4 = LazyloadTiktoken("gpt-4")
get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=()))
get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=()))


model_info = {
    # openai
    "gpt-3.5-turbo": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },

    "gpt-4": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": openai_endpoint,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },

    # api_2d
    "api2d-gpt-3.5-turbo": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": api2d_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },

    "api2d-gpt-4": {
        "fn_with_ui": chatgpt_ui,
        "fn_without_ui": chatgpt_noui,
        "endpoint": api2d_endpoint,
        "max_token": 8192,
        "tokenizer": tokenizer_gpt4,
        "token_cnt": get_token_num_gpt4,
    },

    # chatglm
    "chatglm": {
        "fn_with_ui": chatglm_ui,
        "fn_without_ui": chatglm_noui,
        "endpoint": None,
        "max_token": 1024,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
    # newbing
    "newbing": {
        "fn_with_ui": newbing_ui,
        "fn_without_ui": newbing_noui,
        "endpoint": newbing_endpoint,
        "max_token": 4096,
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },
}


def LLM_CATCH_EXCEPTION(f):
    """
    Decorator that surfaces errors in the observe window
    """
    def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
        try:
            return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
        except Exception as e:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            observe_window[0] = tb_str
            return tb_str
    return decorated


def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
    """
    Send to the LLM and wait for the reply, completed in one go, without showing intermediate output.
    Internally, a streaming method is still used, to avoid the connection being cut off midway.
    inputs:
        the input of this query
    sys_prompt:
        the silent system prompt
    llm_kwargs:
        internal tuning parameters of the LLM
    history:
        the list of previous conversation turns
    observe_window = None:
        used to pass the already-generated output across threads; most of the time this is only for
        a fancy visual effect and can be left empty. observe_window[0]: observation window.
        observe_window[1]: watchdog
    """
    import threading, time, copy

    model = llm_kwargs['llm_model']
    n_model = 1
    if '&' not in model:
        assert not model.startswith("tgui"), "TGUI不支持函数插件的实现"

        # Query a single large language model:
        method = model_info[model]["fn_without_ui"]
        return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience)
    else:
        # Query multiple large language models at the same time:
        executor = ThreadPoolExecutor(max_workers=4)
        models = model.split('&')
        n_model = len(models)

        window_len = len(observe_window)
        assert window_len==3
        window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]

        futures = []
        for i in range(n_model):
            model = models[i]
            method = model_info[model]["fn_without_ui"]
            llm_kwargs_feedin = copy.deepcopy(llm_kwargs)
            llm_kwargs_feedin['llm_model'] = model
            future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience)
            futures.append(future)

        def mutex_manager(window_mutex, observe_window):
            while True:
                time.sleep(0.25)
                if not window_mutex[-1]: break
                # watchdog
                for i in range(n_model):
                    window_mutex[i][1] = observe_window[1]
                # observation window
                chat_string = []
                for i in range(n_model):
                    chat_string.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {window_mutex[i][0]} </font>" )
                res = '<br/><br/>\n\n---\n\n'.join(chat_string)
                observe_window[0] = res

        t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True)
        t_model.start()

        return_string_collect = []
        while True:
            worker_done = [h.done() for h in futures]
            if all(worker_done):
                executor.shutdown()
                break
            time.sleep(1)

        for i, future in enumerate(futures):  # wait and get
            return_string_collect.append( f"【{str(models[i])} 说】: <font color=\"{colors[i]}\"> {future.result()} </font>" )

        window_mutex[-1] = False  # stop the mutex thread
        res = '<br/><br/>\n\n---\n\n'.join(return_string_collect)
        return res


def predict(inputs, llm_kwargs, *args, **kwargs):
    """
    Send to the LLM and fetch the output as a stream.
    Used for the basic conversation feature.
    inputs is the input of this query
    top_p, temperature are internal tuning parameters of the LLM
    history is the list of previous conversation turns (note that if either inputs or history is too long, a token-overflow error will be triggered)
    chatbot is the conversation list shown in the WebUI; modify it and then yield to update the UI directly
    additional_fn indicates which button was clicked; see functional.py for the buttons
    """

    method = model_info[llm_kwargs['llm_model']]["fn_with_ui"]
    yield from method(inputs, llm_kwargs, *args, **kwargs)
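To make the dispatch pattern of `bridge_all.py` concrete: every model name maps to a record whose `fn_with_ui` / `fn_without_ui` entries are called uniformly. The sketch below reproduces that pattern with placeholder `echo` functions; none of these names are repository code.

```python
# Minimal sketch of the model_info dispatch pattern (placeholder functions,
# not repository code): look up the record for the requested model and call
# the matching entry point.
def echo_ui(inputs, llm_kwargs, *args, **kwargs):
    yield f"[echo] {inputs}"          # stands in for a streaming UI bridge

def echo_noui(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience):
    return f"[echo] {inputs}"         # stands in for a blocking bridge

demo_model_info = {
    "echo": {"fn_with_ui": echo_ui, "fn_without_ui": echo_noui,
             "endpoint": None, "max_token": 1024},
}

def demo_predict(inputs, llm_kwargs):
    method = demo_model_info[llm_kwargs['llm_model']]["fn_with_ui"]
    yield from method(inputs, llm_kwargs)

for chunk in demo_predict("hello", {"llm_model": "echo"}):
    print(chunk)                      # -> "[echo] hello"
```

Adding a new backend is then a matter of registering one more record, which is exactly what the ChatGLM and NewBing entries above do.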
request_llm/bridge_chatglm.py  (154 lines, new regular file)
@@ -0,0 +1,154 @@
from transformers import AutoModel, AutoTokenizer
import time
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe

load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

#################################################################################
class GetGLMHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.chatglm_model = None
        self.chatglm_tokenizer = None
        self.info = ""
        self.success = True
        self.check_dependency()
        self.start()

    def check_dependency(self):
        try:
            import sentencepiece
            self.info = "依赖检测通过"
            self.success = True
        except:
            self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
            self.success = False

    def ready(self):
        return self.chatglm_model is not None

    def run(self):
        # Executed in the child process
        # On the first run, load the parameters
        retry = 0
        while True:
            try:
                if self.chatglm_model is None:
                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
                    device, = get_conf('LOCAL_MODEL_DEVICE')
                    if device=='cpu':
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
                    else:
                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
                    self.chatglm_model = self.chatglm_model.eval()
                    break
                else:
                    break
            except:
                retry += 1
                if retry > 3:
                    self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
                    raise RuntimeError("不能正常加载ChatGLM的参数!")

        while True:
            # Enter the task-waiting state
            kwargs = self.child.recv()
            # A request arrived; start handling it
            try:
                for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
                    self.child.send(response)
                    # # Receive a possible termination command midway (if any)
                    # if self.child.poll():
                    #     command = self.child.recv()
                    #     if command == '[Terminate]': break
            except:
                self.child.send('[Local Message] Call ChatGLM fail.')
            # The request is done; start the next loop
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        # Executed in the main process
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res != '[Finish]':
                yield res
            else:
                break
        return

global glm_handle
glm_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Multi-threaded method.
    For an explanation of this function, see request_llm/bridge_all.py
    """
    global glm_handle
    if glm_handle is None:
        glm_handle = GetGLMHandle()
        observe_window[0] = load_message + "\n\n" + glm_handle.info
        if not glm_handle.success:
            error = glm_handle.info
            glm_handle = None
            raise RuntimeError(error)

    # chatglm has no sys_prompt interface, so the prompt is merged into history
    history_feedin = []
    history_feedin.append(["What can I do?", sys_prompt])
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
    response = ""
    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
    return response



def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Single-threaded method.
    For an explanation of this function, see request_llm/bridge_all.py
    """
    chatbot.append((inputs, ""))

    global glm_handle
    if glm_handle is None:
        glm_handle = GetGLMHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not glm_handle.success:
            glm_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    # Handle the conversation history
    history_feedin = []
    history_feedin.append(["What can I do?", system_prompt])
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    # Start receiving ChatGLM's reply
    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    # Summarize the output
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
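The handle above wraps a standard multiprocessing request/response loop: the child process blocks on `recv()`, streams partial results back through the pipe, and marks completion with a `[Finish]` sentinel. A self-contained sketch of the same pattern, with the model replaced by a trivial echo so it runs anywhere:

```python
# Self-contained sketch of the Pipe-based worker pattern used by GetGLMHandle
# (simplified; no model is loaded here).
from multiprocessing import Process, Pipe

class EchoHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.start()

    def run(self):                       # runs in the child process
        while True:
            kwargs = self.child.recv()   # block until a request arrives
            for tok in kwargs['query'].split():
                self.child.send(tok)     # stream partial results back
            self.child.send('[Finish]')  # sentinel: request complete

    def stream_chat(self, **kwargs):     # runs in the parent process
        self.parent.send(kwargs)
        while (res := self.parent.recv()) != '[Finish]':
            yield res

if __name__ == '__main__':
    h = EchoHandle()
    print(list(h.stream_chat(query="a b c")))   # -> ['a', 'b', 'c']
```

Keeping the model inside a dedicated process means a crash or out-of-memory condition during loading does not take down the Gradio main process.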
request_llm/bridge_chatgpt.py  (277 lines, new regular file)
@@ -0,0 +1,277 @@
# This file borrows from the project https://github.com/GaiZhenbiao/ChuanhuChatGPT

"""
This file mainly contains three functions

Function without multi-threading capability:
    1. predict: used in normal conversation, with full interactive features, not thread-safe

Functions with multi-threading capability:
    2. predict_no_ui: called by advanced experimental feature modules; does not show output in the UI in real time; simple parameters; can run in parallel threads, which makes complex logic easy to implement
    3. predict_no_ui_long_connection: during experiments it was found that the connection to openai tends to drop when predict_no_ui handles long documents; this function solves that problem in a streaming way and also supports multi-threading
"""

import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib

# config_private.py holds private secrets such as the API key and proxy address
# When reading the config, first check whether a private config_private file exists (it is not tracked by git); if so, it overrides the original config file
from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \
    get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY')

timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'

def get_full_error(chunk, stream_response):
    """
    Fetch the complete error message returned from Openai
    """
    while True:
        try:
            chunk += next(stream_response)
        except:
            break
    return chunk


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Send to chatGPT and wait for the reply, completed in one go, without showing intermediate output.
    Internally, a streaming method is still used, to avoid the connection being cut off midway.
    inputs:
        the input of this query
    sys_prompt:
        the silent system prompt
    llm_kwargs:
        internal tuning parameters of chatGPT
    history:
        the list of previous conversation turns
    observe_window = None:
        used to pass the already-generated output across threads; most of the time this is only for
        a fancy visual effect and can be left empty. observe_window[0]: observation window.
        observe_window[1]: watchdog
    """
    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=True
            from .bridge_all import model_info
            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
        except requests.exceptions.ReadTimeout as e:
            retry += 1
            traceback.print_exc()
            if retry > MAX_RETRY: raise TimeoutError
            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')

    stream_response = response.iter_lines()
    result = ''
    while True:
        try: chunk = next(stream_response).decode()
        except StopIteration:
            break
        except requests.exceptions.ConnectionError:
            chunk = next(stream_response).decode()  # failed; retry once? If it fails again there is nothing more to do.
        if len(chunk)==0: continue
        if not chunk.startswith('data:'):
            error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode()
            if "reduce the length" in error_msg:
                raise ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg)
            else:
                raise RuntimeError("OpenAI拒绝了请求:" + error_msg)
        if ('data: [DONE]' in chunk): break  # api2d finished normally
        json_data = json.loads(chunk.lstrip('data:'))['choices'][0]
        delta = json_data["delta"]
        if len(delta) == 0: break
        if "role" in delta: continue
        if "content" in delta:
            result += delta["content"]
            if not console_slience: print(delta["content"], end='')
            if observe_window is not None:
                # observation window: show the data fetched so far
                if len(observe_window) >= 1: observe_window[0] += delta["content"]
                # watchdog: terminate if the dog has not been fed past the deadline
                if len(observe_window) >= 2:
                    if (time.time()-observe_window[1]) > watch_dog_patience:
                        raise RuntimeError("用户取消了程序。")
        else: raise RuntimeError("意外Json结构:" + str(delta))
    if json_data['finish_reason'] == 'length':
        raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。")
    return result


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Send to chatGPT and fetch the output as a stream.
    Used for the basic conversation feature.
    inputs is the input of this query
    top_p, temperature are chatGPT's internal tuning parameters
    history is the list of previous conversation turns (note that if either inputs or history is too long, a token-overflow error will be triggered)
    chatbot is the conversation list shown in the WebUI; modify it and then yield to update the UI directly
    additional_fn indicates which button was clicked; see functional.py for the buttons
    """
    if is_any_api_key(inputs):
        chatbot._cookies['api_key'] = inputs
        chatbot.append(("输入已识别为openai的api_key", what_keys(inputs)))
        yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入")  # refresh the UI
        return
    elif not is_any_api_key(chatbot._cookies['api_key']):
        chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
        yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key")  # refresh the UI
        return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    raw_input = inputs
    logging.info(f'[raw_input] {raw_input}')
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh the UI

    try:
        headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)
    except RuntimeError as e:
        chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
        yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求")  # refresh the UI
        return

    history.append(inputs); history.append("")

    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=True
            from .bridge_all import model_info
            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
        except:
            retry += 1
            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg)  # refresh the UI
            if retry > MAX_RETRY: raise TimeoutError

    gpt_replying_buffer = ""

    is_head_of_the_stream = True
    if stream:
        stream_response = response.iter_lines()
        while True:
            chunk = next(stream_response)
            # print(chunk.decode()[6:])
            if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()):
                # the first frame of the data stream carries no content
                is_head_of_the_stream = False; continue

            if chunk:
                try:
                    chunk_decoded = chunk.decode()
                    # the former case is for API2D
                    if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
                        # judged as the end of the data stream; gpt_replying_buffer is also complete
                        logging.info(f'[response] {gpt_replying_buffer}')
                        break
                    # handle the body of the data stream
                    chunkjson = json.loads(chunk_decoded[6:])
                    status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                    # if an exception is raised here, the text is usually too long; see the output of get_full_error for details
                    gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
                    history[-1] = gpt_replying_buffer
                    chatbot[-1] = (history[-2], history[-1])
                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text)  # refresh the UI

                except Exception as e:
                    traceback.print_exc()
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规")  # refresh the UI
                    chunk = get_full_error(chunk, stream_response)
                    chunk_decoded = chunk.decode()
                    error_msg = chunk_decoded
                    if "reduce the length" in error_msg:
                        if len(history) >= 2: history[-1] = ""; history[-2] = ""  # release the current overflowing input: history[-2] is this input, history[-1] is this output
                        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
                                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token']))  # release at least half of the history
                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)")
                        # history = []  # clear the history
                    elif "does not exist" in error_msg:
                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.")
                    elif "Incorrect API key" in error_msg:
                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.")
                    elif "exceeded your current quota" in error_msg:
                        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.")
                    elif "bad forward key" in error_msg:
                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.")
                    elif "Not enough point" in error_msg:
                        chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.")
                    else:
                        from toolbox import regular_txt_to_markdown
                        tb_str = '```\n' + trimmed_format_exc() + '```'
                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg)  # refresh the UI
                    return

def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
    """
    Integrate all the information, select the LLM model, and generate the http request, in preparation for sending it
    """
    if not is_any_api_key(llm_kwargs['api_key']):
        raise RuntimeError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")

    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }

    conversation_cnt = len(history) // 2

    messages = [{"role": "system", "content": system_prompt}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "user"
            what_i_have_asked["content"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "assistant"
            what_gpt_answer["content"] = history[index+1]
            if what_i_have_asked["content"] != "":
                if what_gpt_answer["content"] == "": continue
                if what_gpt_answer["content"] == timeout_bot_msg: continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['content'] = what_gpt_answer['content']

    what_i_ask_now = {}
    what_i_ask_now["role"] = "user"
    what_i_ask_now["content"] = inputs
    messages.append(what_i_ask_now)

    payload = {
        "model": llm_kwargs['llm_model'].strip('api2d-'),
        "messages": messages,
        "temperature": llm_kwargs['temperature'],  # 1.0,
        "top_p": llm_kwargs['top_p'],  # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }
    try:
        print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
    except:
        print('输入中可能存在乱码。')
    return headers, payload
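For readers unfamiliar with the `data: ...` framing handled above, here is a minimal standalone sketch of the parsing step, run on a canned chunk rather than a live HTTP response (assumption: the chunk follows the OpenAI chat-completions server-sent-events format; slicing off the first 6 characters drops the `"data: "` prefix, exactly as `chunk_decoded[6:]` does above).

```python
# Minimal sketch of parsing one OpenAI-style SSE chunk, as done above
# (canned bytes instead of a live streaming response).
import json

chunk = b'data: {"choices":[{"delta":{"content":"Hi"},"finish_reason":null}]}'
chunk_decoded = chunk.decode()
if 'data: [DONE]' not in chunk_decoded:
    delta = json.loads(chunk_decoded[6:])['choices'][0]['delta']  # drop "data: "
    if "content" in delta:
        print(delta["content"])        # -> "Hi"
```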
request_llm/bridge_newbing.py  (251 lines, new regular file)
@@ -0,0 +1,251 @@
"""
========================================================================
Part 1: from EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
"""
from .edge_gpt import NewbingChatbot
load_message = "等待NewBing响应。"

"""
========================================================================
Part 2: the subprocess worker (the calling body)
========================================================================
"""
import time
import json
import re
import asyncio
import importlib
import threading
from toolbox import update_ui, get_conf, trimmed_format_exc
from multiprocessing import Process, Pipe

def preprocess_newbing_out(s):
    pattern = r'\^(\d+)\^'                          # match ^digits^
    sub = lambda m: r'\[' + m.group(1) + r'\]'      # wrap the matched digits in escaped brackets
    result = re.sub(pattern, sub, s)                # perform the substitution
    if '[1]' in result:
        result += '\n\n```\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
    return result

def preprocess_newbing_out_simple(result):
    if '[1]' in result:
        result += '\n\n```\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
    return result

class NewBingHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.newbing_model = None
        self.info = ""
        self.success = True
        self.local_history = []
        self.check_dependency()
        self.start()
        self.threadLock = threading.Lock()

    def check_dependency(self):
        try:
            self.success = False
            import certifi, httpx, rich
            self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
            self.success = True
        except:
            self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。"
            self.success = False

    def ready(self):
        return self.newbing_model is not None

    async def async_run(self):
        # read the configuration
        NEWBING_STYLE, = get_conf('NEWBING_STYLE')
        from request_llm.bridge_all import model_info
        endpoint = model_info['newbing']['endpoint']
        while True:
            # wait for the next task
            kwargs = self.child.recv()
            question = kwargs['query']
            history = kwargs['history']
            system_prompt = kwargs['system_prompt']

            # whether to reset the conversation
            if len(self.local_history) > 0 and len(history)==0:
                await self.newbing_model.reset()
                self.local_history = []

            # start asking the question
            prompt = ""
            if system_prompt not in self.local_history:
                self.local_history.append(system_prompt)
                prompt += system_prompt + '\n'

            # append the history
            for ab in history:
                a, b = ab
                if a not in self.local_history:
                    self.local_history.append(a)
                    prompt += a + '\n'
                # if b not in self.local_history:
                #     self.local_history.append(b)
                #     prompt += b + '\n'

            # the question itself
            prompt += question
            self.local_history.append(question)
            print('question:', prompt)
            # submit
            async for final, response in self.newbing_model.ask_stream(
                prompt=question,
                conversation_style=NEWBING_STYLE,  # ["creative", "balanced", "precise"]
                wss_link=endpoint,                 # "wss://sydney.bing.com/sydney/ChatHub"
            ):
                if not final:
                    print(response)
                    self.child.send(str(response))
                else:
                    print('-------- receive final ---------')
                    self.child.send('[Finish]')
                    # self.local_history.append(response)


    def run(self):
        """
        This function runs in the child process
        """
        # on the first run, load the parameters
        self.success = False
        self.local_history = []
        if (self.newbing_model is None) or (not self.success):
            # proxy settings
            proxies, = get_conf('proxies')
            if proxies is None:
                self.proxies_https = None
            else:
                self.proxies_https = proxies['https']
            # cookie
            NEWBING_COOKIES, = get_conf('NEWBING_COOKIES')
            try:
                cookies = json.loads(NEWBING_COOKIES)
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] 不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                raise RuntimeError(f"不能加载Newbing组件。NEWBING_COOKIES未填写或有格式错误。")

            try:
                self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] 不能加载Newbing组件。{tb_str}')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                raise RuntimeError(f"不能加载Newbing组件。")

        self.success = True
        try:
            # enter the task-waiting state
            asyncio.run(self.async_run())
        except Exception:
            tb_str = '```\n' + trimmed_format_exc() + '```'
            self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
            self.child.send('[Fail]')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        """
        This function runs in the main process
        """
        self.threadLock.acquire()
        self.parent.send(kwargs)           # send the request to the child process
        while True:
            res = self.parent.recv()       # wait for a reply fragment from newbing
            if res == '[Finish]':
                break                      # done
            elif res == '[Fail]':
                self.success = False
                break
            else:
                yield res                  # a reply fragment from newbing
        self.threadLock.release()


"""
========================================================================
Part 3: the unified calling interface of the main process
========================================================================
"""
global newbing_handle
newbing_handle = None

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Multi-threaded method.
    For an explanation of this function, see request_llm/bridge_all.py
    """
    global newbing_handle
    if (newbing_handle is None) or (not newbing_handle.success):
        newbing_handle = NewBingHandle()
        observe_window[0] = load_message + "\n\n" + newbing_handle.info
        if not newbing_handle.success:
            error = newbing_handle.info
            newbing_handle = None
            raise RuntimeError(error)

    # there is no sys_prompt interface, so the prompt is merged into history
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
    response = ""
    observe_window[0] = "[Local Message]: 等待NewBing响应中 ..."
    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        observe_window[0] = preprocess_newbing_out_simple(response)
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
    return preprocess_newbing_out_simple(response)

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Single-threaded method.
    For an explanation of this function, see request_llm/bridge_all.py
    """
    chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ..."))

    global newbing_handle
    if (newbing_handle is None) or (not newbing_handle.success):
        newbing_handle = NewBingHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + newbing_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not newbing_handle.success:
            newbing_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...")
    response = "[Local Message]: 等待NewBing响应中 ..."
    yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, preprocess_newbing_out(response))
        yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常,请刷新界面重试 ..."
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
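The `^n^` citation-mark preprocessing above is easy to check standalone. Below is a simplified round-trip of that regex step (simplification: the original emits `\[n\]` with escaped brackets so the marks survive Markdown rendering; this sketch emits plain `[n]` for readability).

```python
# Standalone check of the ^n^ citation preprocessing used above (simplified:
# plain brackets instead of the Markdown-escaped ones the original produces).
import re

def demo_preprocess_newbing_out(s):
    pattern = r'\^(\d+)\^'                                  # match ^digits^
    return re.sub(pattern, lambda m: '[' + m.group(1) + ']', s)

print(demo_preprocess_newbing_out("Bing says so^1^."))      # -> "Bing says so[1]."
```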
request_llm/bridge_tgui.py  (171 lines, new regular file)
@@ -0,0 +1,171 @@
'''
Contributed by SagsMug. Modified by binary-husky
https://github.com/oobabooga/text-generation-webui/pull/175
'''

import asyncio
import json
import random
import string
import websockets
import logging
import time
import threading
import importlib
from toolbox import get_conf, update_ui


def random_hash():
    letters = string.ascii_lowercase + string.digits
    return ''.join(random.choice(letters) for i in range(9))

async def run(context, max_token, temperature, top_p, addr, port):
    params = {
        'max_new_tokens': max_token,
        'do_sample': True,
        'temperature': temperature,
        'top_p': top_p,
        'typical_p': 1,
        'repetition_penalty': 1.05,
        'encoder_repetition_penalty': 1.0,
        'top_k': 0,
        'min_length': 0,
        'no_repeat_ngram_size': 0,
        'num_beams': 1,
        'penalty_alpha': 0,
        'length_penalty': 1,
        'early_stopping': True,
        'seed': -1,
    }
    session = random_hash()

    async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket:
        while content := json.loads(await websocket.recv()):
            # Python 3.10 syntax; replace with if/elif on older versions
            if content["msg"] == "send_hash":
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": 12
                }))
            elif content["msg"] == "estimation":
                pass
            elif content["msg"] == "send_data":
                await websocket.send(json.dumps({
                    "session_hash": session,
                    "fn_index": 12,
                    "data": [
                        context,
                        params['max_new_tokens'],
                        params['do_sample'],
                        params['temperature'],
                        params['top_p'],
                        params['typical_p'],
                        params['repetition_penalty'],
                        params['encoder_repetition_penalty'],
                        params['top_k'],
                        params['min_length'],
                        params['no_repeat_ngram_size'],
                        params['num_beams'],
                        params['penalty_alpha'],
                        params['length_penalty'],
                        params['early_stopping'],
                        params['seed'],
                    ]
                }))
            elif content["msg"] == "process_starts":
                pass
            elif content["msg"] in ["process_generating", "process_completed"]:
                yield content["output"]["data"][0]
                # You can search for your desired end indicator and
                # stop generation by closing the websocket here
                if (content["msg"] == "process_completed"):
                    break





def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    Send to chatGPT and fetch the output as a stream.
    Used for the basic conversation feature.
    inputs is the input of this query
    top_p, temperature are chatGPT's internal tuning parameters
    history is the list of previous conversation turns (note that if either inputs or history is too long, a token-overflow error will be triggered)
    chatbot is the conversation list shown in the WebUI; modify it and then yield to update the UI directly
    additional_fn indicates which button was clicked; see functional.py for the buttons
    """
    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    raw_input = "What I would like to say is the following: " + inputs
    history.extend([inputs, ""])
    chatbot.append([inputs, ""])
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应")  # refresh the UI

    prompt = raw_input
    tgui_say = ""

    model_name, addr_port = llm_kwargs['llm_model'].split('@')
    assert ':' in addr_port, "LLM_MODEL 格式不正确!" + llm_kwargs['llm_model']
    addr, port = addr_port.split(':')


    mutable = ["", time.time()]
    def run_coorotine(mutable):
        async def get_result(mutable):
            # "tgui:galactica-1.3b@localhost:7860"
            async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
                                      temperature=llm_kwargs['temperature'],
                                      top_p=llm_kwargs['top_p'], addr=addr, port=port):
                print(response[len(mutable[0]):])
                mutable[0] = response
                if (time.time() - mutable[1]) > 3:
                    print('exit when no listener')
                    break
        asyncio.run(get_result(mutable))

    thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
    thread_listen.start()

    while thread_listen.is_alive():
        time.sleep(1)
        mutable[1] = time.time()
        # Print intermediate steps
        if tgui_say != mutable[0]:
            tgui_say = mutable[0]
            history[-1] = tgui_say
            chatbot[-1] = (history[-2], history[-1])
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI




def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
    raw_input = "What I would like to say is the following: " + inputs
    prompt = raw_input
    tgui_say = ""
    model_name, addr_port = llm_kwargs['llm_model'].split('@')
    assert ':' in addr_port, "LLM_MODEL 格式不正确!" + llm_kwargs['llm_model']
    addr, port = addr_port.split(':')


    def run_coorotine(observe_window):
        async def get_result(observe_window):
            async for response in run(context=prompt, max_token=llm_kwargs['max_length'],
                                      temperature=llm_kwargs['temperature'],
                                      top_p=llm_kwargs['top_p'], addr=addr, port=port):
                print(response[len(observe_window[0]):])
                observe_window[0] = response
                if (time.time() - observe_window[1]) > 5:
                    print('exit when no listener')
                    break
        asyncio.run(get_result(observe_window))
    thread_listen = threading.Thread(target=run_coorotine, args=(observe_window,))
    thread_listen.start()
    return observe_window[0]
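The bridge above hands results from an asyncio producer thread to a polling consumer through a shared two-element list: slot 0 carries the text generated so far, slot 1 a timestamp that the consumer keeps refreshing (the "watchdog"); if the timestamp goes stale, the producer stops. A self-contained sketch of that handoff, with the websocket generator replaced by a simple token loop:

```python
# Self-contained sketch of the thread + shared-list handoff used above:
# a worker thread fills mutable[0]; the main thread polls it and keeps
# the watchdog timestamp in mutable[1] fresh.
import threading, time

mutable = ["", time.time()]

def worker(mutable):
    for i in range(3):
        mutable[0] += f"token{i} "            # append the next chunk
        time.sleep(0.2)
        if time.time() - mutable[1] > 3:      # watchdog stale -> no listener
            break

t = threading.Thread(target=worker, args=(mutable,), daemon=True)
t.start()
while t.is_alive():
    time.sleep(0.1)
    mutable[1] = time.time()                  # feed the watchdog
    print(mutable[0])                         # show intermediate output
```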
request_llm/edge_gpt.py  (409 lines, new regular file)
@@ -0,0 +1,409 @@
"""
========================================================================
Part 1: from EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
"""

import argparse
import asyncio
import json
import os
import random
import re
import ssl
import sys
import uuid
from enum import Enum
from typing import Generator
from typing import Literal
from typing import Optional
from typing import Union
import websockets.client as websockets

DELIMITER = "\x1e"


# Generate random IP between range 13.104.0.0/14
FORWARDED_IP = (
    f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
)

HEADERS = {
    "accept": "application/json",
    "accept-language": "en-US,en;q=0.9",
    "content-type": "application/json",
    "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"',
    "sec-ch-ua-arch": '"x86"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-full-version": '"109.0.1518.78"',
    "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": "",
    "sec-ch-ua-platform": '"Windows"',
    "sec-ch-ua-platform-version": '"15.0.0"',
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "x-ms-client-request-id": str(uuid.uuid4()),
    "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32",
    "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx",
    "Referrer-Policy": "origin-when-cross-origin",
    "x-forwarded-for": FORWARDED_IP,
}

HEADERS_INIT_CONVER = {
    "authority": "edgeservices.bing.com",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "en-US,en;q=0.9",
    "cache-control": "max-age=0",
    "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
    "sec-ch-ua-arch": '"x86"',
    "sec-ch-ua-bitness": '"64"',
    "sec-ch-ua-full-version": '"110.0.1587.69"',
    "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-model": '""',
    "sec-ch-ua-platform": '"Windows"',
    "sec-ch-ua-platform-version": '"15.0.0"',
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69",
    "x-edge-shopping-flag": "1",
    "x-forwarded-for": FORWARDED_IP,
}

def get_ssl_context():
    import certifi
    ssl_context = ssl.create_default_context()
    ssl_context.load_verify_locations(certifi.where())
    return ssl_context



class NotAllowedToAccess(Exception):
    pass


class ConversationStyle(Enum):
    creative = "h3imaginative,clgalileo,gencontentv3"
    balanced = "galileo"
    precise = "h3precise,clgalileo"


CONVERSATION_STYLE_TYPE = Optional[
    Union[ConversationStyle, Literal["creative", "balanced", "precise"]]
]


def _append_identifier(msg: dict) -> str:
    """
    Appends special character to end of message to identify end of message
    """
    # Convert dict to json string
    return json.dumps(msg) + DELIMITER


def _get_ran_hex(length: int = 32) -> str:
    """
    Returns random hex string
    """
    return "".join(random.choice("0123456789abcdef") for _ in range(length))


class _ChatHubRequest:
    """
    Request object for ChatHub
    """

    def __init__(
        self,
        conversation_signature: str,
        client_id: str,
        conversation_id: str,
        invocation_id: int = 0,
    ) -> None:
        self.struct: dict = {}

        self.client_id: str = client_id
        self.conversation_id: str = conversation_id
        self.conversation_signature: str = conversation_signature
        self.invocation_id: int = invocation_id

    def update(
        self,
        prompt,
        conversation_style,
        options,
    ) -> None:
        """
        Updates request object
        """
        if options is None:
            options = [
                "deepleo",
                "enable_debug_commands",
                "disable_emoji_spoken_text",
                "enablemm",
            ]
        if conversation_style:
            if not isinstance(conversation_style, ConversationStyle):
                conversation_style = getattr(ConversationStyle, conversation_style)
            options = [
                "nlu_direct_response_filter",
                "deepleo",
                "disable_emoji_spoken_text",
                "responsible_ai_policy_235",
                "enablemm",
                conversation_style.value,
                "dtappid",
                "cricinfo",
                "cricinfov2",
                "dv3sugg",
            ]
        self.struct = {
            "arguments": [
                {
                    "source": "cib",
                    "optionsSets": options,
                    "sliceIds": [
                        "222dtappid",
                        "225cricinfo",
                        "224locals0",
                    ],
                    "traceId": _get_ran_hex(32),
                    "isStartOfSession": self.invocation_id == 0,
                    "message": {
                        "author": "user",
                        "inputMethod": "Keyboard",
                        "text": prompt,
                        "messageType": "Chat",
                    },
                    "conversationSignature": self.conversation_signature,
                    "participant": {
                        "id": self.client_id,
                    },
                    "conversationId": self.conversation_id,
                },
            ],
            "invocationId": str(self.invocation_id),
            "target": "chat",
            "type": 4,
        }
        self.invocation_id += 1


class _Conversation:
    """
    Conversation API
    """

    def __init__(
        self,
        cookies,
        proxy,
    ) -> None:
        self.struct: dict = {
            "conversationId": None,
            "clientId": None,
            "conversationSignature": None,
            "result": {"value": "Success", "message": None},
        }
        import httpx
        self.proxy = proxy
        proxy = (
            proxy
            or os.environ.get("all_proxy")
            or os.environ.get("ALL_PROXY")
            or os.environ.get("https_proxy")
            or os.environ.get("HTTPS_PROXY")
            or None
        )
        if proxy is not None and proxy.startswith("socks5h://"):
            proxy = "socks5://" + proxy[len("socks5h://") :]
        self.session = httpx.Client(
            proxies=proxy,
            timeout=30,
            headers=HEADERS_INIT_CONVER,
        )
        for cookie in cookies:
            self.session.cookies.set(cookie["name"], cookie["value"])

        # Send GET request
        response = self.session.get(
            url=os.environ.get("BING_PROXY_URL")
            or "https://edgeservices.bing.com/edgesvc/turing/conversation/create",
        )
        if response.status_code != 200:
            response = self.session.get(
                "https://edge.churchless.tech/edgesvc/turing/conversation/create",
            )
        if response.status_code != 200:
            print(f"Status code: {response.status_code}")
            print(response.text)
            print(response.url)
            raise Exception("Authentication failed")
        try:
            self.struct = response.json()
        except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc:
            raise Exception(
                "Authentication failed. You have not been accepted into the beta.",
            ) from exc
        if self.struct["result"]["value"] == "UnauthorizedRequest":
            raise NotAllowedToAccess(self.struct["result"]["message"])


class _ChatHub:
    """
    Chat API
    """

    def __init__(self, conversation) -> None:
        self.wss = None
        self.request: _ChatHubRequest
        self.loop: bool
        self.task: asyncio.Task
        print(conversation.struct)
        self.request = _ChatHubRequest(
            conversation_signature=conversation.struct["conversationSignature"],
            client_id=conversation.struct["clientId"],
            conversation_id=conversation.struct["conversationId"],
        )

    async def ask_stream(
        self,
        prompt: str,
        wss_link: str,
        conversation_style: CONVERSATION_STYLE_TYPE = None,
        raw: bool = False,
        options: dict = None,
    ) -> Generator[str, None, None]:
        """
        Ask a question to the bot
        """
        if self.wss and not self.wss.closed:
            await self.wss.close()
        # Check if websocket is closed
        self.wss = await websockets.connect(
            wss_link,
            extra_headers=HEADERS,
            max_size=None,
            ssl=get_ssl_context()
        )
        await self._initial_handshake()
        # Construct a ChatHub request
        self.request.update(
            prompt=prompt,
            conversation_style=conversation_style,
            options=options,
        )
        # Send request
        await self.wss.send(_append_identifier(self.request.struct))
        final = False
        while not final:
            objects = str(await self.wss.recv()).split(DELIMITER)
            for obj in objects:
                if obj is None or not obj:
                    continue
                response = json.loads(obj)
                if response.get("type") != 2 and raw:
                    yield False, response
                elif response.get("type") == 1 and response["arguments"][0].get(
                    "messages",
                ):
                    resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][
                        0
                    ]["body"][0].get("text")
                    yield False, resp_txt
                elif response.get("type") == 2:
                    final = True
                    yield True, response

    async def _initial_handshake(self) -> None:
        await self.wss.send(_append_identifier({"protocol": "json", "version": 1}))
        await self.wss.recv()

    async def close(self) -> None:
        """
        Close the connection
        """
        if self.wss and not self.wss.closed:
            await self.wss.close()


class NewbingChatbot:
    """
    Combines everything to make it seamless
    """

    def __init__(
        self,
        cookies,
        proxy
    ) -> None:
        if cookies is None:
            cookies = {}
        self.cookies = cookies
        self.proxy = proxy
        self.chat_hub: _ChatHub = _ChatHub(
            _Conversation(self.cookies, self.proxy),
        )

    async def ask(
        self,
        prompt: str,
        wss_link: str,
        conversation_style: CONVERSATION_STYLE_TYPE = None,
        options: dict = None,
    ) -> dict:
        """
        Ask a question to the bot
        """
        async for final, response in self.chat_hub.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style,
            wss_link=wss_link,
            options=options,
        ):
            if final:
                return response
        await self.chat_hub.wss.close()
        return None

    async def ask_stream(
        self,
        prompt: str,
        wss_link: str,
        conversation_style: CONVERSATION_STYLE_TYPE = None,
        raw: bool = False,
        options: dict = None,
    ) -> Generator[str, None, None]:
        """
        Ask a question to the bot
        """
        async for response in self.chat_hub.ask_stream(
            prompt=prompt,
            conversation_style=conversation_style,
            wss_link=wss_link,
            raw=raw,
            options=options,
        ):
            yield response

    async def close(self) -> None:
        """
        Close the connection
        """
        await self.chat_hub.close()

    async def reset(self) -> None:
        """
        Reset the conversation
        """
        await self.close()
        self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy))
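The ChatHub protocol above frames each JSON message with the `"\x1e"` record separator. Here is a standalone round-trip of the framing helpers shown in the file (the second frame's content is an arbitrary example, not a real protocol message):

```python
# Minimal round-trip of the "\x1e"-delimited JSON framing used by the
# ChatHub protocol above.
import json

DELIMITER = "\x1e"

def _append_identifier(msg: dict) -> str:
    return json.dumps(msg) + DELIMITER       # terminate each JSON frame

wire = _append_identifier({"protocol": "json", "version": 1}) + \
       _append_identifier({"type": 6})       # arbitrary second frame
frames = [json.loads(obj) for obj in wire.split(DELIMITER) if obj]
print(frames)   # -> [{'protocol': 'json', 'version': 1}, {'type': 6}]
```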
request_llm/requirements_chatglm.txt  (6 lines, new file)
@@ -0,0 +1,6 @@
protobuf
transformers==4.27.1
cpm_kernels
torch>=1.10
mdtex2html
sentencepiece
request_llm/requirements_newbing.txt  (8 lines, new file)
@@ -0,0 +1,8 @@
BingImageCreator
certifi
httpx
prompt_toolkit
requests
rich
websockets
httpx[socks]
@@ -1,3 +1,16 @@
gradio
gradio==3.25.0
tiktoken>=0.3.3
requests[socks]
transformers
python-markdown-math
beautifulsoup4
latex2mathml
python-docx
mdtex2html
colorama
Markdown
pygments
pymupdf
openai
numpy
arxiv

show_math.py · 78 lines
@@ -1,78 +0,0 @@
from latex2mathml.converter import convert as tex2mathml
import re

incomplete = '<font style="color:orange;" class="tooltip">⚠<span class="tooltiptext">formula incomplete</span></font>'
convError = '<font style="color:red" class="tooltip">⚠<span class="tooltiptext">LaTeX-convert-error</span></font>'

def convert(mdtex, extensions=[], splitParagraphs=True):
    ''' converts the Markdown-LaTeX mixture recursively to HTML with MathML '''
    found = False
    # handle all paragraphs separately (prevents aftereffects)
    if splitParagraphs:
        parts = re.split("\n\n", mdtex)
        result = ''
        for part in parts:
            result += convert(part, extensions, splitParagraphs=False)
        return result
    # find first $$-formula:
    parts = re.split(r'\${2}', mdtex, 2)
    if len(parts) > 1:
        found = True
        result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
        try:
            result += '<div class="blockformula">' + tex2mathml(parts[1]) + '</div>\n'
        except:
            result += '<div class="blockformula">' + convError + '</div>'
        if len(parts) == 3:
            result += convert(parts[2], extensions, splitParagraphs=False)
        else:
            result += '<div class="blockformula">' + incomplete + '</div>'
    # else find first $-formulas:
    else:
        parts = re.split(r'\${1}', mdtex, 2)
        if len(parts) > 1 and not found:
            found = True
            try:
                mathml = tex2mathml(parts[1])
            except:
                mathml = convError
            if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                parts[0] = parts[0] + '&#x200b;'  # zero-width space
            if len(parts) == 3:
                result = convert(parts[0] + mathml + parts[2], extensions, splitParagraphs=False)
            else:
                result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
        # else find first \[..\]-equation:
        else:
            parts = re.split(r'\\\[', mdtex, 1)
            if len(parts) > 1 and not found:
                found = True
                result = convert(parts[0], extensions, splitParagraphs=False) + '\n'
                parts = re.split(r'\\\]', parts[1], 1)
                try:
                    result += '<div class="blockformula">' + tex2mathml(parts[0]) + '</div>\n'
                except:
                    result += '<div class="blockformula">' + convError + '</div>'
                if len(parts) == 2:
                    result += convert(parts[1], extensions, splitParagraphs=False)
                else:
                    result += '<div class="blockformula">' + incomplete + '</div>'
            # else find first \(..\)-equation:
            else:
                parts = re.split(r'\\\(', mdtex, 1)
                if len(parts) > 1 and not found:
                    found = True
                    subp = re.split(r'\\\)', parts[1], 1)
                    try:
                        mathml = tex2mathml(subp[0])
                    except:
                        mathml = convError
                    if parts[0].endswith('\n\n') or parts[0] == '':  # make sure textblock starts before formula!
                        parts[0] = parts[0] + '&#x200b;'  # zero-width space
                    if len(subp) == 2:
                        result = convert(parts[0] + mathml + subp[1], extensions, splitParagraphs=False)
                    else:
                        result = convert(parts[0] + mathml + incomplete, extensions, splitParagraphs=False)
    if not found:
        result = mdtex
    return result

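A quick sketch of how this (now deleted) helper was called, with a made-up mixed Markdown/LaTeX string (not part of the diff):

html = convert('Euler: $e^{i\\pi}+1=0$\n\n$$a^2+b^2=c^2$$')
# the $...$ span becomes inline MathML; the $$...$$ paragraph becomes
# a <div class="blockformula"> wrapper around the converted formula
print(html)
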
theme.py · 342 lines · regular file
@@ -0,0 +1,342 @@
import gradio as gr
from toolbox import get_conf
CODE_HIGHLIGHT, = get_conf('CODE_HIGHLIGHT')
# colors available in gradio:
# gr.themes.utils.colors.slate
# gr.themes.utils.colors.gray
# gr.themes.utils.colors.zinc
# gr.themes.utils.colors.neutral
# gr.themes.utils.colors.stone
# gr.themes.utils.colors.red
# gr.themes.utils.colors.orange
# gr.themes.utils.colors.amber
# gr.themes.utils.colors.yellow
# gr.themes.utils.colors.lime
# gr.themes.utils.colors.green
# gr.themes.utils.colors.emerald
# gr.themes.utils.colors.teal
# gr.themes.utils.colors.cyan
# gr.themes.utils.colors.sky
# gr.themes.utils.colors.blue
# gr.themes.utils.colors.indigo
# gr.themes.utils.colors.violet
# gr.themes.utils.colors.purple
# gr.themes.utils.colors.fuchsia
# gr.themes.utils.colors.pink
# gr.themes.utils.colors.rose


def adjust_theme():
    try:
        color_er = gr.themes.utils.colors.fuchsia
        set_theme = gr.themes.Default(
            primary_hue=gr.themes.utils.colors.orange,
            neutral_hue=gr.themes.utils.colors.gray,
            font=["sans-serif", "Microsoft YaHei", "ui-sans-serif", "system-ui",
                  "sans-serif", gr.themes.utils.fonts.GoogleFont("Source Sans Pro")],
            font_mono=["ui-monospace", "Consolas", "monospace", gr.themes.utils.fonts.GoogleFont("IBM Plex Mono")])
        set_theme.set(
            # Colors
            input_background_fill_dark="*neutral_800",
            # Transition
            button_transition="none",
            # Shadows
            button_shadow="*shadow_drop",
            button_shadow_hover="*shadow_drop_lg",
            button_shadow_active="*shadow_inset",
            input_shadow="0 0 0 *shadow_spread transparent, *shadow_inset",
            input_shadow_focus="0 0 0 *shadow_spread *secondary_50, *shadow_inset",
            input_shadow_focus_dark="0 0 0 *shadow_spread *neutral_700, *shadow_inset",
            checkbox_label_shadow="*shadow_drop",
            block_shadow="*shadow_drop",
            form_gap_width="1px",
            # Button borders
            input_border_width="1px",
            input_background_fill="white",
            # Gradients
            stat_background_fill="linear-gradient(to right, *primary_400, *primary_200)",
            stat_background_fill_dark="linear-gradient(to right, *primary_400, *primary_600)",
            error_background_fill=f"linear-gradient(to right, {color_er.c100}, *background_fill_secondary)",
            error_background_fill_dark="*background_fill_primary",
            checkbox_label_background_fill="linear-gradient(to top, *neutral_50, white)",
            checkbox_label_background_fill_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            checkbox_label_background_fill_hover="linear-gradient(to top, *neutral_100, white)",
            checkbox_label_background_fill_hover_dark="linear-gradient(to top, *neutral_900, *neutral_800)",
            button_primary_background_fill="linear-gradient(to bottom right, *primary_100, *primary_300)",
            button_primary_background_fill_dark="linear-gradient(to bottom right, *primary_500, *primary_600)",
            button_primary_background_fill_hover="linear-gradient(to bottom right, *primary_100, *primary_200)",
            button_primary_background_fill_hover_dark="linear-gradient(to bottom right, *primary_500, *primary_500)",
            button_primary_border_color_dark="*primary_500",
            button_secondary_background_fill="linear-gradient(to bottom right, *neutral_100, *neutral_200)",
            button_secondary_background_fill_dark="linear-gradient(to bottom right, *neutral_600, *neutral_700)",
            button_secondary_background_fill_hover="linear-gradient(to bottom right, *neutral_100, *neutral_100)",
            button_secondary_background_fill_hover_dark="linear-gradient(to bottom right, *neutral_600, *neutral_600)",
            button_cancel_background_fill=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c200})",
            button_cancel_background_fill_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c700})",
            button_cancel_background_fill_hover=f"linear-gradient(to bottom right, {color_er.c100}, {color_er.c100})",
            button_cancel_background_fill_hover_dark=f"linear-gradient(to bottom right, {color_er.c600}, {color_er.c600})",
            button_cancel_border_color=color_er.c200,
            button_cancel_border_color_dark=color_er.c600,
            button_cancel_text_color=color_er.c600,
            button_cancel_text_color_dark="white",
        )
    except:
        set_theme = None
        print('gradio version is too old; cannot customize fonts and colors')
    return set_theme

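# Not part of the diff: a minimal sketch (assuming the gradio 3.25 Blocks API)
# of how adjust_theme() and the advanced_css string below are wired into an app;
# the variable names are illustrative, not copied from main.py.
#
# import gradio as gr
# from theme import adjust_theme, advanced_css
# set_theme = adjust_theme()
# with gr.Blocks(theme=set_theme, css=advanced_css, analytics_enabled=False) as demo:
#     chatbot = gr.Chatbot()
# demo.launch()
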
advanced_css = """
|
||||
/* 设置表格的外边距为1em,内部单元格之间边框合并,空单元格显示. */
|
||||
.markdown-body table {
|
||||
margin: 1em 0;
|
||||
border-collapse: collapse;
|
||||
empty-cells: show;
|
||||
}
|
||||
|
||||
/* 设置表格单元格的内边距为5px,边框粗细为1.2px,颜色为--border-color-primary. */
|
||||
.markdown-body th, .markdown-body td {
|
||||
border: 1.2px solid var(--border-color-primary);
|
||||
padding: 5px;
|
||||
}
|
||||
|
||||
/* 设置表头背景颜色为rgba(175,184,193,0.2),透明度为0.2. */
|
||||
.markdown-body thead {
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
}
|
||||
|
||||
/* 设置表头单元格的内边距为0.5em和0.2em. */
|
||||
.markdown-body thead th {
|
||||
padding: .5em .2em;
|
||||
}
|
||||
|
||||
/* 去掉列表前缀的默认间距,使其与文本线对齐. */
|
||||
.markdown-body ol, .markdown-body ul {
|
||||
padding-inline-start: 2em !important;
|
||||
}
|
||||
|
||||
/* 设定聊天气泡的样式,包括圆角、最大宽度和阴影等. */
|
||||
[class *= "message"] {
|
||||
border-radius: var(--radius-xl) !important;
|
||||
/* padding: var(--spacing-xl) !important; */
|
||||
/* font-size: var(--text-md) !important; */
|
||||
/* line-height: var(--line-md) !important; */
|
||||
/* min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
|
||||
/* min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); */
|
||||
}
|
||||
[data-testid = "bot"] {
|
||||
max-width: 95%;
|
||||
/* width: auto !important; */
|
||||
border-bottom-left-radius: 0 !important;
|
||||
}
|
||||
[data-testid = "user"] {
|
||||
max-width: 100%;
|
||||
/* width: auto !important; */
|
||||
border-bottom-right-radius: 0 !important;
|
||||
}
|
||||
|
||||
/* 行内代码的背景设为淡灰色,设定圆角和间距. */
|
||||
.markdown-body code {
|
||||
display: inline;
|
||||
white-space: break-spaces;
|
||||
border-radius: 6px;
|
||||
margin: 0 2px 0 2px;
|
||||
padding: .2em .4em .1em .4em;
|
||||
background-color: rgba(13, 17, 23, 0.95);
|
||||
color: #c9d1d9;
|
||||
}
|
||||
|
||||
.dark .markdown-body code {
|
||||
display: inline;
|
||||
white-space: break-spaces;
|
||||
border-radius: 6px;
|
||||
margin: 0 2px 0 2px;
|
||||
padding: .2em .4em .1em .4em;
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
}
|
||||
|
||||
/* 设定代码块的样式,包括背景颜色、内、外边距、圆角。 */
|
||||
.markdown-body pre code {
|
||||
display: block;
|
||||
overflow: auto;
|
||||
white-space: pre;
|
||||
background-color: rgba(13, 17, 23, 0.95);
|
||||
border-radius: 10px;
|
||||
padding: 1em;
|
||||
margin: 1em 2em 1em 0.5em;
|
||||
}
|
||||
|
||||
.dark .markdown-body pre code {
|
||||
display: block;
|
||||
overflow: auto;
|
||||
white-space: pre;
|
||||
background-color: rgba(175,184,193,0.2);
|
||||
border-radius: 10px;
|
||||
padding: 1em;
|
||||
margin: 1em 2em 1em 0.5em;
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
if CODE_HIGHLIGHT:
    advanced_css += """

.codehilite .hll { background-color: #6e7681 }
.codehilite .c { color: #8b949e; font-style: italic } /* Comment */
.codehilite .err { color: #f85149 } /* Error */
.codehilite .esc { color: #c9d1d9 } /* Escape */
.codehilite .g { color: #c9d1d9 } /* Generic */
.codehilite .k { color: #ff7b72 } /* Keyword */
.codehilite .l { color: #a5d6ff } /* Literal */
.codehilite .n { color: #c9d1d9 } /* Name */
.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */
.codehilite .x { color: #c9d1d9 } /* Other */
.codehilite .p { color: #c9d1d9 } /* Punctuation */
.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */
.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */
.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */
.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */
.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #ffa198 } /* Generic.Error */
.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */
.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */
.codehilite .go { color: #8b949e } /* Generic.Output */
.codehilite .gp { color: #8b949e } /* Generic.Prompt */
.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #79c0ff } /* Generic.Subheading */
.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */
.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */
.codehilite .kc { color: #79c0ff } /* Keyword.Constant */
.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */
.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */
.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */
.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */
.codehilite .kt { color: #ff7b72 } /* Keyword.Type */
.codehilite .ld { color: #79c0ff } /* Literal.Date */
.codehilite .m { color: #a5d6ff } /* Literal.Number */
.codehilite .s { color: #a5d6ff } /* Literal.String */
.codehilite .na { color: #c9d1d9 } /* Name.Attribute */
.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */
.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */
.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */
.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */
.codehilite .ni { color: #ffa657 } /* Name.Entity */
.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #d2a8ff; font-weight: bold } /* Name.Function */
.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */
.codehilite .nn { color: #ff7b72 } /* Name.Namespace */
.codehilite .nx { color: #c9d1d9 } /* Name.Other */
.codehilite .py { color: #79c0ff } /* Name.Property */
.codehilite .nt { color: #7ee787 } /* Name.Tag */
.codehilite .nv { color: #79c0ff } /* Name.Variable */
.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */
.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */
.codehilite .w { color: #6e7681 } /* Text.Whitespace */
.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */
.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */
.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */
.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */
.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */
.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */
.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */
.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */
.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */
.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */
.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */
.codehilite .se { color: #79c0ff } /* Literal.String.Escape */
.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */
.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */
.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */
.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */
.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */
.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */
.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */
.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */
.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */
.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */
.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */
.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */
.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */

.dark .codehilite .hll { background-color: #2C3B41 }
.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */
.dark .codehilite .err { color: #FF5370 } /* Error */
.dark .codehilite .esc { color: #89DDFF } /* Escape */
.dark .codehilite .g { color: #EEFFFF } /* Generic */
.dark .codehilite .k { color: #BB80B3 } /* Keyword */
.dark .codehilite .l { color: #C3E88D } /* Literal */
.dark .codehilite .n { color: #EEFFFF } /* Name */
.dark .codehilite .o { color: #89DDFF } /* Operator */
.dark .codehilite .p { color: #89DDFF } /* Punctuation */
.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */
.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */
.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */
.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */
.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */
.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */
.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */
.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */
.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */
.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */
.dark .codehilite .gi { color: #C3E88D } /* Generic.Inserted */
.dark .codehilite .go { color: #79d618 } /* Generic.Output */
.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */
.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */
.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */
.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */
.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */
.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */
.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */
.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */
.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */
.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */
.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */
.dark .codehilite .m { color: #F78C6C } /* Literal.Number */
.dark .codehilite .s { color: #C3E88D } /* Literal.String */
.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */
.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */
.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */
.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */
.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */
.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */
.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */
.dark .codehilite .nf { color: #82AAFF } /* Name.Function */
.dark .codehilite .nl { color: #82AAFF } /* Name.Label */
.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */
.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */
.dark .codehilite .py { color: #FFCB6B } /* Name.Property */
.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */
.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */
.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */
.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */
.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */
.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */
.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */
.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */
.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */
.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */
.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */
.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */
.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */
.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */
.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */
.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */
.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */
.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */
.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */
.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */
.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */
.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */
.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */
.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */
.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */
.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */
.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */
.dark .codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */
.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */
.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */

"""

toolbox.py · 645 lines · regular file
@@ -0,0 +1,645 @@
import markdown
import importlib
import traceback
import inspect
import re
from latex2mathml.converter import convert as tex2mathml
from functools import wraps, lru_cache

"""
========================================================================
Part 1
Input/output plumbing for function plugins
    - ChatBotWithCookies: a Chatbot class that carries cookies, the basis for more powerful features
    - ArgsGeneralWrapper: a decorator that regroups the input arguments, changing their order and structure
    - update_ui: refresh the UI via yield from update_ui(chatbot, history)
    - CatchException: surface any exception raised inside a plugin on the UI
    - HotReload: hot-reloading of function plugins
    - trimmed_format_exc: print the traceback, hiding absolute paths for safety
========================================================================
"""

class ChatBotWithCookies(list):
    def __init__(self, cookie):
        self._cookies = cookie

    def write_list(self, lst):  # parameter renamed from `list`, which shadowed the builtin
        for t in lst:
            self.append(t)

    def get_list(self):
        return [t for t in self]

    def get_cookies(self):
        return self._cookies


def ArgsGeneralWrapper(f):
    """
    A decorator that regroups the input arguments, changing their order and structure.
    """
    def decorated(cookies, max_length, llm_model, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg, *args):
        txt_passon = txt
        if txt == "" and txt2 != "": txt_passon = txt2
        # introduce a chatbot that carries cookies
        cookies.update({
            'top_p': top_p,
            'temperature': temperature,
        })
        llm_kwargs = {
            'api_key': cookies['api_key'],
            'llm_model': llm_model,
            'top_p': top_p,
            'max_length': max_length,
            'temperature': temperature,
        }
        plugin_kwargs = {
            "advanced_arg": plugin_advanced_arg,
        }
        chatbot_with_cookie = ChatBotWithCookies(cookies)
        chatbot_with_cookie.write_list(chatbot)
        yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
    return decorated


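# Not part of the diff: a sketch of how ArgsGeneralWrapper is applied (illustrative
# names; the real wiring lives in main.py). The gradio click handler passes the raw
# widget values, and the wrapped function receives the regrouped signature
# (txt, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt):
#
# from request_llm.bridge_all import predict
# predict = ArgsGeneralWrapper(predict)
# # submit_btn.click(predict, inputs=[cookies, max_length_sl, md_dropdown, txt, txt2,
# #                  top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg],
# #                  outputs=[cookies, chatbot, history, status])
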
def update_ui(chatbot, history, msg='正常', **kwargs):  # refresh the UI
    """
    Refresh the user interface
    """
    assert isinstance(chatbot, ChatBotWithCookies), "在传递chatbot的过程中不要将其丢弃。必要时,可用clear将其清空,然后用for+append循环重新赋值。"
    yield chatbot.get_cookies(), chatbot, history, msg


def trimmed_format_exc():
    import os, traceback
    tb_str = traceback.format_exc()  # renamed from `str`, which shadowed the builtin
    current_path = os.getcwd()
    replace_path = "."
    return tb_str.replace(current_path, replace_path)


def CatchException(f):
    """
    A decorator that catches any exception raised inside f, wraps it into a
    generator, and displays it in the chat window.
    """

    @wraps(f)
    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT):
        try:
            yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT)
        except Exception as e:
            from check_proxy import check_proxy
            from toolbox import get_conf
            proxies, = get_conf('proxies')
            tb_str = '```\n' + trimmed_format_exc() + '```'
            if chatbot is None or len(chatbot) == 0:
                chatbot = [["插件调度异常", "异常原因"]]
            chatbot[-1] = (chatbot[-1][0],
                           f"[Local Message] 实验性函数调用出错: \n\n{tb_str} \n\n当前代理可用性: \n\n{check_proxy(proxies)}")
            yield from update_ui(chatbot=chatbot, history=history, msg=f'异常 {e}')  # refresh the UI
    return decorated


def HotReload(f):
    """
    Decorator implementing hot-reloading of Python function plugins:
    the function's code is updated without stopping the running program.
    Inside the decorator, wraps(f) preserves the metadata of f, and an inner
    function named decorated is defined. The inner function reloads the
    function's module via importlib.reload and inspect.getmodule, looks the
    function up again by name in the freshly loaded module, and runs the
    reloaded version with yield from.
    The net effect is that every call executes the latest definition of f.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        fn_name = f.__name__
        f_hot_reload = getattr(importlib.reload(inspect.getmodule(f)), fn_name)
        yield from f_hot_reload(*args, **kwargs)
    return decorated


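# Not part of the diff: typical use of HotReload — wrap a plugin entry point once
# when building the plugin registry, so edits to the plugin source take effect on
# the next call without restarting the app (module/function names illustrative):
#
# from crazy_functions.高级功能函数模板 import 高阶功能模板函数
# plugin_fn = HotReload(高阶功能模板函数)
# # every plugin_fn(...) call reloads the module and runs the freshest code
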
"""
|
||||
========================================================================
|
||||
第二部分
|
||||
其他小工具:
|
||||
- write_results_to_file: 将结果写入markdown文件中
|
||||
- regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。
|
||||
- report_execption: 向chatbot中添加简单的意外错误信息
|
||||
- text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
|
||||
- markdown_convertion: 用多种方式组合,将markdown转化为好看的html
|
||||
- format_io: 接管gradio默认的markdown处理方式
|
||||
- on_file_uploaded: 处理文件的上传(自动解压)
|
||||
- on_report_generated: 将生成的报告自动投射到文件上传区
|
||||
- clip_history: 当历史上下文过长时,自动截断
|
||||
- get_conf: 获取设置
|
||||
- select_api_key: 根据当前的模型类别,抽取可用的api-key
|
||||
========================================================================
|
||||
"""
|
||||
|
||||
def get_reduce_token_percent(text):
    """
    * This function will be deprecated in the future
    """
    try:
        # text = "maximum context length is 4097 tokens. However, your messages resulted in 4870 tokens"
        pattern = r"(\d+)\s+tokens\b"
        match = re.findall(pattern, text)
        EXCEED_ALLO = 500  # leave some slack, otherwise the reply runs out of room
        max_limit = float(match[0]) - EXCEED_ALLO
        current_tokens = float(match[1])
        ratio = max_limit / current_tokens
        assert ratio > 0 and ratio < 1
        return ratio, str(int(current_tokens - max_limit))
    except:
        return 0.5, '不详'


def write_results_to_file(history, file_name=None):
    """
    Write the conversation history to a file in Markdown format.
    If no file name is given, one is generated from the current time.
    """
    import os
    import time
    if file_name is None:
        # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
        file_name = 'chatGPT分析报告' + \
            time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
    os.makedirs('./gpt_log/', exist_ok=True)
    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
        f.write('# chatGPT 分析报告\n')
        for i, content in enumerate(history):
            try:  # the trigger for this bug has not been found; guard against it for now
                if type(content) != str:
                    content = str(content)
            except:
                continue
            if i % 2 == 0:
                f.write('## ')
            f.write(content)
            f.write('\n\n')
    res = '以上材料已经被写入' + os.path.abspath(f'./gpt_log/{file_name}')
    print(res)
    return res


def regular_txt_to_markdown(text):
    """
    Convert plain text to Markdown-formatted text.
    """
    text = text.replace('\n', '\n\n')
    text = text.replace('\n\n\n', '\n\n')
    text = text.replace('\n\n\n', '\n\n')
    return text


def report_execption(chatbot, history, a, b):
    """
    Append an error message to the chatbot
    """
    chatbot.append((a, b))
    history.append(a)
    history.append(b)


def text_divide_paragraph(text):
    """
    Split text on paragraph separators and emit HTML with paragraph tags.
    """
    if '```' in text:
        # careful input
        return text
    else:
        # wtf input
        lines = text.split("\n")
        for i, line in enumerate(lines):
            lines[i] = lines[i].replace(" ", "&nbsp;")  # the entity was rendered away in extraction
        text = "</br>".join(lines)
        return text


def markdown_convertion(txt):
    """
    Convert Markdown-formatted text to HTML. If it contains math formulas,
    the formulas are converted to HTML first.
    """
    pre = '<div class="markdown-body">'
    suf = '</div>'
    markdown_extension_configs = {
        'mdx_math': {
            'enable_dollar_delimiter': True,
            'use_gitlab_delimiters': False,
        },
    }
    find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'

    def tex2mathml_catch_exception(content, *args, **kwargs):
        try:
            content = tex2mathml(content, *args, **kwargs)
        except:
            content = content
        return content

    def replace_math_no_render(match):
        content = match.group(1)
        if 'mode=display' in match.group(0):
            content = content.replace('\n', '</br>')
            return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
        else:
            return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"

    def replace_math_render(match):
        content = match.group(1)
        if 'mode=display' in match.group(0):
            if '\\begin{aligned}' in content:
                content = content.replace('\\begin{aligned}', '\\begin{array}')
                content = content.replace('\\end{aligned}', '\\end{array}')
                content = content.replace('&', ' ')
            content = tex2mathml_catch_exception(content, display="block")
            return content
        else:
            return tex2mathml_catch_exception(content)

    def markdown_bug_hunt(content):
        """
        Work around an mdx_math bug (a redundant <script> when a begin command is wrapped in a single $)
        """
        content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
        content = content.replace('</script>\n</script>', '</script>')
        return content


    if ('$' in txt) and ('```' not in txt):  # the text has $-delimited formulas and no ``` code fences
        # convert everything to html format
        split = markdown.markdown(text='---')
        convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
        convert_stage_1 = markdown_bug_hunt(convert_stage_1)
        # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
        # 1. convert to easy-to-copy tex (do not render math)
        convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
        # 2. convert to rendered equation
        convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
        # cat them together
        return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
    else:
        return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf


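# Not part of the diff: a quick probe of the two branches above — text containing
# $-math goes through the mdx_math pipeline (yielding both a copyable-TeX and a
# rendered-MathML version), plain markdown goes straight to fenced_code/codehilite:
#
# print(markdown_convertion('inline math $E=mc^2$ here'))
# print(markdown_convertion('```python\nprint(1)\n```'))
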
def close_up_code_segment_during_stream(gpt_reply):
    """
    While GPT is still streaming a code block (the opening ``` has been emitted
    but the closing ``` has not), append the missing closing ```.

    Args:
        gpt_reply (str): the reply string returned by the GPT model.

    Returns:
        str: a new string with the closing ``` of the partial code block appended.

    """
    if '```' not in gpt_reply:
        return gpt_reply
    if gpt_reply.endswith('```'):
        return gpt_reply

    # neither of the above: count the ``` marks — an odd count means a block is still open
    segments = gpt_reply.split('```')
    n_mark = len(segments) - 1
    if n_mark % 2 == 1:
        # the reply is in the middle of a code block
        return gpt_reply + '\n```'
    else:
        return gpt_reply


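# Not part of the diff: behaviour check for the stream patcher above — an odd
# number of ``` marks means a code block is still open, so a closing one is appended:
#
# assert close_up_code_segment_during_stream('```python\nprint(1)') == '```python\nprint(1)\n```'
# assert close_up_code_segment_during_stream('done ```a``` ok') == 'done ```a``` ok'
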
def format_io(self, y):
    """
    Parse input and output into HTML: paragraph-ize the input part of the last
    item in y, and convert the Markdown and math formulas in the output part to HTML.
    """
    if y is None or y == []:
        return []
    i_ask, gpt_reply = y[-1]
    i_ask = text_divide_paragraph(i_ask)  # the input part is free-form, pre-process it
    gpt_reply = close_up_code_segment_during_stream(gpt_reply)  # if a code block is half-streamed, close it
    y[-1] = (
        None if i_ask is None else markdown.markdown(i_ask, extensions=['fenced_code', 'tables']),
        None if gpt_reply is None else markdown_convertion(gpt_reply)
    )
    return y


def find_free_port():
    """
    Return a currently unused port on this system.
    """
    import socket
    from contextlib import closing
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # set before bind, where it takes effect
        s.bind(('', 0))
        return s.getsockname()[1]


def extract_archive(file_path, dest_dir):
    import zipfile
    import tarfile
    import os
    # Get the file extension of the input file
    file_extension = os.path.splitext(file_path)[1]

    # Extract the archive based on its extension
    if file_extension == '.zip':
        with zipfile.ZipFile(file_path, 'r') as zipobj:
            zipobj.extractall(path=dest_dir)
            print("Successfully extracted zip archive to {}".format(dest_dir))

    elif file_extension in ['.tar', '.gz', '.bz2']:
        with tarfile.open(file_path, 'r:*') as tarobj:
            tarobj.extractall(path=dest_dir)
            print("Successfully extracted tar archive to {}".format(dest_dir))

    # third-party library: requires pip install rarfile in advance
    # on Windows, WinRAR must also be installed and added to Path, e.g. "C:\Program Files\WinRAR"
    elif file_extension == '.rar':
        try:
            import rarfile
            with rarfile.RarFile(file_path) as rf:
                rf.extractall(path=dest_dir)
            print("Successfully extracted rar archive to {}".format(dest_dir))
        except:
            print("Rar format requires additional dependencies to install")
            return '\n\n需要安装pip install rarfile来解压rar文件'

    # third-party library: requires pip install py7zr in advance
    elif file_extension == '.7z':
        try:
            import py7zr
            with py7zr.SevenZipFile(file_path, mode='r') as f:
                f.extractall(path=dest_dir)
            print("Successfully extracted 7z archive to {}".format(dest_dir))
        except:
            print("7z format requires additional dependencies to install")
            return '\n\n需要安装pip install py7zr来解压7z文件'
    else:
        return ''
    return ''


def find_recent_files(directory):
    """
    me: find files that is created with in one minutes under a directory with python, write a function
    gpt: here it is!
    """
    import os
    import time
    current_time = time.time()
    one_minute_ago = current_time - 60
    recent_files = []

    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        if file_path.endswith('.log'):
            continue
        created_time = os.path.getmtime(file_path)
        if created_time >= one_minute_ago:
            if os.path.isdir(file_path):
                continue
            recent_files.append(file_path)

    return recent_files


def on_file_uploaded(files, chatbot, txt, txt2, checkboxes):
    """
    Callback invoked when files are uploaded
    """
    if len(files) == 0:
        return chatbot, txt, txt2  # fixed: this callback must return three values
    import shutil
    import os
    import time
    import glob
    from toolbox import extract_archive
    try:
        shutil.rmtree('./private_upload/')
    except:
        pass
    time_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
    err_msg = ''
    for file in files:
        file_origin_name = os.path.basename(file.orig_name)
        shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
        err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
                                   dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
    moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
    if "底部输入区" in checkboxes:
        txt = ""
        txt2 = f'private_upload/{time_tag}'
    else:
        txt = f'private_upload/{time_tag}'
        txt2 = ""
    moved_files_str = '\t\n\n'.join(moved_files)
    chatbot.append(['我上传了文件,请查收',
                    f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
                    f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
                    f'\n\n现在您点击任意“红颜色”标识的函数插件时,以上文件将被作为输入参数' + err_msg])
    return chatbot, txt, txt2


def on_report_generated(files, chatbot):
    from toolbox import find_recent_files
    report_files = find_recent_files('gpt_log')
    if len(report_files) == 0:
        return None, chatbot
    # files.extend(report_files)
    chatbot.append(['汇总报告如何远程获取?', '汇总报告已经添加到右侧“文件上传区”(可能处于折叠状态),请查收。'])
    return report_files, chatbot


def is_openai_api_key(key):
    API_MATCH = re.match(r"sk-[a-zA-Z0-9]{48}$", key)
    return bool(API_MATCH)


def is_api2d_key(key):
    if key.startswith('fk') and len(key) == 41:
        return True
    else:
        return False


def is_any_api_key(key):
    if ',' in key:
        keys = key.split(',')
        for k in keys:
            if is_any_api_key(k): return True
        return False
    else:
        return is_openai_api_key(key) or is_api2d_key(key)


def what_keys(keys):
    avail_key_list = {'OpenAI Key': 0, "API2D Key": 0}
    key_list = keys.split(',')

    for k in key_list:
        if is_openai_api_key(k):
            avail_key_list['OpenAI Key'] += 1

    for k in key_list:
        if is_api2d_key(k):
            avail_key_list['API2D Key'] += 1

    return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个,API2D Key {avail_key_list['API2D Key']} 个"


def select_api_key(keys, llm_model):
    import random
    avail_key_list = []
    key_list = keys.split(',')

    if llm_model.startswith('gpt-'):
        for k in key_list:
            if is_openai_api_key(k): avail_key_list.append(k)

    if llm_model.startswith('api2d-'):
        for k in key_list:
            if is_api2d_key(k): avail_key_list.append(k)

    if len(avail_key_list) == 0:
        raise RuntimeError(f"您提供的api-key不满足要求,不包含任何可用于{llm_model}的api-key。您可能选择了错误的模型或请求源。")

    api_key = random.choice(avail_key_list)  # random load balancing
    return api_key


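# Not part of the diff: the key helpers above in action. The keys are fake but
# correctly shaped ('sk-' + 48 alphanumerics for OpenAI, 'fk' + 39 chars for API2D):
#
# fake_openai = 'sk-' + 'a' * 48
# fake_api2d = 'fk' + 'a' * 39
# assert is_any_api_key(fake_openai + ',' + fake_api2d)
# print(what_keys(fake_openai + ',' + fake_api2d))                        # one of each
# print(select_api_key(fake_openai + ',' + fake_api2d, 'gpt-3.5-turbo'))  # picks the sk- key
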
@lru_cache(maxsize=128)
def read_single_conf_with_lru_cache(arg):
    from colorful import print亮红, print亮绿, print亮蓝
    try:
        r = getattr(importlib.import_module('config_private'), arg)
    except:
        r = getattr(importlib.import_module('config'), arg)
    # when reading API_KEY, check whether the user forgot to edit the config
    if arg == 'API_KEY':
        print亮蓝(f"[API_KEY] 本项目现已支持OpenAI和API2D的api-key。也支持同时填写多个api-key,如API_KEY=\"openai-key1,openai-key2,api2d-key3\"")
        print亮蓝(f"[API_KEY] 您既可以在config.py中修改api-key(s),也可以在问题输入区输入临时的api-key(s),然后回车键提交后即可生效。")
        if is_any_api_key(r):
            print亮绿(f"[API_KEY] 您的 API_KEY 是: {r[:15]}*** API_KEY 导入成功")
        else:
            print亮红("[API_KEY] 正确的 API_KEY 是'sk'开头的51位密钥(OpenAI),或者 'fk'开头的41位密钥,请在config文件中修改API密钥之后再运行。")
    if arg == 'proxies':
        if r is None:
            print亮红('[PROXY] 网络代理状态:未配置。无代理状态下很可能无法访问OpenAI家族的模型。建议:检查USE_PROXY选项是否修改。')
        else:
            print亮绿('[PROXY] 网络代理状态:已配置。配置信息如下:', r)
            assert isinstance(r, dict), 'proxies格式错误,请注意proxies选项的格式,不要遗漏括号。'
    return r


def get_conf(*args):
    # keep secrets such as API keys and proxy URLs in a copy named config_private.py, so they are not accidentally pushed to github for everyone to see
    res = []
    for arg in args:
        r = read_single_conf_with_lru_cache(arg)
        res.append(r)
    return res


def clear_line_break(txt):
    txt = txt.replace('\n', ' ')
    txt = txt.replace('  ', ' ')  # collapse double spaces (rendered away in extraction)
    txt = txt.replace('  ', ' ')
    return txt


class DummyWith():
    """
    DummyWith is an empty context manager: its job is, well, to do nothing,
    i.e. to stand in for another context manager without changing the code structure.
    A context manager is a Python object designed to be used with the with statement,
    ensuring that resources are correctly initialized and cleaned up around a code block.
    A context manager must implement two methods, __enter__() and __exit__():
    __enter__() is called before the block runs, and __exit__() when the block ends.
    """
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return

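# Not part of the diff: the usual DummyWith pattern — substitute a real context
# manager when a feature is enabled, and DummyWith() otherwise, keeping the code
# structure identical (the flag and file name below are illustrative):
#
# use_audit_log = False
# ctx = open('gpt_log/audit.log', 'a') if use_audit_log else DummyWith()
# with ctx:
#     pass  # the body runs the same way in both cases
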
def run_gradio_in_subpath(demo, auth, port, custom_path):
    """
    Move gradio's serving address onto the given sub-path
    """
    def is_path_legal(path: str) -> bool:
        '''
        check path for sub url
        path: path to check
        return value: do sub url wrap
        '''
        if path == "/": return True
        if len(path) == 0:
            print("illegal custom path: {}\npath must not be empty\ndeploy on root url".format(path))
            return False
        if path[0] == '/':
            if path[1] != '/':
                print("deploy on sub-path {}".format(path))
                return True
            return False
        print("illegal custom path: {}\npath should begin with '/'\ndeploy on root url".format(path))
        return False

    if not is_path_legal(custom_path): raise RuntimeError('Illegal custom path')
    import uvicorn
    import gradio as gr
    from fastapi import FastAPI
    app = FastAPI()
    if custom_path != "/":
        @app.get("/")
        def read_main():
            return {"message": f"Gradio is running at: {custom_path}"}
    app = gr.mount_gradio_app(app, demo, path=custom_path)
    uvicorn.run(app, host="0.0.0.0", port=port)  # , auth=auth


def clip_history(inputs, history, tokenizer, max_token_limit):
    """
    Reduce the length of the history by clipping: repeatedly find the longest
    entry and trim it, little by little, until the token count of the history
    drops below the threshold.
    """
    import numpy as np
    from request_llm.bridge_all import model_info
    def get_token_num(txt):
        return len(tokenizer.encode(txt, disallowed_special=()))
    input_token_num = get_token_num(inputs)
    if input_token_num < max_token_limit * 3 / 4:
        # when the input takes up less than 3/4 of the limit, clip the history:
        # 1. reserve room for the input
        max_token_limit = max_token_limit - input_token_num
        # 2. reserve room for the output
        max_token_limit = max_token_limit - 128
        # 3. if the remaining budget is too small, just drop the history
        if max_token_limit < 128:
            history = []
            return history
    else:
        # when the input takes up more than 3/4 of the limit, just drop the history
        history = []
        return history

    everything = ['']
    everything.extend(history)
    n_token = get_token_num('\n'.join(everything))
    everything_token = [get_token_num(e) for e in everything]

    # granularity of each truncation step
    delta = max(everything_token) // 16

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = tokenizer.encode(everything[where], disallowed_special=())
        clipped_encoded = encoded[:len(encoded) - delta]
        everything[where] = tokenizer.decode(clipped_encoded)[:-1]  # -1 drops a possibly broken trailing char
        everything_token[where] = get_token_num(everything[where])
        n_token = get_token_num('\n'.join(everything))

    history = everything[1:]
    return history

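# Not part of the diff: a worked sketch of clip_history, using the tiktoken
# tokenizer the project relies on elsewhere (model name illustrative):
#
# import tiktoken
# enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
# history = ['short question', 'a very long answer ' * 400, 'follow-up']
# clipped = clip_history('new input', history, enc, max_token_limit=1024)
# # the longest entry is trimmed in (longest//16)-token steps until the joined
# # history fits within 1024 minus the input tokens minus a 128-token output reserve
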