Compare commits

..

237 commits

Author SHA1 Message Date
Koi to Coco
a8ebe0b9be
fix: use vec! to allocate buffer #213 (#214)
2025-06-29 16:18:25 +08:00
ssrlive
31b0972801 make rustc happy 2025-06-29 16:11:14 +08:00
B. Blechschmidt
9a6c96cf8b chore: publish v0.7.11 2025-06-19 19:16:24 +02:00
B. Blechschmidt
b5fbaa2d19 doc: fix Docker URL in README 2025-06-19 16:08:01 +02:00
B. Blechschmidt
3baa41a1fb fix: support multi-arch musl builds in Dockerfile 2025-06-19 16:05:54 +02:00
ssrlive
0cf4427ef6 setup_logging function 2025-06-19 20:15:47 +08:00
B. Blechschmidt
bc00dcc5ae fix: docker publish workflow 2025-06-19 13:47:48 +02:00
B. Blechschmidt
d87562b8d3 style: add pre-commit-config 2025-06-19 13:43:14 +02:00
B. Blechschmidt
fa09daabac fix: variable ref in publish-docker action 2025-06-19 13:42:57 +02:00
B. Blechschmidt
b36473ced9 feat(Docker): multi-stage Dockerfile with OS-less container 2025-06-19 13:32:01 +02:00
B. Blechschmidt
584bdc17ed feat(Linux): phase out reliance on iproute2 2025-06-17 01:10:25 +02:00
ssrlive
1880396822 use ctrlc2 async feature 2025-06-14 13:05:54 +08:00
Paper-Dragon
8b4ecabd8f
build image based on alpine/musl (#212) 2025-06-11 14:12:02 +08:00
B. Blechschmidt
fbc47a3001 fix(ci): account for change in load_dotenv 2025-06-11 00:21:03 +02:00
ssrlive
88d31ce168 Bump version 0.7.10 2025-06-03 14:05:28 +08:00
dependabot[bot]
ddebf5ee50
Update tun requirement from 0.7 to 0.8 (#209) 2025-06-03 12:02:31 +08:00
ssrlive
8cdb4f535d
Significant change in --setup parameter (#207) 2025-06-02 10:31:44 +08:00
ssrlive
6a5692cea0 refine code 2025-05-21 15:45:48 +08:00
ssrlive
3dc8f222cb Bump version 0.7.9 2025-05-08 10:26:00 +08:00
ssrlive
7c32b62727 Exclude dependabot[bot] in Integration Tests 2025-05-02 16:49:34 +08:00
dependabot[bot]
cf4a565f93
Update socks5-impl requirement from 0.6 to 0.7 (#201) 2025-05-01 08:30:43 +08:00
ssrlive
54f7dbc81b update nix deps 2025-04-30 10:59:48 +08:00
ssrlive
b71f479bf3 close-stale-issues.yml 2025-04-23 13:58:13 +08:00
ssrlive
2ead13a3f4 version_info & about_info 2025-04-22 14:58:52 +08:00
ssrlive
88423039c6 make TASK_COUNT as local task_count variable 2025-04-20 19:56:36 +08:00
ssrlive
7121a80300 Bump version 0.7.8 2025-04-19 17:50:56 +08:00
ssrlive
9e75475a23 force exit process while fatal error 2025-04-18 16:09:35 +08:00
ssrlive
7657f1603f Bump version 0.7.7 2025-03-28 20:23:47 +08:00
ssrlive
a380817951 update hickory-proto (DNS parser) 2025-03-19 08:36:29 +08:00
ssrlive
a2399c8b28 log ipstack info adjusted 2025-03-12 11:18:47 +08:00
ssrlive
61bbafcf82 version_info method 2025-03-11 12:41:57 +08:00
ssrlive
ca7cd25c4e Bump version 0.7.6 2025-03-07 14:15:14 +08:00
ssrlive
68716bdc9f update deps 2025-03-07 14:07:55 +08:00
ssrlive
e556f7657b Bump version 0.7.5 2025-02-27 14:56:14 +08:00
ssrlive
fd7dca9988 unsafe_in_unsafe issues 2025-02-27 14:40:09 +08:00
ssrlive
9a018f2393 update ipstack 2025-02-19 21:45:23 +08:00
ssrlive
c5d907551b ubuntu-20.04 used in publish script 2025-02-12 20:54:53 +08:00
ssrlive
6b038c2a80 Bump version 0.7.4 2025-02-12 18:09:37 +08:00
ssrlive
5287bef3c0 PI issues for macOS 2025-01-10 18:48:32 +08:00
ssrlive
04db15f553 Bump version 0.7.3 2025-01-07 21:15:44 +08:00
Ahmed Elsayed
f8c902b61c
use shlex instead of split whitespaces. (#179) 2025-01-07 21:03:25 +08:00
ssrlive
8ba2c1a2b7 Bump version 0.7.2 2025-01-03 15:30:41 +08:00
ssrlive
e939f5f3dc remove mod mobile 2025-01-03 15:22:14 +08:00
ssrlive
ecd1ab80bf base64 removed 2025-01-03 12:32:28 +08:00
Mostafa Kazemi
51de01854b
Fix typo in comment (#178) 2025-01-03 11:00:19 +08:00
ssrlive
bac54ec56c Bump version 0.7.1 2025-01-03 02:26:51 +08:00
ssrlive
6034870264 rename desktop_run_async to general_run_async 2025-01-03 02:05:50 +08:00
ssrlive
e933e5d4c0 iOS & Android testing suits 2025-01-03 00:41:29 +08:00
ssrlive
7136e2a20c refactor desktop_run_async 2025-01-02 23:44:57 +08:00
ssrlive
2a8e31225c refine clap::Parser 2025-01-02 17:08:19 +08:00
ssrlive
ea5ee834db Bump version 0.6.7 2024-12-27 17:15:48 +08:00
ssrlive
4d4a0ce85c minor changes 2024-12-26 20:38:13 +08:00
ssrlive
258637a52e upgrade dependencies 2024-12-17 11:35:58 +08:00
ssrlive
a01de17b36 minor changes 2024-11-26 13:32:39 +08:00
Paper-Dragon
724557b30e
docker-compose.yaml support (#166) 2024-11-26 13:12:04 +08:00
ssrlive
7a7293effd Refine code 2024-11-26 12:58:26 +08:00
ssrlive
46bf4434ef Bump version 0.6.6 2024-11-26 12:28:03 +08:00
ssrlive
d37cb44b62 Fix #165 2024-11-26 12:17:16 +08:00
ssrlive
987635d3dc Contributors in README 2024-11-24 15:49:10 +08:00
ssrlive
ebd3128778 Bump version 0.6.5 2024-11-21 14:15:37 +08:00
ssrlive
ee4df8f97b cbindgen issues 2024-11-17 17:54:30 +08:00
ssrlive
7314906841 mask_socket_addr function 2024-11-11 15:11:06 +08:00
ssrlive
23d4e59367 minor changes 2024-11-11 11:51:28 +08:00
ssrlive
28d54be638 Bump version 0.6.4 2024-11-09 18:30:56 +08:00
dependabot[bot]
8c98d1dc74
Update thiserror requirement from 1 to 2 (#162) 2024-11-07 10:37:46 +08:00
ssrlive
1a508918a2
Auto merge script 2024-11-05 17:31:33 +08:00
ssrlive
c2382ee29b minor changes 2024-11-04 22:08:21 +08:00
ssrlive
21355e37da Bump version 0.6.3 2024-11-03 10:36:07 +08:00
ssrlive
e8143a691b remove useless is_in_heartbeat in udpgw 2024-11-02 17:16:54 +08:00
ssrlive
53f60ffda6 readme on udpgw 2024-11-02 13:55:47 +08:00
ssrlive
9088cf6fe5 minor changes 2024-11-02 07:25:46 +08:00
ssrlive
d7e3913450 Bump version 0.6.2 2024-11-01 15:08:49 +08:00
ssrlive
52d814ce79 refine udpgw 2024-11-01 15:02:18 +08:00
ssrlive
b4142453fd Bump version 0.6.1 2024-10-30 19:10:04 +08:00
ssrlive
0aad0d1709 refactor udpgw 2024-10-30 19:00:28 +08:00
ssrlive
3fb02f0fc7 switch to tun crate instead of tun2 2024-10-28 14:03:35 +08:00
ssrlive
b9cf06da33 refine code 2024-10-27 15:27:50 +08:00
ssrlive
2ade72e79d publish version 0.6.0 2024-10-26 11:04:26 +08:00
ssrlive
e3cc5ea1ce fix daemonize issues 2024-10-26 09:51:00 +08:00
sujiacong
b6bb9bedfc
support udp gateway mode (#155) 2024-10-26 02:15:52 +08:00
ssrlive
f823202b33 Bump version 0.5.4 2024-10-10 00:35:30 +08:00
ssrlive
9aa2afb0fd Fix daemonize issues 2024-10-09 23:54:25 +08:00
ssrlive
918e6137ab Bump version 0.5.3 2024-10-09 17:00:40 +08:00
ssrlive
d093973160 refine ctrl-c logic 2024-10-09 16:58:37 +08:00
ssrlive
4ef71a5b4c --max-sessions option 2024-10-09 16:57:14 +08:00
ssrlive
b03032b8cd Bump version 0.5.2 2024-10-07 13:34:48 +08:00
ssrlive
c991006f4c --exit-on-fatal-error option 2024-10-07 13:29:32 +08:00
ssrlive
fe32a65291 Bump version 0.5.1 2024-10-03 08:50:51 +08:00
ssrlive
93e15e0a8b build x86_64-win7-windows-msvc target 2024-10-03 08:50:23 +08:00
ssrlive
b74aeab182 target armv7-unknown-linux-musleabi 2024-09-30 17:12:46 +08:00
ssrlive
c9b24a865c minor changes 2024-09-29 18:17:45 +08:00
ssrlive
2396d769d2 Bump version 0.5.0 2024-09-26 12:29:56 +08:00
ssrlive
b24d48a042 testing python script issues 2024-09-26 11:51:52 +08:00
ssrlive
6c8ae7a33f rename target from 'tun2proxy' to 'tun2proxy-bin'
make rust compiler happy
2024-09-26 10:54:54 +08:00
ssrlive
77d651dc70 minor changes 2024-09-26 10:06:52 +08:00
ssrlive
febd654f35 CI testing scripts 2024-09-23 16:01:34 +08:00
ssrlive
143f203fde
Bump version 0.4.5 2024-09-16 17:18:14 +08:00
Paper-Dragon
a5bc8f49b4
multi-arch build docker images (#141) 2024-09-16 17:10:56 +08:00
ssrlive
1ccba18273 Bump version 0.4.4 2024-09-14 22:12:10 +08:00
ssrlive
607d709c03 Apply daemonize for unix 2024-09-14 22:02:05 +08:00
ssrlive
e817257866 refine code 2024-09-14 21:38:15 +08:00
ssrlive
c583e884b5 Bump version 0.4.3 2024-09-14 16:10:33 +08:00
ssrlive
1e6c6f4f66 Fix #144 2024-09-14 16:08:54 +08:00
ssrlive
c167f45a5e Bump version 0.4.2 2024-09-14 10:11:25 +08:00
ssrlive
02b15951b6 update Semver checking script 2024-09-14 10:05:47 +08:00
ssrlive
6dadc1504a Support windows service, fix #143 2024-09-14 09:55:27 +08:00
ssrlive
187e251142 Bump version 0.4.1 2024-09-02 23:22:11 +08:00
ssrlive
15646925a7 issues of parameter constraint for 'tun' 2024-08-29 10:01:58 +08:00
ssrlive
beb3d364a8 fix windows issues 2024-08-29 09:23:33 +08:00
B. Blechschmidt
8334acd085 Update version 2024-08-28 23:17:59 +02:00
B. Blechschmidt
1e7f649192 Remove unused import with updated dependencies 2024-08-28 23:12:21 +02:00
B. Blechschmidt
8c28f2e000 Implement --virtual-dns-pool 2024-08-28 23:06:37 +02:00
ssrlive
3f76ccec97 Apply mimalloc to iOS only 2024-08-03 18:21:02 +08:00
ssrlive
f787ff6d23 rust toolchain version issues 2024-08-02 10:35:05 +08:00
ssrlive
1dd6746bbc mimalloc usage 2024-08-02 10:18:11 +08:00
ssrlive
6567b6bc00 LazyLock usage 2024-07-27 00:08:07 +08:00
ssrlive
016aaa6128 Bump version 0.3.1 2024-07-24 21:11:23 +08:00
ssrlive
824b443d2b Bump version 0.3.0 2024-07-22 09:32:02 +08:00
ssrlive
06ed994655 Check semver script 2024-07-20 12:55:01 +08:00
ssrlive
e879599e6b close_fd_on_drop issues 2024-07-20 12:28:52 +08:00
ssrlive
0ca92dcdc2 minor changes 2024-07-18 19:35:14 +08:00
BlackbirdBop
635c7e557f
Make close_fd_on_drop configurable (#132) 2024-07-18 19:01:11 +08:00
ssrlive
15fe95a2c6 Bump version 0.2.24 2024-07-16 11:50:58 +08:00
ssrlive
d5a404fda7 update deps 2024-07-08 18:35:18 +08:00
ssrlive
3b2adf92cb update deps 2024-06-30 16:06:43 +08:00
ssrlive
1ba8f8b167 Update deps 2024-06-28 13:46:25 +08:00
James Brown
48f527ad81
Update lib.rs (#130)
Fix #129
2024-06-28 01:35:49 +08:00
B. Blechschmidt
060ca5740f Format build provenance attestation section 2024-06-23 21:30:43 +02:00
B. Blechschmidt
bb1a1fe286 Add build provenance attestation hint 2024-06-23 21:27:59 +02:00
B. Blechschmidt
d8d40b09de Bump version 0.2.23 2024-06-23 21:07:33 +02:00
B. Blechschmidt
ea0c10a5c1 Add more comments to virtual DNS implementation 2024-06-23 20:37:26 +02:00
B. Blechschmidt
01ba8f382f Virtual DNS: Move name canonicalization into find_or_allocate_ip 2024-06-23 20:31:08 +02:00
B. Blechschmidt
b525d3f99e Virtual DNS: Do not add trailing dot 2024-06-22 16:39:25 +02:00
ssrlive
b8c22db037 build-android.sh 2024-06-18 17:44:06 +08:00
dependabot[bot]
dbf960884d
Update android_logger requirement from 0.13 to 0.14 (#122) 2024-06-17 11:45:39 +08:00
B. Blechschmidt
b0432c7659 Re-add tested build provenance attestations 2024-06-15 16:30:46 +02:00
ssrlive
628e6cba84 Bump version 0.2.22 2024-06-14 15:11:28 +08:00
ssrlive
203cfba302 update deps 2024-06-14 14:59:08 +08:00
ssrlive
9d9c152b54
Bump version 0.2.21 2024-06-11 19:08:51 +08:00
ssrlive
3b5f526728 traffic status logic 2024-06-11 19:03:07 +08:00
ssrlive
1789259f6f Implementation of traffic status callback 2024-06-11 17:25:13 +08:00
Birk Blechschmidt
4243057fbf
Merge pull request #119 from koitococo/master
Write unshare pid into file for scripting purposes
2024-06-05 18:11:18 +02:00
Koi to Coco
07ffbe057c
Write unshare pid into file for scripting purposes 2024-06-05 04:22:53 +00:00
B. Blechschmidt
4554d3bc55 Build provenance attestation 2024-06-01 02:00:45 +02:00
B. Blechschmidt
a082a6f45b Enable build verification 2024-05-31 23:14:55 +02:00
ssrlive
4b0ca087eb Don't play with fire 2024-05-30 20:06:15 +08:00
ssrlive
1023f00d12 Bump version 0.2.20 2024-05-26 10:56:40 +08:00
ssrlive
30a54329e4 Fix #114 2024-05-26 10:52:29 +08:00
ssrlive
e604dec01c fix nix error cos upgrade 2024-05-26 09:34:49 +08:00
ssrlive
d062b1b66a Fix #115 2024-05-23 21:27:42 +08:00
ssrlive
e6360d83a7 refine code 2024-05-16 13:30:10 +08:00
ssrlive
588364d060
Add files via upload 2024-05-16 13:18:02 +08:00
ssrlive
3202e7bbd2
Bump version 0.2.19 2024-05-09 18:26:53 +08:00
ssrlive
3980b985f2 warnings removed 2024-05-05 17:02:58 +08:00
ssrlive
64dd43c6f3 cbindgen issues in publish script 2024-04-29 11:40:39 +08:00
ssrlive
0f241325ad Bump version 0.2.18 2024-04-29 11:13:45 +08:00
ssrlive
5e32994f91 unhandled transport 2024-04-23 19:54:19 +08:00
ssrlive
04a0555101 test scripts issues 2024-04-23 13:22:27 +08:00
ssrlive
a9ef8f658b Android build script 2024-04-21 04:35:37 +00:00
ssrlive
8438eddc95 The bypass value is IP/CIDR now 2024-04-19 22:17:36 +08:00
B. Blechschmidt
c36c4ecf1b Add CI workflow for functional tests 2024-04-18 23:24:24 +02:00
B. Blechschmidt
03f98a0741 Update tproxy-config dependency 2024-04-18 22:39:07 +02:00
B. Blechschmidt
8aa2a66942 Add functional tests 2024-04-18 22:20:05 +02:00
B. Blechschmidt
f418ca4fe7 Fix over-tcp DNS for HTTP proxies 2024-04-18 21:45:09 +02:00
ssrlive
d5d847fa92 Bump version 0.2.17 2024-04-14 19:18:18 +08:00
B. Blechschmidt
09994d43cc Fix routing issues described in #104 2024-04-13 17:16:30 +02:00
ssrlive
2df59ae596 UDP read time out case 2024-04-10 11:26:34 +08:00
ssrlive
7bee2e0968 TryFrom for ArgProxy 2024-04-09 14:24:22 +08:00
ssrlive
58364580f5 TryFrom for ProxyType 2024-04-09 12:57:18 +08:00
ssrlive
18f4689d21 refine code 2024-04-08 20:49:44 +08:00
ssrlive
ba1615fcd1 minor changes 2024-04-08 19:23:13 +08:00
ssrlive
92011edd43 use percent-encoding instead of url-escape 2024-04-08 18:37:56 +08:00
ssrlive
84c03426f2
Bump version 0.2.16 2024-04-08 15:39:20 +08:00
ssrlive
e582d6cbec Fix #107 2024-04-08 15:28:56 +08:00
ssrlive
c1d93942cc async-recursion removed 2024-04-08 14:24:41 +08:00
ssrlive
18044a8056 change Apple building description 2024-04-08 13:45:42 +08:00
B. Blechschmidt
ebbe939f85 Use destructor to restore network config 2024-04-07 23:08:32 +02:00
Birk Blechschmidt
0239a225a1
Merge support for unprivileged namespaces on Linux
Add support for unprivileged namespaces on Linux
(pull request #104 from one-d-wide/namespaces)
2024-04-07 21:52:21 +02:00
B. Blechschmidt
40368dd232 Increase security and portability through the use of /proc/self/exe 2024-04-07 21:47:15 +02:00
B. Blechschmidt
4f5a128972 Update README 2024-04-07 21:33:04 +02:00
B. Blechschmidt
e8469f0aee Restrict namespace arguments to Linux 2024-04-07 21:33:04 +02:00
ssrlive
af6a8a3cb0 minor changes 2024-04-07 21:33:04 +02:00
Remy D. Farley
f9f5401ba4 fix socks5 udp connectivity 2024-04-07 21:33:04 +02:00
ssrlive
56be614334 Args class 2024-04-07 21:33:04 +02:00
ssrlive
181497e709 remove useless get_server_addr 2024-04-07 21:33:04 +02:00
B. Blechschmidt
a08b3338c3 Apply clippy suggestion 2024-04-07 21:33:04 +02:00
Remy D. Farley
d351b5031c add support for unprivileged namespaces 2024-04-07 21:32:58 +02:00
ssrlive
050f8c0e65 minor changes 2024-04-04 20:55:50 +08:00
Remy D. Farley
5e99c9f874 add no-proxy mode 2024-04-03 20:58:15 +00:00
Remy D. Farley
361cf95f4e add udp timeout option 2024-04-03 20:58:15 +00:00
Remy D. Farley
74e5220d08 ci: don't abort checks immediately if error is encountered 2024-04-03 20:58:07 +00:00
Remy D. Farley
b7e59b130e ci: don't abort checks immediately if error is encountered 2024-04-03 22:51:58 +02:00
ssrlive
ce0c02b3bf Bump version 0.2.15 2024-03-30 12:30:01 +08:00
ssrlive
4adc38c726 Bump version 0.2.14 2024-03-28 17:03:36 +08:00
ssrlive
eab795e61c build-apple-debug.sh 2024-03-24 17:20:51 +08:00
ssrlive
715a85920c update deps 2024-03-19 18:31:04 +08:00
ssrlive
c430d76534 tcp timeout option 2024-03-18 13:12:30 +08:00
ssrlive
3fe47d92ec update deps 2024-03-15 16:52:55 +08:00
ssrlive
c9272609b8 building script 2024-03-14 20:28:33 +08:00
ssrlive
3a156f5837
Bump version 0.2.13 2024-03-13 14:17:27 +08:00
ssrlive
9841987031 fix #101 2024-03-13 13:07:32 +08:00
ssrlive
bd96807bf8 minor changes 2024-03-11 08:57:18 +08:00
ssrlive
7cb251c190 refine TUN_QUIT 2024-03-06 18:01:02 +08:00
ssrlive
989406d00c script issues 2024-03-06 17:08:28 +08:00
ssrlive
d3e77e6c17
Bump version 0.2.12 2024-03-06 12:22:41 +08:00
ssrlive
fb7b6862e5
tst (#99) 2024-03-05 12:33:47 +08:00
ssrlive
7e7aadb04b
tun shutdown issues. (#97) 2024-03-03 19:49:10 +08:00
Ebrahim Tahernejad
4ab6f1a9bc
XCFramework build for apple (#93) 2024-02-29 11:38:44 +08:00
B. Blechschmidt
01a0d9164d Fix banner bug with HTTP proxies 2024-02-26 01:24:22 +01:00
B. Blechschmidt
b3314f5abc Properly close streams 2024-02-26 00:50:23 +01:00
ssrlive
ee63dc1559 Bump version 0.2.11 2024-02-25 22:24:55 +08:00
ssrlive
3628533c8b update deps 2024-02-25 22:20:54 +08:00
B. Blechschmidt
a52dccd827 Update to v0.2.10 2024-02-25 13:49:15 +01:00
B. Blechschmidt
444e72689c Add .idea to gitignore 2024-02-25 13:47:38 +01:00
B. Blechschmidt
12efc5f392 Update tproxy-config to 2.0 2024-02-25 13:34:56 +01:00
B. Blechschmidt
1d49ec87ad Update Desktop API to use returned state 2024-02-25 13:13:23 +01:00
ssrlive
5c228ca07e tun_name issues 2024-02-25 16:59:18 +08:00
B. Blechschmidt
1b859a5374 Re-add Dockerfile and Docker build workflow 2024-02-25 01:09:22 +01:00
B. Blechschmidt
a5db99b03b Change order of operations to support auto setup 2024-02-25 00:45:42 +01:00
B. Blechschmidt
498a43b471 Do not swallow error when not tracing 2024-02-24 23:38:52 +01:00
B. Blechschmidt
d03e3c268d Improve error message 2024-02-24 22:25:22 +01:00
B. Blechschmidt
91fcd07733 Do not terminate when UDP is not supported by proxy
When an HTTP proxy is used, `new_proxy_handler` can result in an error
when a UDP packet is processed. Without this commit, this results in the
termination of tun2proxy.
2024-02-24 22:09:14 +01:00
ssrlive
bd27833c29 print error info 2024-02-24 20:40:39 +08:00
ssrlive
cfbc5fabb1 print error info 2024-02-24 19:24:51 +08:00
ssrlive
b11e49b455 wiki link 2024-02-24 14:22:37 +08:00
ssrlive
129450a9db setup parameter issues 2024-02-22 18:23:48 +08:00
ssrlive
977c3ce518
Bump version 0.2.9 2024-02-13 10:51:13 +08:00
ssrlive
a1083273ee
Ffi2 (#90) 2024-02-13 10:46:13 +08:00
ssrlive
a26621bbcd
Ffi (#89) 2024-02-12 21:36:18 +08:00
ssrlive
e9c378099e
Bump version 0.2.8 2024-02-11 18:41:11 +08:00
ssrlive
9f60eee2e1 ArgProxy issues 2024-02-11 12:36:36 +08:00
ssrlive
5514da71f9 refine TUN_QUIT 2024-02-11 01:53:20 +08:00
ssrlive
a317a3fc9e
Bump version 0.2.7 2024-02-11 01:07:03 +08:00
Andrej Mihajlov
2a9775ce2e
Introduce cancellation token and reduce amount of code (#88)
Test passed on Android. Thanks a lot.
2024-02-11 00:36:54 +08:00
ssrlive
2434c62524 Bump version 0.2.6 2024-02-10 20:21:28 +08:00
Andrej Mihajlov
9a4bd9f800
Re-export the missing types that are used within Args (#87) 2024-02-10 19:59:48 +08:00
ssrlive
ea5ffff82c Bump version 0.2.5 2024-02-10 14:45:44 +08:00
51 changed files with 3720 additions and 1080 deletions

1
.dockerignore Symbolic link

@ -0,0 +1 @@
.gitignore

6
.github/dependabot.yml vendored Normal file

@ -0,0 +1,6 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"

20
.github/workflows/auto-merge.yaml vendored Normal file

@ -0,0 +1,20 @@
name: Dependabot Auto Merge
on:
pull_request_target:
types: [labeled]
jobs:
auto:
if: github.actor == 'dependabot[bot]'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Auto approve pull request, then squash and merge
uses: ahmadnassri/action-dependabot-auto-merge@v2
with:
# target: minor
# here `PAT_REPO_ADMIN` is a personal access token (PAT) provided by a GitHub user.
github-token: ${{ secrets.PAT_REPO_ADMIN }}

26
.github/workflows/close-stale-issues.yml vendored Normal file

@ -0,0 +1,26 @@
name: Close stale issues and PRs
on:
schedule:
- cron: "0 0 * * *" # run a cron job every day at midnight
jobs:
stale:
runs-on: ubuntu-latest
steps:
- name: Close stale issues and PRs
uses: actions/stale@v9
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
days-before-issue-stale: 30
days-before-pr-stale: 45
days-before-issue-close: 5
days-before-pr-close: 10
stale-issue-label: 'no-issue-activity'
exempt-issue-labels: 'keep-open,awaiting-approval,work-in-progress'
stale-pr-label: 'no-pr-activity'
exempt-pr-labels: 'awaiting-approval,work-in-progress'
# only-labels: 'awaiting-feedback,awaiting-answers'

72
.github/workflows/publish-docker.yml vendored Normal file

@ -0,0 +1,72 @@
name: Publish Docker Images
on:
push:
tags: [ 'v*.*.*' ]
# Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
env:
REGISTRY: ghcr.io
# This also contains the owner, i.e. tun2proxy/tun2proxy.
IMAGE_PATH: ${{ github.repository }}
IMAGE_NAME: ${{ github.event.repository.name }}
DEFAULT_OS: scratch
# There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
jobs:
build-and-push-image:
name: Build and push Docker image
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
os: [ 'scratch', 'ubuntu', 'alpine' ]
# Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
permissions:
contents: read
packages: write
#
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Add support for more platforms with QEMU (optional)
# https://github.com/docker/setup-qemu-action
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
- name: Extract metadata (tags, labels) for Docker Image
id: meta
uses: docker/metadata-action@v5
with:
# We publish the images with an OS-suffix.
# The image based on a default OS is also published without a suffix.
images: |
${{ env.REGISTRY }}/${{ env.IMAGE_PATH }}-${{ matrix.os }}
${{ env.DEFAULT_OS == matrix.os && format('{0}/{1}', env.REGISTRY, env.IMAGE_PATH) || '' }}
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
- name: Build and push Docker image
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm64
context: .
file: Dockerfile
target: ${{ env.IMAGE_NAME }}-${{ matrix.os }}
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}


@ -1,4 +1,5 @@
on:
workflow_dispatch:
push:
tags:
- "v*.*.*"
@ -8,29 +9,42 @@ name: Publish Releases
jobs:
build_publish:
name: Publishing Tasks
permissions:
contents: write
id-token: write
attestations: write
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-gnu
- x86_64-unknown-linux-musl
- i686-unknown-linux-musl
- aarch64-unknown-linux-gnu
- armv7-unknown-linux-gnueabihf
- armv7-unknown-linux-musleabi
- armv7-unknown-linux-musleabihf
- x86_64-apple-darwin
- aarch64-apple-darwin
- x86_64-pc-windows-msvc
- i686-pc-windows-msvc
- aarch64-pc-windows-msvc
- x86_64-win7-windows-msvc
- i686-win7-windows-msvc
include:
- target: x86_64-unknown-linux-gnu
host_os: ubuntu-latest
host_os: ubuntu-22.04
- target: x86_64-unknown-linux-musl
host_os: ubuntu-latest
- target: i686-unknown-linux-musl
host_os: ubuntu-latest
- target: aarch64-unknown-linux-gnu
host_os: ubuntu-latest
- target: armv7-unknown-linux-gnueabihf
- target: armv7-unknown-linux-musleabi
host_os: ubuntu-latest
- target: armv7-unknown-linux-musleabihf
host_os: ubuntu-latest
- target: x86_64-apple-darwin
host_os: macos-latest
@ -40,42 +54,87 @@ jobs:
host_os: windows-latest
- target: i686-pc-windows-msvc
host_os: windows-latest
- target: aarch64-pc-windows-msvc
host_os: windows-latest
- target: x86_64-win7-windows-msvc
host_os: windows-latest
- target: i686-win7-windows-msvc
host_os: windows-latest
runs-on: ${{ matrix.host_os }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Prepare
shell: bash
run: |
mkdir mypubdir4
rustup target add ${{ matrix.target }}
if [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then
if [[ "${{ matrix.target }}" != "x86_64-win7-windows-msvc" && "${{ matrix.target }}" != "i686-win7-windows-msvc" ]]; then
rustup target add ${{ matrix.target }}
fi
cargo install cbindgen
if [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" && "${{ matrix.host_os }}" != "ubuntu-22.04" ]]; then
sudo .github/workflows/install-cross.sh
fi
- name: Build
if: ${{ !cancelled() }}
shell: bash
run: |
if [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then
if [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" && "${{ matrix.host_os }}" != "ubuntu-22.04" ]]; then
cross build --all-features --release --target ${{ matrix.target }}
else
cargo build --all-features --release --target ${{ matrix.target }}
if [[ "${{ matrix.target }}" == "x86_64-win7-windows-msvc" || "${{ matrix.target }}" == "i686-win7-windows-msvc" ]]; then
rustup toolchain install nightly
rustup component add rust-src --toolchain nightly
cargo +nightly build --release -Z build-std --target ${{ matrix.target }}
else
cargo build --all-features --release --target ${{ matrix.target }}
fi
fi
cbindgen --config cbindgen.toml -o target/tun2proxy.h
if [[ "${{ matrix.host_os }}" == "windows-latest" ]]; then
powershell -Command "(Get-Item README.md).LastWriteTime = Get-Date"
powershell -Command "(Get-Item target/${{ matrix.target }}/release/wintun.dll).LastWriteTime = Get-Date"
powershell Compress-Archive -Path target/${{ matrix.target }}/release/tun2proxy.exe, README.md, target/${{ matrix.target }}/release/wintun.dll -DestinationPath mypubdir4/tun2proxy-${{ matrix.target }}.zip
powershell Compress-Archive -Path target/${{ matrix.target }}/release/tun2proxy-bin.exe, target/${{ matrix.target }}/release/udpgw-server.exe, README.md, target/tun2proxy.h, target/${{ matrix.target }}/release/tun2proxy.dll, target/${{ matrix.target }}/release/wintun.dll -DestinationPath mypubdir4/tun2proxy-${{ matrix.target }}.zip
elif [[ "${{ matrix.host_os }}" == "macos-latest" ]]; then
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy README.md
elif [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy README.md
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy-bin target/${{ matrix.target }}/release/udpgw-server README.md target/tun2proxy.h target/${{ matrix.target }}/release/libtun2proxy.dylib
if [[ "${{ matrix.target }}" == "x86_64-apple-darwin" ]]; then
./build-aarch64-apple-ios.sh
zip -r mypubdir4/tun2proxy-aarch64-apple-ios-xcframework.zip ./tun2proxy.xcframework/
./build-apple.sh
zip -r mypubdir4/tun2proxy-apple-xcframework.zip ./tun2proxy.xcframework/
fi
elif [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" ]]; then
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy-bin target/${{ matrix.target }}/release/udpgw-server README.md target/tun2proxy.h target/${{ matrix.target }}/release/libtun2proxy.so
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-gnu" ]]; then
./build-android.sh
cp ./tun2proxy-android-libs.zip ./mypubdir4/
fi
fi
- name: Upload artifacts
if: ${{ !cancelled() }}
uses: actions/upload-artifact@v4
with:
name: bin-${{ matrix.target }}
path: mypubdir4/*
- name: Generate artifact attestation
if: ${{ !cancelled() }}
uses: actions/attest-build-provenance@v1
with:
subject-path: mypubdir4/*
- name: Publish
if: ${{ !cancelled() }}
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: mypubdir4/*
- name: Abort on error
if: ${{ failure() }}
run: echo "Some of jobs failed" && false


@ -1,7 +1,15 @@
name: Push or PR
on:
[push, pull_request]
workflow_dispatch:
push:
branches:
- '**'
pull_request:
branches:
- '**'
schedule:
- cron: '0 0 * * 0' # Every Sunday at midnight UTC
env:
CARGO_TERM_COLOR: always
@ -9,18 +17,99 @@ env:
jobs:
build_n_test:
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: rustfmt
if: ${{ !cancelled() }}
run: cargo fmt --all -- --check
- name: check
if: ${{ !cancelled() }}
run: cargo check --verbose
- name: clippy
if: ${{ !cancelled() }}
run: cargo clippy --all-targets --all-features -- -D warnings
- name: Build
run: cargo build --verbose --tests --all-features
if: ${{ !cancelled() }}
run: |
cargo build --verbose --tests --all-features
cargo clean
cargo build --verbose
- name: Abort on error
if: ${{ failure() }}
run: echo "Some of jobs failed" && false
build_n_test_android:
strategy:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install cargo ndk and rust compiler for android target
if: ${{ !cancelled() }}
run: |
cargo install --locked cargo-ndk
rustup target add x86_64-linux-android
- name: clippy
if: ${{ !cancelled() }}
run: cargo ndk -t x86_64 clippy --all-features -- -D warnings
- name: Build
if: ${{ !cancelled() }}
run: |
cargo ndk -t x86_64 rustc --verbose --all-features --lib --crate-type=cdylib
- name: Abort on error
if: ${{ failure() }}
run: echo "Android build job failed" && false
build_n_test_ios:
strategy:
fail-fast: false
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo lipo and rust compiler for ios target
if: ${{ !cancelled() }}
run: |
cargo install --locked cargo-lipo
rustup target add x86_64-apple-ios aarch64-apple-ios
- name: clippy
if: ${{ !cancelled() }}
run: cargo clippy --target x86_64-apple-ios --all-features -- -D warnings
- name: Build
if: ${{ !cancelled() }}
run: |
cargo lipo --verbose --all-features
- name: Abort on error
if: ${{ failure() }}
run: echo "iOS build job failed" && false
semver:
name: Check semver
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Check semver
if: ${{ !cancelled() }}
uses: obi1kenobi/cargo-semver-checks-action@v2
- name: Abort on error
if: ${{ failure() }}
run: echo "Semver check failed" && false

45
.github/workflows/tests.yml vendored Normal file

@ -0,0 +1,45 @@
on:
pull_request_review:
types: [submitted]
push:
workflow_dispatch:
pull_request_target:
types: [labeled]
name: Integration Tests
jobs:
proxy_tests:
name: Proxy Tests
runs-on: ubuntu-latest
if: (github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'safe to test')) && github.actor != 'dependabot[bot]' && github.actor != 'github-actions[bot]'
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Populate .env
env:
DOTENV: ${{ secrets.DOTENV }}
run: |
echo "$DOTENV" > tests/.env
ln -s tests/.env
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Create virtual environment
run: python -m venv venv
- name: Activate virtual environment and install dependencies
run: |
source venv/bin/activate
pip install -r tests/requirements.txt
- name: Build project
run: cargo build --release
- name: Run tests
run: |
source venv/bin/activate
python tests/tests.py

5
.gitignore vendored

@ -1,9 +1,14 @@
tun2proxy-android-libs.zip
tun2proxy-android-libs/
tun2proxy.xcframework/
.env
project.xcworkspace/
xcuserdata/
.vs/
.vscode/
.VSCodeCounter/
build/
tmp/
Cargo.lock
target/
.idea/

11
.pre-commit-config.yaml Normal file

@ -0,0 +1,11 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/rhysd/actionlint
rev: v1.7.7
hooks:
- id: actionlint

Cargo.toml

@ -1,43 +1,82 @@
[package]
name = "tun2proxy"
version = "0.2.4"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
version = "0.7.11"
edition = "2024"
license = "MIT"
repository = "https://github.com/tun2proxy/tun2proxy"
homepage = "https://github.com/tun2proxy/tun2proxy"
authors = ["B. Blechschmidt", "ssrlive"]
description = "Tunnel interface to proxy"
readme = "README.md"
rust-version = "1.85"
[lib]
crate-type = ["staticlib", "cdylib", "lib"]
[[bin]]
name = "tun2proxy-bin"
path = "src/bin/main.rs"
[[bin]]
name = "udpgw-server"
path = "src/bin/udpgw_server.rs"
required-features = ["udpgw"]
[features]
default = ["udpgw"]
udpgw = []
[dependencies]
async-recursion = "1.0"
async-trait = "0.1"
base64 = { version = "0.21" }
base64easy = "0.1"
chrono = "0.4"
clap = { version = "4.4", features = ["derive", "wrap_help", "color"] }
ctrlc2 = { version = "3.5", features = ["tokio", "termination"] }
clap = { version = "4", features = ["derive", "wrap_help", "color"] }
ctrlc2 = { version = "3.6.5", features = ["async", "termination"] }
digest_auth = "0.3"
dotenvy = "0.15"
env_logger = "0.11"
hashlink = "0.9"
httparse = "1.8"
ipstack = { version = "0.0", features = ["log"] }
hashlink = "0.10"
hickory-proto = "0.25"
httparse = "1"
ipstack = { version = "0.4" }
log = { version = "0.4", features = ["std"] }
socks5-impl = { version = "0.5" }
thiserror = "1.0"
tokio = { version = "1.36", features = ["full"] }
tproxy-config = { version = "0.1", features = ["log"] }
trust-dns-proto = "0.23"
tun2 = { version = "1.0", features = ["async"] }
udp-stream = { version = "0.0", default-features = false }
unicase = "2.7"
url = "2.5"
mimalloc = { version = "0.1", default-features = false, optional = true }
percent-encoding = "2"
shlex = "1.3.0"
socks5-impl = { version = "0.7", default-features = false, features = [
"tokio",
] }
thiserror = "2"
tokio = { version = "1", features = ["full"] }
tokio-util = "0.7"
tproxy-config = { version = "7", default-features = false }
tun = { version = "0.8", features = ["async"] }
udp-stream = { version = "0.0.12", default-features = false }
unicase = "2"
url = "2"
[target.'cfg(target_os="android")'.dependencies]
android_logger = "0.13"
android_logger = "0.15"
jni = { version = "0.21", default-features = false }
[build-dependencies]
serde_json = "1.0"
[target.'cfg(target_os="linux")'.dependencies]
bincode = "2"
serde = { version = "1", features = ["derive"] }
[[bin]]
name = "tun2proxy"
path = "src/bin/main.rs"
[target.'cfg(target_os="windows")'.dependencies]
windows-service = "0.8"
[target.'cfg(unix)'.dependencies]
daemonize = "0.5"
nix = { version = "0.30", default-features = false, features = [
"fs",
"socket",
"uio",
] }
[build-dependencies]
chrono = "0.4"
serde_json = "1"
# [profile.release]
# strip = "symbols"

61
Dockerfile Normal file

@ -0,0 +1,61 @@
####################################################################################################
# This is a multi-stage Dockerfile.
# Build with `docker buildx build -t <image-tag> --target <stage> .`
# For example, to build the Alpine-based image while naming it tun2proxy, run:
# `docker buildx build -t tun2proxy --target tun2proxy-alpine .`
####################################################################################################
####################################################################################################
## glibc builder
####################################################################################################
FROM rust:latest AS glibc-builder
WORKDIR /worker
COPY ./ .
RUN cargo build --release
####################################################################################################
## musl builder
####################################################################################################
FROM rust:latest AS musl-builder
WORKDIR /worker
COPY ./ .
RUN ARCH=$(rustc -vV | sed -nE 's/host:\s*([^-]+).*/\1/p') \
&& rustup target add "$ARCH-unknown-linux-musl" \
&& cargo build --release --target "$ARCH-unknown-linux-musl"
RUN mkdir /.etc \
&& touch /.etc/resolv.conf \
&& mkdir /.tmp \
&& chmod 777 /.tmp \
&& chmod +t /.tmp
####################################################################################################
## Alpine image
####################################################################################################
FROM alpine:latest AS tun2proxy-alpine
COPY --from=musl-builder /worker/target/*/release/tun2proxy-bin /usr/bin/tun2proxy-bin
ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
####################################################################################################
## Ubuntu image
####################################################################################################
FROM ubuntu:latest AS tun2proxy-ubuntu
COPY --from=glibc-builder /worker/target/release/tun2proxy-bin /usr/bin/tun2proxy-bin
ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
####################################################################################################
## OS-less image (default)
####################################################################################################
FROM scratch AS tun2proxy-scratch
COPY --from=musl-builder /.tmp /tmp
COPY --from=musl-builder /.etc /etc
COPY --from=musl-builder /worker/target/*/release/tun2proxy-bin /usr/bin/tun2proxy-bin
ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]

167
README.md

@ -1,5 +1,15 @@
[![tun2proxy](https://socialify.git.ci/tun2proxy/tun2proxy/image?description=1&language=1&name=1&stargazers=1&theme=Light)](https://github.com/tun2proxy/tun2proxy)
# tun2proxy
A tunnel interface for HTTP and SOCKS proxies on Linux based on [smoltcp](https://github.com/smoltcp-rs/smoltcp).
A tunnel interface for HTTP and SOCKS proxies on Linux, Android, macOS, iOS and Windows.
[![Crates.io](https://img.shields.io/crates/v/tun2proxy.svg)](https://crates.io/crates/tun2proxy)
[![tun2proxy](https://docs.rs/tun2proxy/badge.svg)](https://docs.rs/tun2proxy)
[![Documentation](https://img.shields.io/badge/docs-release-brightgreen.svg?style=flat)](https://docs.rs/tun2proxy)
[![Download](https://img.shields.io/crates/d/tun2proxy.svg)](https://crates.io/crates/tun2proxy)
[![License](https://img.shields.io/crates/l/tun2proxy.svg?style=flat)](https://github.com/tun2proxy/tun2proxy/blob/master/LICENSE)
> Additional information can be found in the [wiki](https://github.com/tun2proxy/tun2proxy/wiki)
## Features
- HTTP proxy support (unauthenticated, basic and digest auth)
@ -7,9 +17,10 @@ A tunnel interface for HTTP and SOCKS proxies on Linux based on [smoltcp](https:
- SOCKS4a and SOCKS5h support (through the virtual DNS feature)
- Minimal configuration setup for routing all traffic
- IPv4 and IPv6 support
- GFW evasion mechanism for certain use cases (see [issue #35](https://github.com/blechschmidt/tun2proxy/issues/35))
- GFW evasion mechanism for certain use cases (see [issue #35](https://github.com/tun2proxy/tun2proxy/issues/35))
- SOCKS5 UDP support
- Native support for proxying DNS over TCP
- UdpGW (UDP gateway) support for UDP over TCP, see the [wiki](https://github.com/tun2proxy/tun2proxy/wiki/UDP-gateway-feature) for more information
## Build
Clone the repository and `cd` into the project folder. Then run the following:
@ -17,22 +28,55 @@ Clone the repository and `cd` into the project folder. Then run the following:
cargo build --release
```
### Building Framework for Apple Devices
To build an XCFramework for macOS and iOS, run the following:
```
./build-apple.sh
```
## Installation
### Install from binary
Download the binary from [releases](https://github.com/tun2proxy/tun2proxy/releases) and put it in your `PATH`.
<details>
<summary>Authenticity Verification</summary>
Since v0.2.23 [build provenance attestations](https://docs.github.com/en/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds#verifying-artifact-attestations-with-the-github-cli)
are supported. These allow you to ensure that the builds have been generated from the code on GitHub through the GitHub
CI/CD pipeline. To verify the authenticity of the build files, you can use the [GitHub CLI](https://cli.github.com/):
```shell
gh attestation verify <*.zip file> --owner tun2proxy
```
</details>
### Install from source
If you have [rust](https://rustup.rs/) toolchain installed, this should work:
```shell
cargo install tun2proxy
```
> Note: On Windows, you need to copy the [wintun](https://www.wintun.net/) DLL to the same directory as the binary.
> It's `%USERPROFILE%\.cargo\bin` by default.
## Setup
## Automated Setup
Using `--setup auto`, you can have tun2proxy configure your system to automatically route all traffic through the
Using `--setup`, you can have tun2proxy configure your system to automatically route all traffic through the
specified proxy. This requires running the tool as root and will roughly perform the steps outlined in the section
describing the manual setup, except that a bind mount is used to overlay the `/etc/resolv.conf` file.
You would then run the tool as follows:
```bash
sudo ./target/release/tun2proxy --setup auto --proxy "socks5://1.2.3.4:1080"
sudo ./target/release/tun2proxy-bin --setup --proxy "socks5://1.2.3.4:1080"
```
Apart from SOCKS5, SOCKS4 and HTTP are supported.
Note that if your proxy is a non-global IP address (e.g. because the proxy is provided by some tunneling tool running
locally), you will additionally need to provide the public IP address of the server through which the traffic is
actually tunneled. In such a case, the tool will tell you to specify the address through `--bypass-ip <address>` if you
actually tunneled. In such a case, the tool will tell you to specify the address through `--bypass <IP/CIDR>` if you
wish to make use of the automated setup feature.
## Manual Setup
@ -44,9 +88,9 @@ PROXY_IP=1.2.3.4
PROXY_PORT=1080
BYPASS_IP=123.45.67.89
# Create a tunnel interface named tun0 which your user can bind to,
# Create a tunnel interface named tun0 which you can bind to,
# so we don't need to run tun2proxy as root.
sudo ip tuntap add name tun0 mode tun user $USER
sudo ip tuntap add name tun0 mode tun
sudo ip link set tun0 up
# To prevent a routing loop, we add a route to the proxy server that behaves
@ -64,17 +108,14 @@ sudo ip route add 8000::/1 dev tun0
# Make sure that DNS queries are routed through the tunnel.
sudo sh -c "echo nameserver 198.18.0.1 > /etc/resolv.conf"
./target/release/tun2proxy --tun tun0 --proxy "$PROXY_TYPE://$PROXY_IP:$PROXY_PORT"
./target/release/tun2proxy-bin --tun tun0 --proxy "$PROXY_TYPE://$PROXY_IP:$PROXY_PORT"
```
Note that if you paste these commands into a shell script, which you then run with `sudo`, you might want to replace
`$USER` with `$SUDO_USER`.
This tool implements a virtual DNS feature that is used by default. When a DNS packet to port 53 is detected, an IP
This tool implements a virtual DNS feature that is enabled with the `--dns virtual` switch. When a DNS packet to port 53 is detected, an IP
address from `198.18.0.0/15` is chosen and mapped to the query name. Connections destined for an IP address from that
range will supply the proxy with the mapped query name instead of the IP address. Since many proxies do not support UDP,
this enables an out-of-the-box experience in most cases, without relying on third-party resolvers or applications.
Depending on your use case, you may want to disable this feature using `--dns none`.
Depending on your use case, you may want to disable this feature using `--dns direct`.
In that case, you might need an additional tool like [dnsproxy](https://github.com/AdguardTeam/dnsproxy) that is
configured to listen on a local UDP port and communicates with a third-party upstream DNS server via TCP.
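
A minimal sketch of the two DNS modes, assuming a placeholder SOCKS5 proxy at `1.2.3.4:1080` and a local resolver on `127.0.0.1` (flags taken from the option listing below):

```bash
# Virtual DNS: port-53 traffic is intercepted and names are mapped into 198.18.0.0/15 by default
sudo ./target/release/tun2proxy-bin --setup --proxy "socks5://1.2.3.4:1080" --dns virtual

# Direct DNS instead, deferring resolution to an external resolver such as a local dnsproxy instance
sudo ./target/release/tun2proxy-bin --setup --proxy "socks5://1.2.3.4:1080" --dns direct --dns-addr 127.0.0.1
```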
@ -88,31 +129,64 @@ sudo ip link del tun0
```
Tunnel interface to proxy.
Usage: tun2proxy [OPTIONS] --proxy <URL>
Usage: tun2proxy-bin [OPTIONS] --proxy <URL> [ADMIN_COMMAND]...
Arguments:
[ADMIN_COMMAND]... Specify a command to run with root-like capabilities in the new namespace when using `--unshare`. This could be
useful to start additional daemons, e.g. `openvpn` instance
Options:
-t, --tun <name> Name of the tun interface [default: tun0]
--tun-fd <fd> File descriptor of the tun interface
--tun-mtu <mtu> MTU of the tun interface (only with tunnel file descriptor) [default: 1500]
-p, --proxy <URL> Proxy URL in the form proto://[username[:password]@]host:port
-d, --dns <strategy> DNS handling strategy [default: virtual] [possible values: virtual, over-tcp, direct]
--dns-addr <IP> DNS resolver address [default: 8.8.8.8]
-6, --ipv6-enabled IPv6 enabled
-s, --setup <method> Routing and system setup [default: none] [possible values: none, auto]
-b, --bypass <IP|CIDR> IPs and CIDRs used in routing setup which should bypass the tunnel
-v, --verbosity <level> Verbosity level [default: info] [possible values: off, error, warn, info, debug, trace]
-h, --help Print help
-V, --version Print version
-p, --proxy <URL> Proxy URL in the form proto://[username[:password]@]host:port, where proto is one of
socks4, socks5, http. Username and password are encoded in percent encoding. For example:
socks5://myname:pass%40word@127.0.0.1:1080
-t, --tun <name> Name of the tun interface, such as tun0, utun4, etc. If this option is not provided, the
OS will generate a random one
--tun-fd <fd> File descriptor of the tun interface
--close-fd-on-drop <true or false> Set whether to close the received raw file descriptor on drop or not. This setting is
dependent on [tun_fd] [possible values: true, false]
--unshare Create a tun interface in a newly created unprivileged namespace while maintaining proxy
connectivity via the global network namespace
--unshare-pidfile <UNSHARE_PIDFILE> Create a pidfile of `unshare` process when using `--unshare`
-6, --ipv6-enabled IPv6 enabled
-s, --setup Routing and system setup, which decides whether to setup the routing and system
configuration. This option requires root-like privileges on every platform.
It is very important on Linux, see `capabilities(7)`
-d, --dns <strategy> DNS handling strategy [default: direct] [possible values: virtual, over-tcp, direct]
--dns-addr <IP> DNS resolver address [default: 8.8.8.8]
--virtual-dns-pool <CIDR> IP address pool to be used by virtual DNS in CIDR notation [default: 198.18.0.0/15]
-b, --bypass <IP/CIDR> IPs used in routing setup which should bypass the tunnel, in the form of IP or IP/CIDR.
Multiple IPs can be specified, e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8
--tcp-timeout <seconds> TCP timeout in seconds [default: 600]
--udp-timeout <seconds> UDP timeout in seconds [default: 10]
-v, --verbosity <level> Verbosity level [default: info] [possible values: off, error, warn, info, debug, trace]
--daemonize Daemonize for unix family or run as Windows service
--exit-on-fatal-error Exit immediately when fatal error occurs, useful for running as a service
--max-sessions <number> Maximum number of sessions to be handled concurrently [default: 200]
--udpgw-server <IP:PORT> UDP gateway server address, forwards UDP packets via specified TCP server
--udpgw-connections <number> Max connections for the UDP gateway, default value is 5
--udpgw-keepalive <seconds> Keepalive interval in seconds for the UDP gateway, default value is 30
-h, --help Print help
-V, --version Print version
```
Currently, tun2proxy supports HTTP, SOCKS4/SOCKS4a and SOCKS5. A proxy is supplied to the `--proxy` argument in the
URL format. For example, an HTTP proxy at `1.2.3.4:3128` with a username of `john.doe` and a password of `secret` is
supplied as `--proxy http://john.doe:secret@1.2.3.4:3128`. This works analogously to curl's `--proxy` argument.
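
As a sketch (proxy host and credentials are placeholders), credentials containing URL-reserved characters must be percent-encoded before being passed to `--proxy`:

```bash
# Password "p@ss:word" becomes "p%40ss%3Aword" ("@" -> %40, ":" -> %3A)
sudo ./target/release/tun2proxy-bin --setup --proxy "socks5://john.doe:p%40ss%3Aword@1.2.3.4:1080"
```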
## Docker Support
## Container Support
### Docker
Tun2proxy can serve as a proxy for other Docker containers. To make use of that feature, first build the image:
```bash
docker build -t tun2proxy .
docker buildx build -t tun2proxy .
```
This will build an image containing a statically linked `tun2proxy` binary (based on `musl`) without OS.
Alternatively, you can build images based on Ubuntu or Alpine as follows:
```bash
docker buildx build -t tun2proxy --target tun2proxy-ubuntu .
docker buildx build -t tun2proxy --target tun2proxy-alpine .
```
Next, start a container from the tun2proxy image:
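
A minimal sketch of such an invocation, rather than the exact command from the README: the image tag comes from the build step above, the SOCKS5 proxy URL is a placeholder, and the device/capability flags mirror the Docker Compose example below.

```bash
docker run -d \
    --name tun2proxy \
    -v /dev/net/tun:/dev/net/tun \
    --sysctl net.ipv6.conf.default.disable_ipv6=0 \
    --cap-add NET_ADMIN \
    tun2proxy \
    --proxy "socks5://1.2.3.4:1080"
```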
@ -133,6 +207,36 @@ docker run -it \
--network "container:tun2proxy" \
ubuntu:latest
```
### Docker Compose
Create a `docker-compose.yaml` file with the following content:
```yaml
services:
tun2proxy:
volumes:
- /dev/net/tun:/dev/net/tun
sysctls:
- net.ipv6.conf.default.disable_ipv6=0
cap_add:
- NET_ADMIN
container_name: tun2proxy
image: ghcr.io/tun2proxy/tun2proxy-ubuntu:latest
command: --proxy proto://[username[:password]@]host:port
alpine:
stdin_open: true
tty: true
network_mode: container:tun2proxy
image: alpine:latest
command: apk add curl && curl ifconfig.icu && sleep 10
```
Then run the compose file
```bash
docker compose up -d tun2proxy
docker compose up alpine
```
## Configuration Tips
### DNS
@ -154,3 +258,10 @@ asked to open connections to IPv6 destinations. In such a case, you can disable
either through `sysctl -w net.ipv6.conf.all.disable_ipv6=1` and `sysctl -w net.ipv6.conf.default.disable_ipv6=1`
or through `ip -6 route del default`, which causes the `libc` resolver (and other software) to not issue DNS AAAA
requests for IPv6 addresses.
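
Collected as a runnable snippet (pick one of the two approaches described above):

```bash
# Option 1: disable IPv6 entirely so the libc resolver stops issuing AAAA queries
sudo sysctl -w net.ipv6.conf.all.disable_ipv6=1
sudo sysctl -w net.ipv6.conf.default.disable_ipv6=1

# Option 2: keep IPv6 enabled but drop the IPv6 default route instead
sudo ip -6 route del default
```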
## Contributors ✨
Thanks goes to these wonderful people:
<a href="https://github.com/tun2proxy/tun2proxy/graphs/contributors">
<img src="https://contrib.rocks/image?repo=tun2proxy/tun2proxy" />
</a>


@ -1,4 +1,4 @@
Build iOS framework
Build iOS xcframework
----------------
# Install Rust build tools
@ -8,14 +8,15 @@ Build iOS framework
- Install iOS target support: `rustup target add aarch64-apple-ios aarch64-apple-ios-sim x86_64-apple-ios`
- Install cbindgen tool: `cargo install cbindgen`
# Building iOS framework
# Building iOS xcframework
Due to an unknown reason at present, compiling Rust code inside Xcode fails, so you have to manually compile it. Please run the following command in zsh (or bash):
Run the following command in zsh (or bash):
```bash
cd tun2proxy
cargo build --release --target aarch64-apple-ios
cargo build --release --target x86_64-apple-ios
lipo -create target/aarch64-apple-ios/release/libtun2proxy.a target/x86_64-apple-ios/release/libtun2proxy.a -output target/libtun2proxy.a
cbindgen --config cbindgen.toml -l C -o target/tun2proxy-ffi.h
./build-apple.sh
```
The script `build-apple.sh` will build the iOS/macOS xcframework and output it to `./tun2proxy.xcframework`.
To save build time, you can use the `build-aarch64-apple-ios-debug.sh` or `build-aarch64-apple-ios.sh` script
to build the `aarch64-apple-ios` target only.


@ -1,398 +0,0 @@
// !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 55;
objects = {
/* Begin PBXBuildFile section */
B648A35929F43D110045B334 /* Tun2proxyWrapper.m in Sources */ = {isa = PBXBuildFile; fileRef = B648A35829F43D110045B334 /* Tun2proxyWrapper.m */; };
B648A35B29F43DDB0045B334 /* Tun2proxyWrapper.h in Headers */ = {isa = PBXBuildFile; fileRef = B648A35A29F43DDB0045B334 /* Tun2proxyWrapper.h */; settings = {ATTRIBUTES = (Public, ); }; };
B692ACC929F7EA4C006BF04D /* libtun2proxy.a in Frameworks */ = {isa = PBXBuildFile; fileRef = B692ACC829F7EA4C006BF04D /* libtun2proxy.a */; };
B6DE654429F4255A00468184 /* tun2proxy.h in Headers */ = {isa = PBXBuildFile; fileRef = B6DE654329F4255A00468184 /* tun2proxy.h */; settings = {ATTRIBUTES = (Public, ); }; };
/* End PBXBuildFile section */
/* Begin PBXFileReference section */
B648A35829F43D110045B334 /* Tun2proxyWrapper.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = Tun2proxyWrapper.m; sourceTree = "<group>"; };
B648A35A29F43DDB0045B334 /* Tun2proxyWrapper.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Tun2proxyWrapper.h; sourceTree = "<group>"; };
B692ACC829F7EA4C006BF04D /* libtun2proxy.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libtun2proxy.a; path = ../target/libtun2proxy.a; sourceTree = "<group>"; };
B6DE654029F4255A00468184 /* tun2proxy.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = tun2proxy.framework; sourceTree = BUILT_PRODUCTS_DIR; };
B6DE654329F4255A00468184 /* tun2proxy.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = tun2proxy.h; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
B6DE653D29F4255A00468184 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
B692ACC929F7EA4C006BF04D /* libtun2proxy.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
B692ACC729F7EA4C006BF04D /* Frameworks */ = {
isa = PBXGroup;
children = (
B692ACC829F7EA4C006BF04D /* libtun2proxy.a */,
);
name = Frameworks;
sourceTree = "<group>";
};
B6DE653629F4255A00468184 = {
isa = PBXGroup;
children = (
B6DE654229F4255A00468184 /* tun2proxy */,
B6DE654129F4255A00468184 /* Products */,
B692ACC729F7EA4C006BF04D /* Frameworks */,
);
sourceTree = "<group>";
};
B6DE654129F4255A00468184 /* Products */ = {
isa = PBXGroup;
children = (
B6DE654029F4255A00468184 /* tun2proxy.framework */,
);
name = Products;
sourceTree = "<group>";
};
B6DE654229F4255A00468184 /* tun2proxy */ = {
isa = PBXGroup;
children = (
B6DE654329F4255A00468184 /* tun2proxy.h */,
B648A35829F43D110045B334 /* Tun2proxyWrapper.m */,
B648A35A29F43DDB0045B334 /* Tun2proxyWrapper.h */,
);
path = tun2proxy;
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXHeadersBuildPhase section */
B6DE653B29F4255A00468184 /* Headers */ = {
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
B648A35B29F43DDB0045B334 /* Tun2proxyWrapper.h in Headers */,
B6DE654429F4255A00468184 /* tun2proxy.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXHeadersBuildPhase section */
/* Begin PBXNativeTarget section */
B6DE653F29F4255A00468184 /* tun2proxy */ = {
isa = PBXNativeTarget;
buildConfigurationList = B6DE654729F4255A00468184 /* Build configuration list for PBXNativeTarget "tun2proxy" */;
buildPhases = (
B692ACB329F7E203006BF04D /* Run Script */,
B6DE653B29F4255A00468184 /* Headers */,
B6DE653C29F4255A00468184 /* Sources */,
B6DE653D29F4255A00468184 /* Frameworks */,
B6DE653E29F4255A00468184 /* Resources */,
);
buildRules = (
);
dependencies = (
);
name = tun2proxy;
productName = tun2proxy;
productReference = B6DE654029F4255A00468184 /* tun2proxy.framework */;
productType = "com.apple.product-type.framework";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
B6DE653729F4255A00468184 /* Project object */ = {
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = 1;
LastUpgradeCheck = 1430;
TargetAttributes = {
B6DE653F29F4255A00468184 = {
CreatedOnToolsVersion = 13.2.1;
};
};
};
buildConfigurationList = B6DE653A29F4255A00468184 /* Build configuration list for PBXProject "tun2proxy" */;
compatibilityVersion = "Xcode 13.0";
developmentRegion = en;
hasScannedForEncodings = 0;
knownRegions = (
en,
Base,
);
mainGroup = B6DE653629F4255A00468184;
productRefGroup = B6DE654129F4255A00468184 /* Products */;
projectDirPath = "";
projectRoot = "";
targets = (
B6DE653F29F4255A00468184 /* tun2proxy */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
B6DE653E29F4255A00468184 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXShellScriptBuildPhase section */
B692ACB329F7E203006BF04D /* Run Script */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputFileListPaths = (
);
inputPaths = (
);
name = "Run Script";
outputFileListPaths = (
);
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/bash;
shellScript = "set -e\nPATH=\"$PATH:${HOME}/.cargo/bin\"\nRUST_PROJ=${PROJECT_DIR}/..\ncd \"${RUST_PROJ}\"\ncargo build --release --target aarch64-apple-ios\ncargo build --release --target x86_64-apple-ios\nlipo -create target/aarch64-apple-ios/release/libtun2proxy.a target/x86_64-apple-ios/release/libtun2proxy.a -output target/libtun2proxy.a\ncbindgen --config cbindgen.toml -l C -o target/tun2proxy-ffi.h\n";
};
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
B6DE653C29F4255A00468184 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
B648A35929F43D110045B334 /* Tun2proxyWrapper.m in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin XCBuildConfiguration section */
B6DE654529F4255A00468184 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++17";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
CURRENT_PROJECT_VERSION = 1;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"DEBUG=1",
"$(inherited)",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 11.0;
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
MTL_FAST_MATH = YES;
ONLY_ACTIVE_ARCH = YES;
SDKROOT = iphoneos;
VERSIONING_SYSTEM = "apple-generic";
VERSION_INFO_PREFIX = "";
};
name = Debug;
};
B6DE654629F4255A00468184 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++17";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
CURRENT_PROJECT_VERSION = 1;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 11.0;
MTL_ENABLE_DEBUG_INFO = NO;
MTL_FAST_MATH = YES;
SDKROOT = iphoneos;
VALIDATE_PRODUCT = YES;
VERSIONING_SYSTEM = "apple-generic";
VERSION_INFO_PREFIX = "";
};
name = Release;
};
B6DE654829F4255A00468184 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_IDENTITY = "";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1;
DEFINES_MODULE = YES;
DEVELOPMENT_TEAM = "";
DYLIB_COMPATIBILITY_VERSION = 1;
DYLIB_CURRENT_VERSION = 1;
DYLIB_INSTALL_NAME_BASE = "@rpath";
ENABLE_BITCODE = NO;
ENABLE_MODULE_VERIFIER = YES;
GENERATE_INFOPLIST_FILE = YES;
HEADER_SEARCH_PATHS = "";
INFOPLIST_KEY_NSHumanReadableCopyright = "";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
LIBRARY_SEARCH_PATHS = (
../target,
"$(PROJECT_DIR)/../target",
);
MARKETING_VERSION = 1.0;
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++17";
PRODUCT_BUNDLE_IDENTIFIER = com.ssrlive.tun2proxy;
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
SKIP_INSTALL = YES;
SWIFT_EMIT_LOC_STRINGS = YES;
TARGETED_DEVICE_FAMILY = "1,2";
USER_HEADER_SEARCH_PATHS = ../target;
};
name = Debug;
};
B6DE654929F4255A00468184 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
CODE_SIGN_IDENTITY = "";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1;
DEFINES_MODULE = YES;
DEVELOPMENT_TEAM = "";
DYLIB_COMPATIBILITY_VERSION = 1;
DYLIB_CURRENT_VERSION = 1;
DYLIB_INSTALL_NAME_BASE = "@rpath";
ENABLE_BITCODE = NO;
ENABLE_MODULE_VERIFIER = YES;
GENERATE_INFOPLIST_FILE = YES;
HEADER_SEARCH_PATHS = "";
INFOPLIST_KEY_NSHumanReadableCopyright = "";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
LIBRARY_SEARCH_PATHS = (
../target,
"$(PROJECT_DIR)/../target",
);
MARKETING_VERSION = 1.0;
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++17";
PRODUCT_BUNDLE_IDENTIFIER = com.ssrlive.tun2proxy;
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
SKIP_INSTALL = YES;
SWIFT_EMIT_LOC_STRINGS = YES;
TARGETED_DEVICE_FAMILY = "1,2";
USER_HEADER_SEARCH_PATHS = ../target;
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
B6DE653A29F4255A00468184 /* Build configuration list for PBXProject "tun2proxy" */ = {
isa = XCConfigurationList;
buildConfigurations = (
B6DE654529F4255A00468184 /* Debug */,
B6DE654629F4255A00468184 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
B6DE654729F4255A00468184 /* Build configuration list for PBXNativeTarget "tun2proxy" */ = {
isa = XCConfigurationList;
buildConfigurations = (
B6DE654829F4255A00468184 /* Debug */,
B6DE654929F4255A00468184 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = B6DE653729F4255A00468184 /* Project object */;
}


@ -1,22 +0,0 @@
//
// Tun2proxyWrapper.h
// tun2proxy
//
// Created by ssrlive on 2023/4/23.
//
#ifndef Tun2proxyWrapper_h
#define Tun2proxyWrapper_h
@interface Tun2proxyWrapper : NSObject
+ (void)startWithConfig:(NSString *)proxy_url
tun_fd:(int)tun_fd
tun_mtu:(uint32_t)tun_mtu
dns_over_tcp:(bool)dns_over_tcp
verbose:(bool)verbose;
+ (void) shutdown;
@end
#endif /* Tun2proxyWrapper_h */


@ -1,27 +0,0 @@
//
// Tun2proxyWrapper.m
// tun2proxy
//
// Created by ssrlive on 2023/4/23.
//
#import <Foundation/Foundation.h>
#import "Tun2proxyWrapper.h"
#include "tun2proxy-ffi.h"
@implementation Tun2proxyWrapper
+ (void)startWithConfig:(NSString *)proxy_url
tun_fd:(int)tun_fd
tun_mtu:(uint32_t)tun_mtu
dns_strategy:(ArgDns)dns_strategy
verbosity:(ArgVerbosity)verbosity {
tun2proxy_run(proxy_url.UTF8String, tun_fd, tun_mtu, dns_strategy, verbosity);
}
+ (void)shutdown {
tun2proxy_stop();
}
@end


@ -1,18 +0,0 @@
//
// tun2proxy.h
// tun2proxy
//
// Created by tun2proxy on 2023/4/22.
//
#import <Foundation/Foundation.h>
//! Project version number for tun2proxy.
FOUNDATION_EXPORT double tun2proxyVersionNumber;
//! Project version string for tun2proxy.
FOUNDATION_EXPORT const unsigned char tun2proxyVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <tun2proxy/PublicHeader.h>
#import <tun2proxy/Tun2proxyWrapper.h>


@ -0,0 +1,26 @@
#! /bin/sh
echo "Setting up the rust environment..."
rustup target add aarch64-apple-ios
cargo install cbindgen
echo "Building target aarch64-apple-ios..."
cargo build --target aarch64-apple-ios --features mimalloc
echo "Generating includes..."
mkdir -p target/include/
rm -rf target/include/*
cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy {
umbrella header "tun2proxy.h"
export *
module * { export * }
}
EOF
echo "Creating XCFramework"
rm -rf ./tun2proxy.xcframework
xcodebuild -create-xcframework \
-library ./target/aarch64-apple-ios/debug/libtun2proxy.a -headers ./target/include/ \
-output ./tun2proxy.xcframework

build-aarch64-apple-ios.sh Executable file

@ -0,0 +1,26 @@
#! /bin/sh
echo "Setting up the rust environment..."
rustup target add aarch64-apple-ios
cargo install cbindgen
echo "Building target aarch64-apple-ios..."
cargo build --release --target aarch64-apple-ios --features mimalloc
echo "Generating includes..."
mkdir -p target/include/
rm -rf target/include/*
cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy {
umbrella header "tun2proxy.h"
export *
module * { export * }
}
EOF
echo "Creating XCFramework"
rm -rf ./tun2proxy.xcframework
xcodebuild -create-xcframework \
-library ./target/aarch64-apple-ios/release/libtun2proxy.a -headers ./target/include/ \
-output ./tun2proxy.xcframework

build-android.sh Executable file

@ -0,0 +1,129 @@
#! /bin/bash
work_dir=$(pwd)
ANDROID_API_VERSION=21
# NDK homepage: https://developer.android.com/ndk/downloads#lts-downloads
ANDROID_NDK_VERSION=26.3.11579264
# Android commandline tools homepage: https://developer.android.com/studio/index.html#command-line-tools-only
CMDLINE_TOOLS_VERSION=6858069
export ANDROID_HOME=/tmp/Android/sdk
export NDK_HOME=${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}
export PATH=$ANDROID_HOME/cmdline-tools/bin:$PATH
mkdir -p $ANDROID_HOME
name=tun2proxy
BASE=`dirname "$0"`
android_libs=$BASE/${name}-android-libs
mkdir -p $android_libs
function setup_env() {
cargo install cbindgen
apt update && apt install -y make llvm-dev libclang-dev clang pkg-config zip unzip curl default-jdk build-essential
cd /tmp/
curl -OL https://dl.google.com/android/repository/commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip
rm -rf /tmp/cmdline-tools
unzip commandlinetools-linux-${CMDLINE_TOOLS_VERSION}_latest.zip
rm -rf $ANDROID_HOME/cmdline-tools
mv cmdline-tools $ANDROID_HOME
yes | sdkmanager --sdk_root=$ANDROID_HOME --licenses
if [ $? -ne 0 ]; then
echo "Failed to accept the licenses"
exit 1
fi
sdkmanager --sdk_root=$ANDROID_HOME "ndk;${ANDROID_NDK_VERSION}" "platforms;android-${ANDROID_API_VERSION}"
if [ $? -ne 0 ]; then
echo "Failed to install NDK"
exit 1
fi
}
function build_android() {
local manifest=./Cargo.toml
local mode=--release
local mode2=release
local targets=
if [ ! -z "$2" ]; then
targets="$2"
else
targets="aarch64-linux-android armv7-linux-androideabi x86_64-linux-android i686-linux-android"
fi
for target in $targets; do
rustup target add $target
done
if [ "$1" = "debug" ]; then
mode=
mode2=debug
fi
local BASE=`dirname "$0"`
local HOST_OS=`uname -s | tr "[:upper:]" "[:lower:]"`
local HOST_ARCH=`uname -m | tr "[:upper:]" "[:lower:]"`
local android_tools="$NDK_HOME/toolchains/llvm/prebuilt/$HOST_OS-$HOST_ARCH/bin"
export PATH="${android_tools}/":$PATH
for target in $targets; do
local target_dir=
case $target in
'armv7-linux-androideabi')
export CC_armv7_linux_androideabi="$android_tools/armv7a-linux-androideabi${ANDROID_API_VERSION}-clang"
export AR_armv7_linux_androideabi="$android_tools/llvm-ar"
export CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="$android_tools/armv7a-linux-androideabi${ANDROID_API_VERSION}-clang"
target_dir=armeabi-v7a
;;
'x86_64-linux-android')
export CC_x86_64_linux_android="$android_tools/${target}${ANDROID_API_VERSION}-clang"
export AR_x86_64_linux_android="$android_tools/llvm-ar"
export CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="$android_tools/${target}${ANDROID_API_VERSION}-clang"
target_dir=x86_64
;;
'aarch64-linux-android')
export CC_aarch64_linux_android="$android_tools/${target}${ANDROID_API_VERSION}-clang"
export AR_aarch64_linux_android="$android_tools/llvm-ar"
export CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$android_tools/${target}${ANDROID_API_VERSION}-clang"
target_dir=arm64-v8a
;;
'i686-linux-android')
export CC_i686_linux_android="$android_tools/${target}${ANDROID_API_VERSION}-clang"
export AR_i686_linux_android="$android_tools/llvm-ar"
export CARGO_TARGET_I686_LINUX_ANDROID_LINKER="$android_tools/${target}${ANDROID_API_VERSION}-clang"
target_dir=x86
;;
*)
echo "Unknown target $target"
;;
esac
cargo build --target $target $mode
if [ $? -ne 0 ]; then
echo "Failed to build for target $target"
exit 1
fi
mkdir -p $android_libs/$target_dir
cp $BASE/target/$target/${mode2}/lib${name}.so $android_libs/${target_dir}/lib${name}.so
cp $BASE/target/$target/${mode2}/lib${name}.a $android_libs/${target_dir}/lib${name}.a
done
cbindgen -c $BASE/cbindgen.toml -o $android_libs/$name.h
}
function main() {
echo "Setting up the build environment..."
setup_env
cd $work_dir
echo "build android target"
build_android "$@"
cd $work_dir
echo "Creating zip file"
rm -rf ${name}-android-libs.zip
zip -r ${name}-android-libs.zip ${name}-android-libs
}
main "$@"

build-apple.sh Executable file

@ -0,0 +1,56 @@
#! /bin/sh
echo "Setting up the rust environment..."
rustup target add aarch64-apple-ios aarch64-apple-ios-sim x86_64-apple-ios x86_64-apple-darwin aarch64-apple-darwin
cargo install cbindgen
echo "Building..."
echo "cargo build --release --target x86_64-apple-darwin"
cargo build --release --target x86_64-apple-darwin
echo "cargo build --release --target aarch64-apple-darwin"
cargo build --release --target aarch64-apple-darwin
echo "cargo build --release --target aarch64-apple-ios"
cargo build --release --target aarch64-apple-ios --features mimalloc
echo "cargo build --release --target x86_64-apple-ios"
cargo build --release --target x86_64-apple-ios
echo "cargo build --release --target x86_64-apple-ios-sim"
cargo build --release --target aarch64-apple-ios-sim
echo "Generating includes..."
mkdir -p target/include/
rm -rf target/include/*
cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy {
umbrella header "tun2proxy.h"
export *
module * { export * }
}
EOF
echo "lipo..."
echo "Simulator"
lipo -create \
target/aarch64-apple-ios-sim/release/libtun2proxy.a \
target/x86_64-apple-ios/release/libtun2proxy.a \
-output ./target/libtun2proxy-ios-sim.a
echo "MacOS"
lipo -create \
target/aarch64-apple-darwin/release/libtun2proxy.a \
target/x86_64-apple-darwin/release/libtun2proxy.a \
-output ./target/libtun2proxy-macos.a
echo "Creating XCFramework"
rm -rf ./tun2proxy.xcframework
xcodebuild -create-xcframework \
-library ./target/aarch64-apple-ios/release/libtun2proxy.a -headers ./target/include/ \
-library ./target/libtun2proxy-ios-sim.a -headers ./target/include/ \
-library ./target/libtun2proxy-macos.a -headers ./target/include/ \
-output ./tun2proxy.xcframework


@ -1,12 +1,21 @@
fn main() -> Result<(), Box<dyn std::error::Error>> {
if let Ok(git_hash) = get_git_hash() {
// Set the environment variables
println!("cargo:rustc-env=GIT_HASH={}", git_hash.trim());
}
// Get the build time
let build_time = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
println!("cargo:rustc-env=BUILD_TIME={build_time}");
#[cfg(target_os = "windows")]
if let Ok(cargo_target_dir) = get_cargo_target_dir() {
let mut f = std::fs::File::create(cargo_target_dir.join("build.log"))?;
use std::io::Write;
f.write_all(format!("CARGO_TARGET_DIR: '{}'\r\n", cargo_target_dir.display()).as_bytes())?;
// The wintun crate's root directory
let crate_dir = get_crate_dir("wintun")?;
// The wintun-bindings crate's root directory
let crate_dir = get_crate_dir("wintun-bindings")?;
// The path to the DLL file, relative to the crate root, depending on the target architecture
let dll_path = get_wintun_bin_relative_path()?;
@ -19,9 +28,13 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
// Copy to the target directory
if let Err(e) = std::fs::copy(src_path, &dst_path) {
f.write_all(format!("Failed to copy 'wintun.dll': {}\r\n", e).as_bytes())?;
f.write_all(format!("Failed to copy 'wintun.dll': {e}\r\n").as_bytes())?;
} else {
f.write_all(format!("Copied 'wintun.dll' to '{}'\r\n", dst_path.display()).as_bytes())?;
// Set the modified time to the current time, or the publishing process will fail.
let file = std::fs::OpenOptions::new().write(true).open(&dst_path)?;
file.set_modified(std::time::SystemTime::now())?;
}
}
Ok(())
@ -43,19 +56,18 @@ fn get_cargo_target_dir() -> Result<std::path::PathBuf, Box<dyn std::error::Erro
Ok(target_dir.ok_or("not found")?.to_path_buf())
}
#[cfg(target_os = "windows")]
#[allow(dead_code)]
fn get_wintun_bin_relative_path() -> Result<std::path::PathBuf, Box<dyn std::error::Error>> {
let dll_path = if cfg!(target_arch = "x86") {
"wintun/bin/x86/wintun.dll"
} else if cfg!(target_arch = "x86_64") {
"wintun/bin/amd64/wintun.dll"
} else if cfg!(target_arch = "arm") {
"wintun/bin/arm/wintun.dll"
} else if cfg!(target_arch = "aarch64") {
"wintun/bin/arm64/wintun.dll"
} else {
return Err("Unsupported architecture".into());
let target_arch = std::env::var("CARGO_CFG_TARGET_ARCH")?;
let dll_path = match target_arch.as_str() {
"x86" => "wintun/bin/x86/wintun.dll",
"x86_64" => "wintun/bin/amd64/wintun.dll",
"arm" => "wintun/bin/arm/wintun.dll",
"aarch64" => "wintun/bin/arm64/wintun.dll",
_ => return Err("Unsupported architecture".into()),
};
Ok(dll_path.into())
}
@ -82,3 +94,10 @@ fn get_crate_dir(crate_name: &str) -> Result<std::path::PathBuf, Box<dyn std::er
}
Ok(crate_dir.ok_or("crate_dir")?)
}
fn get_git_hash() -> std::io::Result<String> {
use std::process::Command;
let git_hash = Command::new("git").args(["rev-parse", "--short", "HEAD"]).output()?.stdout;
let git_hash = String::from_utf8(git_hash).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
Ok(git_hash)
}


@ -1,6 +1,25 @@
language = "C"
cpp_compat = true
[export]
include = ["tun2proxy_run", "tun2proxy_stop", "tun2proxy_set_log_callback"]
include = [
"tun2proxy_run_with_cli",
"tun2proxy_with_fd_run",
"tun2proxy_with_name_run",
"tun2proxy_stop",
"tun2proxy_set_log_callback",
"tun2proxy_set_traffic_status_callback",
]
exclude = [
"Java_com_github_shadowsocks_bg_Tun2proxy_run",
"Java_com_github_shadowsocks_bg_Tun2proxy_stop",
"UdpFlag",
]
[export.rename]
"ArgVerbosity" = "Tun2proxyVerbosity"
"ArgDns" = "Tun2proxyDns"
"TrafficStatus" = "Tun2proxyTrafficStatus"
[enum]
prefix_with_name = true


@ -8,7 +8,7 @@ echo $SCRIPT_DIR
netns="test"
dante="danted"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
ip netns add "$netns"
@ -39,7 +39,7 @@ sleep 1
ip tuntap add name tun0 mode tun
ip link set tun0 up
ip route add 10.0.0.4 dev tun0
"$tun2proxy" --proxy socks5://10.0.0.3:10800 -v off &
"$tun2proxy" --tun tun0 --proxy socks5://10.0.0.3:10800 -v off &
sleep 3
@ -51,4 +51,4 @@ sleep 3
iperf3 -c 10.0.0.4 -P 10
# Clean up
# sudo sh -c "pkill tun2proxy; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"
# sudo sh -c "pkill tun2proxy-bin; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"


@ -30,7 +30,7 @@ function core_function() {
else
trap 'echo "" && echo "tun2proxy exited with code: $?" && restore' EXIT
local SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
local APP_BIN_PATH="${SCRIPT_DIR}/../target/release/tun2proxy"
local APP_BIN_PATH="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
"${APP_BIN_PATH}" --tun tun0 --proxy "${PROXY_TYPE}://${PROXY_IP}:${PROXY_PORT}" -v trace
fi
}


@ -29,7 +29,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
netns="test"
dante="danted"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
ip netns add "$netns"
@ -60,7 +60,7 @@ sleep 1
ip tuntap add name tun0 mode tun
ip link set tun0 up
ip route add 10.0.0.4 dev tun0
"$tun2proxy" --proxy socks5://10.0.0.3:10800 -v off &
"$tun2proxy" --tun tun0 --proxy socks5://10.0.0.3:10800 -v off &
sleep 3
@ -80,4 +80,4 @@ sleep 3
rperf -c 10.0.0.4 -v trace -P 1 -u -r
# Clean up
# sudo sh -c "pkill tun2proxy; pkill rperf; pkill danted; ip link del tun0; ip netns del test"
# sudo sh -c "pkill tun2proxy-bin; pkill rperf; pkill danted; ip link del tun0; ip netns del test"


@ -1,26 +1,34 @@
#![cfg(target_os = "android")]
use crate::{
Args,
args::ArgProxy,
error::{Error, Result},
Args,
};
use jni::{
objects::{JClass, JString},
sys::jint,
JNIEnv,
objects::{JClass, JString},
sys::{jboolean, jchar, jint},
};
/// # Safety
///
/// Running tun2proxy
#[no_mangle]
/// Running tun2proxy with some arguments
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun_fd: the tun file descriptor, it will be owned by tun2proxy
/// - close_fd_on_drop: whether to close the tun_fd on drop
/// - tun_mtu: the tun mtu
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[unsafe(no_mangle)]
pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_run(
mut env: JNIEnv,
_clazz: JClass,
proxy_url: JString,
tun_fd: jint,
tun_mtu: jint,
close_fd_on_drop: jboolean,
tun_mtu: jchar,
verbosity: jint,
dns_strategy: jint,
) -> jint {
@ -35,22 +43,26 @@ pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_run(
.with_filter(filter),
);
let proxy_url = get_java_string(&mut env, &proxy_url).unwrap();
let proxy = ArgProxy::from_url(proxy_url).unwrap();
let proxy = ArgProxy::try_from(proxy_url.as_str()).unwrap();
let close_fd_on_drop = close_fd_on_drop != 0;
let args = Args::new(Some(tun_fd), proxy, dns, verbosity);
crate::api::tun2proxy_internal_run(args, tun_mtu as _)
let mut args = Args::default();
args.proxy(proxy)
.tun_fd(Some(tun_fd))
.close_fd_on_drop(close_fd_on_drop)
.dns(dns)
.verbosity(verbosity);
crate::general_api::general_run_for_api(args, tun_mtu, false)
}
/// # Safety
///
/// Shutdown tun2proxy
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_stop(_env: JNIEnv, _: JClass) -> jint {
crate::api::tun2proxy_internal_stop()
crate::general_api::tun2proxy_stop_internal()
}
unsafe fn get_java_string<'a>(env: &'a mut JNIEnv, string: &'a JString) -> Result<&'a str, Error> {
let str_ptr = env.get_string(string)?.as_ptr();
let s: &str = std::ffi::CStr::from_ptr(str_ptr).to_str()?;
Ok(s)
fn get_java_string(env: &mut JNIEnv, string: &JString) -> Result<String, Error> {
Ok(env.get_string(string)?.into())
}


@ -1,70 +0,0 @@
#![cfg(any(target_os = "ios", target_os = "android"))]
use crate::{Args, Builder, Quit};
use std::{os::raw::c_int, sync::Arc};
static mut TUN_QUIT: Option<Arc<Quit>> = None;
pub(crate) fn tun2proxy_internal_run(args: Args, tun_mtu: usize) -> c_int {
if unsafe { TUN_QUIT.is_some() } {
log::error!("tun2proxy already started");
return -1;
}
let block = async move {
log::info!("Proxy {} server: {}", args.proxy.proxy_type, args.proxy.addr);
let mut config = tun2::Configuration::default();
config.raw_fd(args.tun_fd.ok_or(crate::Error::from("tun_fd"))?);
let device = tun2::create_as_async(&config).map_err(std::io::Error::from)?;
#[cfg(target_os = "android")]
let tun2proxy = Builder::new(device, args).mtu(tun_mtu).build();
#[cfg(target_os = "ios")]
let tun2proxy = Builder::new(device, args).mtu(tun_mtu).build();
let (join_handle, quit) = tun2proxy.start();
unsafe { TUN_QUIT = Some(Arc::new(quit)) };
join_handle.await
};
match tokio::runtime::Builder::new_multi_thread().enable_all().build() {
Err(_err) => {
log::error!("failed to create tokio runtime with error: {:?}", _err);
-1
}
Ok(rt) => match rt.block_on(block) {
Ok(_) => 0,
Err(_err) => {
log::error!("failed to run tun2proxy with error: {:?}", _err);
-2
}
},
}
}
pub(crate) fn tun2proxy_internal_stop() -> c_int {
let res = match unsafe { &TUN_QUIT } {
None => {
log::error!("tun2proxy not started");
-1
}
Some(tun_quit) => match tokio::runtime::Builder::new_multi_thread().enable_all().build() {
Err(_err) => {
log::error!("failed to create tokio runtime with error: {:?}", _err);
-2
}
Ok(rt) => match rt.block_on(async move { tun_quit.trigger().await }) {
Ok(_) => 0,
Err(_err) => {
log::error!("failed to stop tun2proxy with error: {:?}", _err);
-3
}
},
},
};
unsafe { TUN_QUIT = None };
res
}


@ -1,33 +1,84 @@
use crate::{Error, Result};
use socks5_impl::protocol::UserKey;
use tproxy_config::IpCidr;
#[cfg(target_os = "linux")]
use std::ffi::OsString;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use tproxy_config::TUN_NAME;
use std::str::FromStr;
#[macro_export]
macro_rules! version_info {
() => {
concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_HASH"), " ", env!("BUILD_TIME"), ")")
};
}
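// At compile time this expands to a string of the form
// "<crate version> (<git short hash> <build time>)", with GIT_HASH and BUILD_TIME
// injected by build.rs via cargo:rustc-env (see the build.rs diff above).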
fn about_info() -> &'static str {
concat!("Tunnel interface to proxy.\nVersion: ", version_info!())
}
#[derive(Debug, Clone, clap::Parser)]
#[command(author, version, about = "tun2proxy application.", long_about = None)]
#[command(author, version = version_info!(), about = about_info(), long_about = None)]
pub struct Args {
/// Proxy URL in the form proto://[username[:password]@]host:port,
/// where proto is one of socks4, socks5, http. For example:
/// socks5://myname:password@127.0.0.1:1080
#[arg(short, long, value_parser = ArgProxy::from_url, value_name = "URL")]
/// where proto is one of socks4, socks5, http.
/// Username and password are encoded in percent encoding. For example:
/// socks5://myname:pass%40word@127.0.0.1:1080
#[arg(short, long, value_parser = |s: &str| ArgProxy::try_from(s), value_name = "URL")]
pub proxy: ArgProxy,
/// Name of the tun interface
#[arg(short, long, value_name = "name", conflicts_with = "tun_fd", default_value = TUN_NAME)]
pub tun: String,
/// Name of the tun interface, such as tun0, utun4, etc.
/// If this option is not provided, the OS will generate a random one.
#[arg(short, long, value_name = "name", value_parser = validate_tun)]
#[cfg_attr(unix, arg(conflicts_with = "tun_fd"))]
pub tun: Option<String>,
/// File descriptor of the tun interface
#[cfg(unix)]
#[arg(long, value_name = "fd", conflicts_with = "tun")]
pub tun_fd: Option<i32>,
/// Set whether to close the received raw file descriptor on drop or not.
/// This setting is dependent on [tun_fd].
#[cfg(unix)]
#[arg(long, value_name = "true or false", conflicts_with = "tun", requires = "tun_fd")]
pub close_fd_on_drop: Option<bool>,
/// Create a tun interface in a newly created unprivileged namespace
/// while maintaining proxy connectivity via the global network namespace.
#[cfg(target_os = "linux")]
#[arg(long)]
pub unshare: bool,
/// Create a pidfile of `unshare` process when using `--unshare`.
#[cfg(target_os = "linux")]
#[arg(long)]
pub unshare_pidfile: Option<String>,
/// File descriptor for UNIX datagram socket meant to transfer
/// network sockets from global namespace to the new one.
/// See `unshare(1)`, `namespaces(7)`, `sendmsg(2)`, `unix(7)`.
#[cfg(target_os = "linux")]
#[arg(long, value_name = "fd", hide(true))]
pub socket_transfer_fd: Option<i32>,
/// Specify a command to run with root-like capabilities in the new namespace
/// when using `--unshare`.
/// This could be useful to start additional daemons, e.g. `openvpn` instance.
#[cfg(target_os = "linux")]
#[arg(requires = "unshare")]
pub admin_command: Vec<OsString>,
/// IPv6 enabled
#[arg(short = '6', long)]
pub ipv6_enabled: bool,
#[cfg(target_os = "linux")]
/// Routing and system setup, which decides whether to setup the routing and system configuration.
/// This option requires root-like privileges on every platform.
/// It is very important on Linux, see `capabilities(7)`.
#[arg(short, long)]
/// Routing and system setup, which decides whether to setup the routing and system configuration,
/// this option requires root privileges
pub setup: bool,
/// DNS handling strategy
@ -38,46 +89,181 @@ pub struct Args {
#[arg(long, value_name = "IP", default_value = "8.8.8.8")]
pub dns_addr: IpAddr,
/// IPs used in routing setup which should bypass the tunnel
#[arg(short, long, value_name = "IP")]
pub bypass: Vec<IpAddr>,
/// IP address pool to be used by virtual DNS in CIDR notation.
#[arg(long, value_name = "CIDR", default_value = "198.18.0.0/15")]
pub virtual_dns_pool: IpCidr,
/// IPs used in routing setup which should bypass the tunnel,
/// in the form of IP or IP/CIDR. Multiple IPs can be specified,
/// e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8
#[arg(short, long, value_name = "IP/CIDR")]
pub bypass: Vec<IpCidr>,
/// TCP timeout in seconds
#[arg(long, value_name = "seconds", default_value = "600")]
pub tcp_timeout: u64,
/// UDP timeout in seconds
#[arg(long, value_name = "seconds", default_value = "10")]
pub udp_timeout: u64,
/// Verbosity level
#[arg(short, long, value_name = "level", value_enum, default_value = "info")]
pub verbosity: ArgVerbosity,
/// Daemonize for unix family or run as Windows service
#[arg(long)]
pub daemonize: bool,
/// Exit immediately when fatal error occurs, useful for running as a service
#[arg(long)]
pub exit_on_fatal_error: bool,
/// Maximum number of sessions to be handled concurrently
#[arg(long, value_name = "number", default_value = "200")]
pub max_sessions: usize,
/// UDP gateway server address, forwards UDP packets via specified TCP server
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "IP:PORT")]
pub udpgw_server: Option<SocketAddr>,
/// Max connections for the UDP gateway, default value is 5
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "number", requires = "udpgw_server")]
pub udpgw_connections: Option<usize>,
/// Keepalive interval in seconds for the UDP gateway, default value is 30
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "seconds", requires = "udpgw_server")]
pub udpgw_keepalive: Option<u64>,
}
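// Example invocation (illustrative; see also tun2proxy_run_with_cli_args in src/general_api.rs below):
//   tun2proxy-bin --proxy socks5://127.0.0.1:1080 --tun tun0 --dns over-tcp --bypass 10.0.0.0/8 -v info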
fn validate_tun(p: &str) -> Result<String> {
#[cfg(target_os = "macos")]
if p.len() <= 4 || &p[..4] != "utun" {
return Err(Error::from("Invalid tun interface name, please use utunX"));
}
Ok(p.to_string())
}
impl Default for Args {
fn default() -> Self {
#[cfg(target_os = "linux")]
let setup = false;
#[cfg(not(target_os = "linux"))]
let setup = true;
Args {
proxy: ArgProxy::default(),
tun: TUN_NAME.to_string(),
tun: None,
#[cfg(unix)]
tun_fd: None,
ipv6_enabled: false,
#[cfg(unix)]
close_fd_on_drop: None,
#[cfg(target_os = "linux")]
setup: false,
unshare: false,
#[cfg(target_os = "linux")]
unshare_pidfile: None,
#[cfg(target_os = "linux")]
socket_transfer_fd: None,
#[cfg(target_os = "linux")]
admin_command: Vec::new(),
ipv6_enabled: false,
setup,
dns: ArgDns::default(),
dns_addr: "8.8.8.8".parse().unwrap(),
bypass: vec![],
tcp_timeout: 600,
udp_timeout: 10,
verbosity: ArgVerbosity::Info,
virtual_dns_pool: IpCidr::from_str("198.18.0.0/15").unwrap(),
daemonize: false,
exit_on_fatal_error: false,
max_sessions: 200,
#[cfg(feature = "udpgw")]
udpgw_server: None,
#[cfg(feature = "udpgw")]
udpgw_connections: None,
#[cfg(feature = "udpgw")]
udpgw_keepalive: None,
}
}
}
impl Args {
#[allow(clippy::let_and_return)]
pub fn parse_args() -> Self {
use clap::Parser;
Self::parse()
let args = <Self as ::clap::Parser>::parse();
#[cfg(target_os = "linux")]
if !args.setup && args.tun.is_none() {
eprintln!("Missing required argument, '--tun' must present when '--setup' is not used.");
std::process::exit(-1);
}
args
}
pub fn new(tun_fd: Option<i32>, proxy: ArgProxy, dns: ArgDns, verbosity: ArgVerbosity) -> Self {
Args {
proxy,
tun_fd,
dns,
verbosity,
..Args::default()
}
pub fn proxy(&mut self, proxy: ArgProxy) -> &mut Self {
self.proxy = proxy;
self
}
pub fn dns(&mut self, dns: ArgDns) -> &mut Self {
self.dns = dns;
self
}
#[cfg(feature = "udpgw")]
pub fn udpgw_server(&mut self, udpgw: SocketAddr) -> &mut Self {
self.udpgw_server = Some(udpgw);
self
}
#[cfg(feature = "udpgw")]
pub fn udpgw_connections(&mut self, udpgw_connections: usize) -> &mut Self {
self.udpgw_connections = Some(udpgw_connections);
self
}
#[cfg(unix)]
pub fn tun_fd(&mut self, tun_fd: Option<i32>) -> &mut Self {
self.tun_fd = tun_fd;
self
}
#[cfg(unix)]
pub fn close_fd_on_drop(&mut self, close_fd_on_drop: bool) -> &mut Self {
self.close_fd_on_drop = Some(close_fd_on_drop);
self
}
pub fn verbosity(&mut self, verbosity: ArgVerbosity) -> &mut Self {
self.verbosity = verbosity;
self
}
pub fn tun(&mut self, tun: String) -> &mut Self {
self.tun = Some(tun);
self
}
pub fn dns_addr(&mut self, dns_addr: IpAddr) -> &mut Self {
self.dns_addr = dns_addr;
self
}
pub fn bypass(&mut self, bypass: IpCidr) -> &mut Self {
self.bypass.push(bypass);
self
}
pub fn ipv6_enabled(&mut self, ipv6_enabled: bool) -> &mut Self {
self.ipv6_enabled = ipv6_enabled;
self
}
pub fn setup(&mut self, setup: bool) -> &mut Self {
self.setup = setup;
self
}
}
@ -190,42 +376,52 @@ impl Default for ArgProxy {
}
}
impl ArgProxy {
pub fn from_url(s: &str) -> Result<ArgProxy> {
impl std::fmt::Display for ArgProxy {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let auth = match &self.credentials {
Some(creds) => format!("{creds}"),
None => "".to_owned(),
};
if auth.is_empty() {
write!(f, "{}://{}", &self.proxy_type, &self.addr)
} else {
write!(f, "{}://{}@{}", &self.proxy_type, auth, &self.addr)
}
}
}
impl TryFrom<&str> for ArgProxy {
type Error = Error;
fn try_from(s: &str) -> Result<Self> {
if s == "none" {
return Ok(ArgProxy {
proxy_type: ProxyType::None,
addr: "0.0.0.0:0".parse().unwrap(),
credentials: None,
});
}
let e = format!("`{s}` is not a valid proxy URL");
let url = url::Url::parse(s).map_err(|_| Error::from(&e))?;
let e = format!("`{s}` does not contain a host");
let host = url.host_str().ok_or(Error::from(e))?;
let mut url_host = String::from(host);
let e = format!("`{s}` does not contain a port");
let port = url.port().ok_or(Error::from(&e))?;
url_host.push(':');
url_host.push_str(port.to_string().as_str());
let port = url.port_or_known_default().ok_or(Error::from(&e))?;
let e = format!("`{host}` could not be resolved");
let mut addr_iter = url_host.to_socket_addrs().map_err(|_| Error::from(&e))?;
let e = format!("`{host}` does not resolve to a usable IP address");
let addr = addr_iter.next().ok_or(Error::from(&e))?;
let e2 = format!("`{host}` does not resolve to a usable IP address");
let addr = (host, port).to_socket_addrs()?.next().ok_or(Error::from(&e2))?;
let credentials = if url.username() == "" && url.password().is_none() {
None
} else {
let username = String::from(url.username());
let password = String::from(url.password().unwrap_or(""));
use percent_encoding::percent_decode;
let username = percent_decode(url.username().as_bytes()).decode_utf8()?;
let password = percent_decode(url.password().unwrap_or("").as_bytes()).decode_utf8()?;
Some(UserKey::new(username, password))
};
let scheme = url.scheme();
let proxy_type = match url.scheme().to_ascii_lowercase().as_str() {
"socks4" => Some(ProxyType::Socks4),
"socks5" => Some(ProxyType::Socks5),
"http" => Some(ProxyType::Http),
_ => None,
}
.ok_or(Error::from(&format!("`{scheme}` is an invalid proxy type")))?;
let proxy_type = url.scheme().to_ascii_lowercase().as_str().try_into()?;
Ok(ArgProxy {
proxy_type,
@ -235,12 +431,27 @@ impl ArgProxy {
}
}
#[repr(C)]
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub enum ProxyType {
Http = 0,
Socks4,
#[default]
Socks5,
Http,
None,
}
impl TryFrom<&str> for ProxyType {
type Error = Error;
fn try_from(value: &str) -> Result<Self> {
match value {
"http" => Ok(ProxyType::Http),
"socks4" => Ok(ProxyType::Socks4),
"socks5" => Ok(ProxyType::Socks5),
"none" => Ok(ProxyType::None),
scheme => Err(Error::from(&format!("`{scheme}` is an invalid proxy type"))),
}
}
}
impl std::fmt::Display for ProxyType {
@ -249,6 +460,7 @@ impl std::fmt::Display for ProxyType {
ProxyType::Socks4 => write!(f, "socks4"),
ProxyType::Socks5 => write!(f, "socks5"),
ProxyType::Http => write!(f, "http"),
ProxyType::None => write!(f, "none"),
}
}
}
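Taken together, the new parsing and builder APIs above can be driven from library code roughly as follows. This is a sketch only; it assumes Args, ArgVerbosity, Error and the args module are publicly exported the same way they are used internally in src/general_api.rs further down.

use tun2proxy::{Args, ArgVerbosity, args::{ArgDns, ArgProxy}};

fn build_args() -> Result<Args, tun2proxy::Error> {
    // Credentials are percent-encoded in the URL and decoded during parsing.
    let proxy = ArgProxy::try_from("socks5://myname:pass%40word@127.0.0.1:1080")?;
    let mut args = Args::default();
    args.proxy(proxy)
        .tun("tun0".to_string())
        .dns(ArgDns::default())
        .verbosity(ArgVerbosity::Info)
        .ipv6_enabled(false);
    Ok(args)
}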


@ -1,83 +1,145 @@
use tproxy_config::{TproxyArgs, TUN_GATEWAY, TUN_IPV4, TUN_NETMASK};
use tun2::DEFAULT_MTU as MTU;
use tun2proxy::{Args, Builder};
use tun2proxy::{ArgVerbosity, Args, BoxError};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
fn main() -> Result<(), BoxError> {
dotenvy::dotenv().ok();
let args = Args::parse_args();
let bypass_ips = args.bypass.clone();
// let default = format!("{}={:?}", module_path!(), args.verbosity);
let default = format!("{:?}", args.verbosity);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
let mut config = tun2::Configuration::default();
config.address(TUN_IPV4).netmask(TUN_NETMASK).mtu(MTU).up();
config.destination(TUN_GATEWAY);
if let Some(tun_fd) = args.tun_fd {
config.raw_fd(tun_fd);
} else {
config.name(&args.tun);
#[cfg(unix)]
if args.daemonize {
let stdout = std::fs::File::create("/tmp/tun2proxy.out")?;
let stderr = std::fs::File::create("/tmp/tun2proxy.err")?;
let daemonize = daemonize::Daemonize::new()
.working_directory("/tmp")
.umask(0o777)
.stdout(stdout)
.stderr(stderr)
.privileged_action(|| "Executed before drop privileges");
let _ = daemonize.start()?;
}
#[cfg(target_os = "linux")]
config.platform_config(|config| {
#[allow(deprecated)]
config.packet_information(true);
config.ensure_root_privileges(args.setup);
});
#[cfg(target_os = "windows")]
config.platform_config(|config| {
config.device_guid(Some(12324323423423434234_u128));
if args.daemonize {
tun2proxy::win_svc::start_service()?;
return Ok(());
}
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(main_async(args))
}
fn setup_logging(args: &Args) {
let avoid_trace = match args.verbosity {
ArgVerbosity::Trace => ArgVerbosity::Debug,
_ => args.verbosity,
};
let default = format!(
"{:?},hickory_proto=warn,ipstack={:?},netlink_proto={:?},netlink_sys={:?}",
args.verbosity, avoid_trace, avoid_trace, avoid_trace
);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
}
async fn main_async(args: Args) -> Result<(), BoxError> {
setup_logging(&args);
let shutdown_token = tokio_util::sync::CancellationToken::new();
let main_loop_handle = tokio::spawn({
let args = args.clone();
let shutdown_token = shutdown_token.clone();
async move {
#[cfg(target_os = "linux")]
if args.unshare && args.socket_transfer_fd.is_none() {
if let Err(err) = namespace_proxy_main(args, shutdown_token).await {
log::error!("namespace proxy error: {err}");
}
return Ok(0);
}
unsafe extern "C" fn traffic_cb(status: *const tun2proxy::TrafficStatus, _: *mut std::ffi::c_void) {
let status = unsafe { &*status };
log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx);
}
unsafe { tun2proxy::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) };
let ret = tun2proxy::general_run_async(args, tun::DEFAULT_MTU, cfg!(target_os = "macos"), shutdown_token).await;
if let Err(err) = &ret {
log::error!("main loop error: {err}");
}
ret
}
});
#[allow(unused_variables)]
let mut tproxy_args = TproxyArgs::new()
.tun_dns(args.dns_addr)
.proxy_addr(args.proxy.addr)
.bypass_ips(&bypass_ips);
#[allow(unused_assignments)]
if args.tun_fd.is_none() {
tproxy_args = tproxy_args.tun_name(&args.tun);
let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let ctrlc_fired_clone = ctrlc_fired.clone();
let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || {
log::info!("Ctrl-C received, exiting...");
ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst);
shutdown_token.cancel();
true
})?;
let tasks = main_loop_handle.await??;
if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) {
log::info!("Ctrl-C fired, waiting the handler to finish...");
ctrlc_handel.await?;
}
#[allow(unused_mut, unused_assignments, unused_variables)]
let mut setup = true;
#[cfg(target_os = "linux")]
{
setup = args.setup;
if setup {
tproxy_config::tproxy_setup(&tproxy_args)?;
}
}
let device = tun2::create_as_async(&config)?;
#[cfg(any(target_os = "windows", target_os = "macos"))]
if setup {
tproxy_config::tproxy_setup(&tproxy_args)?;
}
let tun2proxy = Builder::new(device, args).mtu(MTU as _).build();
let (join_handle, quit) = tun2proxy.start();
ctrlc2::set_async_handler(async move {
quit.trigger().await.expect("quit error");
})
.await;
if let Err(err) = join_handle.await {
log::trace!("main_entry error {}", err);
}
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if setup {
tproxy_config::tproxy_remove(&tproxy_args)?;
if args.exit_on_fatal_error && tasks >= args.max_sessions {
// Because the `main_async` function may be stuck in an `await`, we need to exit the process forcefully
log::info!("Internal fatal error, max sessions reached ({tasks}/{})", args.max_sessions);
std::process::exit(-1);
}
Ok(())
}
#[cfg(target_os = "linux")]
async fn namespace_proxy_main(
_args: Args,
_shutdown_token: tokio_util::sync::CancellationToken,
) -> Result<std::process::ExitStatus, tun2proxy::Error> {
use nix::fcntl::{OFlag, open};
use nix::sys::stat::Mode;
use std::os::fd::AsRawFd;
let (socket, remote_fd) = tun2proxy::socket_transfer::create_transfer_socket_pair().await?;
let fd = open("/proc/self/exe", OFlag::O_PATH, Mode::empty())?;
let child = tokio::process::Command::new("unshare")
.args("--user --map-current-user --net --mount --keep-caps --kill-child --fork".split(' '))
.arg(format!("/proc/self/fd/{}", fd.as_raw_fd()))
.arg("--socket-transfer-fd")
.arg(remote_fd.as_raw_fd().to_string())
.args(std::env::args().skip(1))
.kill_on_drop(true)
.spawn();
let mut child = match child {
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
log::error!("`unshare(1)` executable wasn't located in PATH.");
log::error!("Consider installing linux utils package: `apt install util-linux`");
log::error!("Or similar for your distribution.");
return Err(err.into());
}
child => child?,
};
let unshare_pid = child.id().unwrap_or(0);
log::info!("The tun proxy is running in unprivileged mode. See `namespaces(7)`.");
log::info!("");
log::info!("If you need to run a process that relies on root-like capabilities (e.g. `openvpn`)");
log::info!("Use `tun2proxy-bin --unshare --setup [...] -- openvpn --config [...]`");
log::info!("");
log::info!("To run a new process in the created namespace (e.g. a flatpak app)");
log::info!("Use `nsenter --preserve-credentials --user --net --mount --target {unshare_pid} /bin/sh`");
log::info!("");
if let Some(pidfile) = _args.unshare_pidfile.as_ref() {
log::info!("Writing unshare pid to {pidfile}");
std::fs::write(pidfile, unshare_pid.to_string()).ok();
}
tokio::spawn(async move { tun2proxy::socket_transfer::process_socket_requests(&socket).await });
Ok(child.wait().await?)
}
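For embedding, the core of main_async above boils down to a single call. A minimal sketch, under the assumption that general_run_async and the tun crate's DEFAULT_MTU are used exactly as in the spawned task above:

async fn run_embedded(args: tun2proxy::Args) -> Result<(), tun2proxy::BoxError> {
    let shutdown = tokio_util::sync::CancellationToken::new();
    // Keep a clone and call .cancel() on it (e.g. from a signal handler) to stop the proxy.
    let _stopper = shutdown.clone();
    // The third argument is the packet_information flag (true on macOS in main_async above).
    let _tasks = tun2proxy::general_run_async(args, tun::DEFAULT_MTU, false, shutdown).await?;
    Ok(())
}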

src/bin/udpgw_server.rs Normal file

@ -0,0 +1,271 @@
use socks5_impl::protocol::AsyncStreamOperation;
use std::net::SocketAddr;
use tokio::{
io::AsyncWriteExt,
net::{
UdpSocket,
tcp::{ReadHalf, WriteHalf},
},
sync::mpsc::{Receiver, Sender},
};
use tun2proxy::{
ArgVerbosity, BoxError, Error, Result,
udpgw::{Packet, UdpFlag},
};
pub(crate) const CLIENT_DISCONNECT_TIMEOUT: tokio::time::Duration = std::time::Duration::from_secs(60);
#[derive(Debug, Clone)]
pub struct Client {
addr: SocketAddr,
last_activity: std::time::Instant,
}
impl Client {
pub fn new(addr: SocketAddr) -> Self {
let last_activity = std::time::Instant::now();
Self { addr, last_activity }
}
}
fn about_info() -> &'static str {
concat!("UDP Gateway Server for tun2proxy\nVersion: ", tun2proxy::version_info!())
}
#[derive(Debug, Clone, clap::Parser)]
#[command(author, version = tun2proxy::version_info!(), about = about_info(), long_about = None)]
pub struct UdpGwArgs {
/// UDP gateway listen address
#[arg(short, long, value_name = "IP:PORT", default_value = "127.0.0.1:7300")]
pub listen_addr: SocketAddr,
/// UDP mtu
#[arg(short = 'm', long, value_name = "udp mtu", default_value = "10240")]
pub udp_mtu: u16,
/// UDP timeout in seconds
#[arg(short = 't', long, value_name = "seconds", default_value = "3")]
pub udp_timeout: u64,
/// Daemonize for unix family or run as Windows service
#[cfg(unix)]
#[arg(short, long)]
pub daemonize: bool,
/// Verbosity level
#[arg(short, long, value_name = "level", value_enum, default_value = "info")]
pub verbosity: ArgVerbosity,
}
impl UdpGwArgs {
pub fn parse_args() -> Self {
<Self as ::clap::Parser>::parse()
}
}
async fn send_error_response(tx: Sender<Packet>, conn_id: u16) {
let error_packet = Packet::build_error_packet(conn_id);
if let Err(e) = tx.send(error_packet).await {
log::error!("send error response error {e:?}");
}
}
async fn send_keepalive_response(tx: Sender<Packet>, conn_id: u16) {
let keepalive_packet = Packet::build_keepalive_packet(conn_id);
if let Err(e) = tx.send(keepalive_packet).await {
log::error!("send keepalive response error {e:?}");
}
}
/// Send the packet's data field from the client to the destination server, receive the response,
/// then wrap the response in the packet's data field and send the packet back to the client.
async fn process_udp(udp_mtu: u16, udp_timeout: u64, tx: Sender<Packet>, mut packet: Packet) -> Result<()> {
let Some(dst_addr) = &packet.address else {
return Err(std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "udp request address is None").into());
};
use std::net::ToSocketAddrs;
let Some(dst_addr) = dst_addr.to_socket_addrs()?.next() else {
return Err(std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "to_socket_addrs").into());
};
let std_sock = match dst_addr {
std::net::SocketAddr::V6(_) => std::net::UdpSocket::bind("[::]:0")?,
std::net::SocketAddr::V4(_) => std::net::UdpSocket::bind("0.0.0.0:0")?,
};
std_sock.set_nonblocking(true)?;
#[cfg(unix)]
nix::sys::socket::setsockopt(&std_sock, nix::sys::socket::sockopt::ReuseAddr, &true)?;
let socket = UdpSocket::from_std(std_sock)?;
// 1. send udp data to destination server
socket.send_to(&packet.data, &dst_addr).await?;
// 2. receive response from destination server
let mut buf = vec![0u8; udp_mtu as usize];
let (len, _addr) = tokio::time::timeout(tokio::time::Duration::from_secs(udp_timeout), socket.recv_from(&mut buf))
.await
.map_err(std::io::Error::from)??;
packet.data = buf[..len].to_vec();
// 3. send response back to client
use std::io::{Error, ErrorKind::BrokenPipe};
tx.send(packet).await.map_err(|e| Error::new(BrokenPipe, e))?;
Ok(())
}
fn mask_ip(ip: &str) -> String {
if ip.len() <= 2 {
return ip.to_string();
}
let mut masked_ip = String::new();
for (i, c) in ip.chars().enumerate() {
if i == 0 || i == ip.len() - 1 || c == '.' || c == ':' {
masked_ip.push(c);
} else {
masked_ip.push('*');
}
}
masked_ip
}
fn mask_socket_addr(socket_addr: std::net::SocketAddr) -> String {
match socket_addr {
std::net::SocketAddr::V4(addr) => {
let masked_ip = mask_ip(&addr.ip().to_string());
format!("{}:{}", masked_ip, addr.port())
}
std::net::SocketAddr::V6(addr) => {
let masked_ip = mask_ip(&addr.ip().to_string());
format!("[{}]:{}", masked_ip, addr.port())
}
}
}
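// e.g. mask_ip("10.0.0.3") yields "1*.*.*.3", so a client at 10.0.0.3:10800
// is logged as "1*.*.*.3:10800".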
async fn process_client_udp_req(args: &UdpGwArgs, tx: Sender<Packet>, mut client: Client, mut reader: ReadHalf<'_>) -> std::io::Result<()> {
let udp_timeout = args.udp_timeout;
let udp_mtu = args.udp_mtu;
let masked_addr = mask_socket_addr(client.addr);
loop {
let masked_addr = masked_addr.clone();
// 1. read udpgw packet from client
let res = tokio::time::timeout(tokio::time::Duration::from_secs(2), Packet::retrieve_from_async_stream(&mut reader)).await;
let packet = match res {
Ok(Ok(packet)) => packet,
Ok(Err(e)) => {
log::debug!("client {masked_addr} retrieve_from_async_stream \"{e}\"");
break;
}
Err(e) => {
if client.last_activity.elapsed() >= CLIENT_DISCONNECT_TIMEOUT {
log::debug!("client {masked_addr} last_activity elapsed \"{e}\"");
break;
}
continue;
}
};
client.last_activity = std::time::Instant::now();
let flags = packet.header.flags;
let conn_id = packet.header.conn_id;
if flags & UdpFlag::KEEPALIVE == UdpFlag::KEEPALIVE {
log::trace!("client {masked_addr} send keepalive");
// 2. if keepalive packet, do nothing, send keepalive response to client
send_keepalive_response(tx.clone(), conn_id).await;
continue;
}
log::trace!("client {masked_addr} received udp data {packet}");
// 3. process client udpgw packet in a new task
let tx = tx.clone();
tokio::spawn(async move {
if let Err(e) = process_udp(udp_mtu, udp_timeout, tx.clone(), packet).await {
send_error_response(tx, conn_id).await;
log::debug!("client {masked_addr} process udp function \"{e}\"");
}
});
}
Ok(())
}
async fn write_to_client(addr: SocketAddr, mut writer: WriteHalf<'_>, mut rx: Receiver<Packet>) -> std::io::Result<()> {
let masked_addr = mask_socket_addr(addr);
loop {
use std::io::{Error, ErrorKind::BrokenPipe};
let packet = rx.recv().await.ok_or(Error::new(BrokenPipe, "recv error"))?;
log::trace!("send response to client {masked_addr} with {packet}");
let data: Vec<u8> = packet.into();
let _r = writer.write(&data).await?;
}
}
async fn main_async(args: UdpGwArgs) -> Result<(), BoxError> {
log::info!("{} {} starting...", module_path!(), tun2proxy::version_info!());
log::info!("UDP Gateway Server running at {}", args.listen_addr);
let shutdown_token = tokio_util::sync::CancellationToken::new();
let main_loop_handle = tokio::spawn(run(args, shutdown_token.clone()));
let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let ctrlc_fired_clone = ctrlc_fired.clone();
let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || {
log::info!("Ctrl-C received, exiting...");
ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst);
shutdown_token.cancel();
true
})?;
let _ = main_loop_handle.await?;
if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) {
log::info!("Ctrl-C fired, waiting the handler to finish...");
ctrlc_handel.await?;
}
Ok(())
}
pub async fn run(args: UdpGwArgs, shutdown_token: tokio_util::sync::CancellationToken) -> crate::Result<()> {
let tcp_listener = tokio::net::TcpListener::bind(args.listen_addr).await?;
loop {
let (mut tcp_stream, addr) = tokio::select! {
v = tcp_listener.accept() => v?,
_ = shutdown_token.cancelled() => break,
};
let client = Client::new(addr);
let masked_addr = mask_socket_addr(addr);
log::info!("client {masked_addr} connected");
let params = args.clone();
tokio::spawn(async move {
let (tx, rx) = tokio::sync::mpsc::channel::<Packet>(100);
let (tcp_read_stream, tcp_write_stream) = tcp_stream.split();
let res = tokio::select! {
v = process_client_udp_req(&params, tx, client, tcp_read_stream) => v,
v = write_to_client(addr, tcp_write_stream, rx) => v,
};
log::info!("client {masked_addr} disconnected with {res:?}");
});
}
Ok::<(), Error>(())
}
fn main() -> Result<(), BoxError> {
dotenvy::dotenv().ok();
let args = UdpGwArgs::parse_args();
let default = format!("{:?}", args.verbosity);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
#[cfg(unix)]
if args.daemonize {
let stdout = std::fs::File::create("/tmp/udpgw.out")?;
let stderr = std::fs::File::create("/tmp/udpgw.err")?;
let daemonize = daemonize::Daemonize::new()
.working_directory("/tmp")
.umask(0o777)
.stdout(stdout)
.stderr(stderr)
.privileged_action(|| "Executed before drop privileges");
let _ = daemonize.start().map_err(|e| format!("Failed to daemonize process, error:{e:?}"))?;
}
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(main_async(args))
}
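As a quick way to poke at the server, a hypothetical client can open a TCP connection and send a keepalive frame using the same Packet type imported above (tun2proxy::udpgw::Packet). The address below is the server's default listen address, and conn_id 1 is arbitrary:

use tokio::{io::AsyncWriteExt, net::TcpStream};
use tun2proxy::udpgw::Packet;

async fn send_keepalive() -> std::io::Result<()> {
    // 127.0.0.1:7300 is the default --listen-addr above; conn_id 1 is arbitrary.
    let mut stream = TcpStream::connect("127.0.0.1:7300").await?;
    let frame: Vec<u8> = Packet::build_keepalive_packet(1).into();
    stream.write_all(&frame).await?;
    Ok(())
}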


@ -1,22 +1,16 @@
use std::{net::IpAddr, str::FromStr};
use trust_dns_proto::op::MessageType;
use trust_dns_proto::{
op::{Message, ResponseCode},
rr::{record_type::RecordType, Name, RData, Record},
use hickory_proto::{
op::{Message, MessageType, ResponseCode},
rr::{
Name, RData, Record,
rdata::{A, AAAA},
},
};
use std::{net::IpAddr, str::FromStr};
pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u32) -> Result<Message, String> {
let record = match ip {
IpAddr::V4(ip) => {
let mut record = Record::with(Name::from_str(domain)?, RecordType::A, ttl);
record.set_data(Some(RData::A(ip.into())));
record
}
IpAddr::V6(ip) => {
let mut record = Record::with(Name::from_str(domain)?, RecordType::AAAA, ttl);
record.set_data(Some(RData::AAAA(ip.into())));
record
}
IpAddr::V4(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::A(A(ip))),
IpAddr::V6(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::AAAA(AAAA(ip))),
};
// We must indicate that this message is a response. Otherwise, implementations may not
@ -28,9 +22,7 @@ pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u
}
pub fn remove_ipv6_entries(message: &mut Message) {
message
.answers_mut()
.retain(|answer| !matches!(answer.data(), Some(RData::AAAA(_))));
message.answers_mut().retain(|answer| !matches!(answer.data(), RData::AAAA(_)));
}
pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result<IpAddr, String> {
@ -39,7 +31,7 @@ pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result<IpAddr, Stri
}
let mut cname = None;
for answer in message.answers() {
match answer.data().ok_or("DNS response not contains answer data")? {
match answer.data() {
RData::A(addr) => {
return Ok(IpAddr::V4((*addr).into()));
}
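For orientation, a small sketch of how build_dns_response above might be driven, assuming it is called from the same module; the hickory-proto request is built by hand here:

use hickory_proto::op::{Message, Query};
use hickory_proto::rr::{Name, RecordType};
use std::{net::IpAddr, str::FromStr};

fn fake_a_answer() -> Result<Message, String> {
    let name = Name::from_str("example.com.").map_err(|e| e.to_string())?;
    let mut request = Message::new();
    request.add_query(Query::query(name, RecordType::A));
    let ip: IpAddr = IpAddr::from_str("198.18.0.1").map_err(|e| e.to_string())?;
    // 5-second TTL; build_dns_response marks the message as a response itself.
    build_dns_response(request, "example.com.", ip, 5)
}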


@ -9,7 +9,7 @@ pub(crate) static DUMP_CALLBACK: Mutex<Option<DumpCallback>> = Mutex::new(None);
/// # Safety
///
/// set dump log info callback.
#[no_mangle]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_set_log_callback(
callback: Option<unsafe extern "C" fn(ArgVerbosity, *const c_char, *mut c_void)>,
ctx: *mut c_void,
@ -23,7 +23,7 @@ pub struct DumpCallback(Option<unsafe extern "C" fn(ArgVerbosity, *const c_char,
impl DumpCallback {
unsafe fn call(self, dump_level: ArgVerbosity, info: *const c_char) {
if let Some(cb) = self.0 {
cb(dump_level, info, self.1);
unsafe { cb(dump_level, info, self.1) };
}
}
}
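A sketch of how a host could hook this from Rust (the usual consumer is C or Swift through the generated header); it assumes tun2proxy_set_log_callback and ArgVerbosity are reachable from the crate root, as tun2proxy_set_traffic_status_callback is in src/bin/main.rs above:

use std::os::raw::{c_char, c_void};
use tun2proxy::ArgVerbosity;

unsafe extern "C" fn on_log(level: ArgVerbosity, msg: *const c_char, _ctx: *mut c_void) {
    if msg.is_null() {
        return;
    }
    let text = unsafe { std::ffi::CStr::from_ptr(msg) }.to_string_lossy();
    eprintln!("[{level:?}] {text}");
}

fn install_log_callback() {
    unsafe { tun2proxy::tun2proxy_set_log_callback(Some(on_log), std::ptr::null_mut()) };
}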


@ -6,6 +6,10 @@ pub enum Error {
#[error(transparent)]
Io(#[from] std::io::Error),
#[cfg(unix)]
#[error("nix::errno::Errno {0:?}")]
NixErrno(#[from] nix::errno::Errno),
#[error("TryFromIntError {0:?}")]
TryFromInt(#[from] std::num::TryFromIntError),
@ -19,10 +23,10 @@ pub enum Error {
TryFromSlice(#[from] std::array::TryFromSliceError),
#[error("IpStackError {0:?}")]
IpStack(#[from] ipstack::IpStackError),
IpStack(#[from] Box<ipstack::IpStackError>),
#[error("DnsProtoError {0:?}")]
DnsProto(#[from] trust_dns_proto::error::ProtoError),
DnsProto(#[from] hickory_proto::ProtoError),
#[error("httparse::Error {0:?}")]
Httparse(#[from] httparse::Error),
@ -41,6 +45,12 @@ pub enum Error {
IntParseError(#[from] std::num::ParseIntError),
}
impl From<ipstack::IpStackError> for Error {
fn from(err: ipstack::IpStackError) -> Self {
Self::IpStack(Box::new(err))
}
}
impl From<&str> for Error {
fn from(err: &str) -> Self {
Self::String(err.to_string())
@ -63,9 +73,11 @@ impl From<Error> for std::io::Error {
fn from(err: Error) -> Self {
match err {
Error::Io(err) => err,
_ => std::io::Error::new(std::io::ErrorKind::Other, err),
_ => std::io::Error::other(err),
}
}
}
pub type BoxError = Box<dyn std::error::Error + Send + Sync + 'static>;
pub type Result<T, E = Error> = std::result::Result<T, E>;
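The string and io conversions above make ad-hoc errors cheap; a tiny sketch:

use tun2proxy::Error;

fn demo() -> std::io::Error {
    // `From<&str>` builds the String variant; non-Io variants round-trip
    // through std::io::Error::other as in the impl above.
    let err = Error::from("something went wrong");
    std::io::Error::from(err)
}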

src/general_api.rs Normal file

@ -0,0 +1,269 @@
use crate::{
ArgVerbosity, Args,
args::{ArgDns, ArgProxy},
};
use std::os::raw::{c_char, c_int, c_ushort};
static TUN_QUIT: std::sync::Mutex<Option<tokio_util::sync::CancellationToken>> = std::sync::Mutex::new(None);
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun: the tun device name, e.g. "utun5"
/// - bypass: the bypass IP/CIDR, e.g. "123.45.67.0/24"
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - root_privilege: whether to run with root privilege
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_with_name_run(
proxy_url: *const c_char,
tun: *const c_char,
bypass: *const c_char,
dns_strategy: ArgDns,
_root_privilege: bool,
verbosity: ArgVerbosity,
) -> c_int {
let proxy_url = unsafe { std::ffi::CStr::from_ptr(proxy_url) }.to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let tun = unsafe { std::ffi::CStr::from_ptr(tun) }.to_str().unwrap().to_string();
let mut args = Args::default();
if let Ok(bypass) = unsafe { std::ffi::CStr::from_ptr(bypass) }.to_str() {
args.bypass(bypass.parse().unwrap());
}
args.proxy(proxy).tun(tun).dns(dns_strategy).verbosity(verbosity);
#[cfg(target_os = "linux")]
args.setup(_root_privilege);
general_run_for_api(args, tun::DEFAULT_MTU, false)
}
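
    // For illustration only (not part of this changeset): a caller-side sketch of the
    // entry point above. The proxy URL, device name, and CIDR are the examples from the
    // doc comment; DNS strategy and verbosity are taken as parameters rather than
    // guessing enum variants.
    use std::ffi::CString;
    use std::os::raw::c_int;

    fn start_by_name(dns: ArgDns, verbosity: ArgVerbosity) -> c_int {
        let proxy = CString::new("socks5://127.0.0.1:1080").unwrap();
        let tun = CString::new("utun5").unwrap();
        let bypass = CString::new("123.45.67.0/24").unwrap();
        // root_privilege is only honored on Linux.
        unsafe { tun2proxy_with_name_run(proxy.as_ptr(), tun.as_ptr(), bypass.as_ptr(), dns, false, verbosity) }
    }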
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun_fd: the tun file descriptor; it will be owned by tun2proxy
/// - close_fd_on_drop: whether to close the tun_fd on drop
/// - packet_information: whether packet information is present in packets from the TUN device
/// - tun_mtu: the tun mtu
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[cfg(unix)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_with_fd_run(
proxy_url: *const c_char,
tun_fd: c_int,
close_fd_on_drop: bool,
packet_information: bool,
tun_mtu: c_ushort,
dns_strategy: ArgDns,
verbosity: ArgVerbosity,
) -> c_int {
let proxy_url = unsafe { std::ffi::CStr::from_ptr(proxy_url) }.to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let mut args = Args::default();
args.proxy(proxy)
.tun_fd(Some(tun_fd))
.close_fd_on_drop(close_fd_on_drop)
.dns(dns_strategy)
.verbosity(verbosity);
general_run_for_api(args, tun_mtu, packet_information)
}
/// # Safety
/// Run the tun2proxy component with command line arguments
/// Parameters:
/// - cli_args: The command line arguments,
/// e.g. `tun2proxy-bin --setup --proxy socks5://127.0.0.1:1080 --bypass 98.76.54.0/24 --dns over-tcp --verbosity trace`
/// - tun_mtu: The MTU of the TUN device, e.g. 1500
/// - packet_information: Whether packet information is present in packets from the TUN device
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_run_with_cli_args(cli_args: *const c_char, tun_mtu: c_ushort, packet_information: bool) -> c_int {
let Ok(cli_args) = unsafe { std::ffi::CStr::from_ptr(cli_args) }.to_str() else {
log::error!("Failed to convert CLI arguments to string");
return -5;
};
let Some(args) = shlex::split(cli_args) else {
log::error!("Failed to split CLI arguments");
return -6;
};
let args = <Args as ::clap::Parser>::parse_from(args);
general_run_for_api(args, tun_mtu, packet_information)
}
pub fn general_run_for_api(args: Args, tun_mtu: u16, packet_information: bool) -> c_int {
log::set_max_level(args.verbosity.into());
if let Err(err) = log::set_boxed_logger(Box::<crate::dump_logger::DumpLogger>::default()) {
log::debug!("set logger error: {err}");
}
let shutdown_token = tokio_util::sync::CancellationToken::new();
if let Ok(mut lock) = TUN_QUIT.lock() {
if lock.is_some() {
log::error!("tun2proxy already started");
return -1;
}
*lock = Some(shutdown_token.clone());
} else {
log::error!("failed to lock tun2proxy quit token");
return -2;
}
let Ok(rt) = tokio::runtime::Builder::new_multi_thread().enable_all().build() else {
log::error!("failed to create tokio runtime with");
return -3;
};
match rt.block_on(async move {
let ret = general_run_async(args.clone(), tun_mtu, packet_information, shutdown_token).await;
match &ret {
Ok(sessions) => {
if args.exit_on_fatal_error && *sessions >= args.max_sessions {
log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions);
std::process::exit(-1);
}
log::debug!("tun2proxy exited normally, current sessions: {sessions}");
}
Err(err) => log::error!("main loop error: {err}"),
}
ret
}) {
Ok(_) => 0,
Err(e) => {
log::error!("failed to run tun2proxy with error: {e:?}");
-4
}
}
}
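
    // Caller-side convenience sketch (not part of this changeset): mapping the negative
    // return codes used by general_run_for_api and tun2proxy_run_with_cli_args above to
    // messages. It only restates the codes visible in this diff.
    fn describe_exit_code(code: std::os::raw::c_int) -> &'static str {
        match code {
            0 => "exited normally",
            -1 => "tun2proxy already started",
            -2 => "failed to lock the quit token",
            -3 => "failed to create the tokio runtime",
            -4 => "main loop returned an error",
            -5 => "CLI arguments were not valid UTF-8",
            -6 => "CLI arguments could not be split",
            _ => "unknown exit code",
        }
    }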
/// Run the tun2proxy component with some arguments.
pub async fn general_run_async(
args: Args,
tun_mtu: u16,
_packet_information: bool,
shutdown_token: tokio_util::sync::CancellationToken,
) -> std::io::Result<usize> {
let mut tun_config = tun::Configuration::default();
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
{
use tproxy_config::{TUN_GATEWAY, TUN_IPV4, TUN_NETMASK};
tun_config.address(TUN_IPV4).netmask(TUN_NETMASK).mtu(tun_mtu).up();
tun_config.destination(TUN_GATEWAY);
}
#[cfg(unix)]
if let Some(fd) = args.tun_fd {
tun_config.raw_fd(fd);
if let Some(v) = args.close_fd_on_drop {
tun_config.close_fd_on_drop(v);
};
} else if let Some(ref tun) = args.tun {
tun_config.tun_name(tun);
}
#[cfg(windows)]
if let Some(ref tun) = args.tun {
tun_config.tun_name(tun);
}
#[cfg(target_os = "linux")]
tun_config.platform_config(|cfg| {
#[allow(deprecated)]
cfg.packet_information(true);
cfg.ensure_root_privileges(args.setup);
});
#[cfg(target_os = "windows")]
tun_config.platform_config(|cfg| {
cfg.device_guid(12324323423423434234_u128);
});
#[cfg(any(target_os = "ios", target_os = "macos"))]
tun_config.platform_config(|cfg| {
cfg.packet_information(_packet_information);
});
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
#[allow(unused_variables)]
let mut tproxy_args = tproxy_config::TproxyArgs::new()
.tun_dns(args.dns_addr)
.proxy_addr(args.proxy.addr)
.bypass_ips(&args.bypass)
.ipv6_default_route(args.ipv6_enabled);
let device = tun::create_as_async(&tun_config)?;
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if let Ok(tun_name) = tun::AbstractDevice::tun_name(&*device) {
// Above line is equivalent to: `use tun::AbstractDevice; if let Ok(tun_name) = device.tun_name() {`
tproxy_args = tproxy_args.tun_name(&tun_name);
}
// TproxyState implements the Drop trait to restore network configuration,
// so we need to assign it to a variable, even if it is not used.
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
let mut restore: Option<tproxy_config::TproxyState> = None;
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if args.setup {
restore = Some(tproxy_config::tproxy_setup(&tproxy_args).await?);
}
#[cfg(target_os = "linux")]
{
let mut admin_command_args = args.admin_command.iter();
if let Some(command) = admin_command_args.next() {
let child = tokio::process::Command::new(command)
.args(admin_command_args)
.kill_on_drop(true)
.spawn();
match child {
Err(err) => {
log::warn!("Failed to start admin process: {err}");
}
Ok(mut child) => {
tokio::spawn(async move {
if let Err(err) = child.wait().await {
log::warn!("Admin process terminated: {err}");
}
});
}
};
}
}
let join_handle = tokio::spawn(crate::run(device, tun_mtu, args, shutdown_token.clone()));
match join_handle.await? {
Ok(sessions) => {
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
tproxy_config::tproxy_remove(restore).await?;
Ok(sessions)
}
Err(err) => Err(std::io::Error::from(err)),
}
}
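
    // Minimal embedding sketch (not in this changeset, assuming tokio's "signal" feature):
    // driving general_run_async with a CancellationToken and cancelling it on Ctrl-C.
    // Args construction is elided.
    async fn run_until_ctrl_c(args: Args) -> std::io::Result<usize> {
        let shutdown_token = tokio_util::sync::CancellationToken::new();
        let quit = shutdown_token.clone();
        tokio::spawn(async move {
            let _ = tokio::signal::ctrl_c().await;
            quit.cancel();
        });
        // 1500 is a common MTU; packet_information only matters on iOS/macOS here.
        general_run_async(args, 1500, false, shutdown_token).await
    }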
/// # Safety
///
/// Shutdown the tun2proxy component.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_stop() -> c_int {
tun2proxy_stop_internal()
}
pub(crate) fn tun2proxy_stop_internal() -> c_int {
if let Ok(mut lock) = TUN_QUIT.lock() {
if let Some(shutdown_token) = lock.take() {
shutdown_token.cancel();
return 0;
}
}
-1
}

View file

@ -4,11 +4,10 @@ use crate::{
proxy_handler::{ProxyHandler, ProxyHandlerManager},
session_info::{IpProtocol, SessionInfo},
};
use base64::Engine;
use httparse::Response;
use socks5_impl::protocol::UserKey;
use std::{
collections::{hash_map::RandomState, HashMap, VecDeque},
collections::{HashMap, VecDeque, hash_map::RandomState},
iter::FromIterator,
net::SocketAddr,
str,
@ -38,6 +37,7 @@ enum HttpState {
pub(crate) type DigestState = digest_auth::WwwAuthenticateHeader;
pub struct HttpConnection {
server_addr: SocketAddr,
state: HttpState,
client_inbuf: VecDeque<u8>,
server_inbuf: VecDeque<u8>,
@ -61,12 +61,14 @@ static CONTENT_LENGTH: &str = "Content-Length";
impl HttpConnection {
async fn new(
server_addr: SocketAddr,
info: SessionInfo,
domain_name: Option<String>,
credentials: Option<UserKey>,
digest_state: Arc<Mutex<Option<DigestState>>>,
) -> Result<Self> {
let mut res = Self {
server_addr,
state: HttpState::ExpectResponseHeaders,
client_inbuf: VecDeque::default(),
server_inbuf: VecDeque::default(),
@ -138,10 +140,9 @@ impl HttpConnection {
.extend(format!("{}: {}\r\n", PROXY_AUTHORIZATION, response.to_header_string()).as_bytes());
}
AuthenticationScheme::Basic => {
let cred = format!("{}:{}", credentials.username, credentials.password);
let auth_b64 = base64::engine::general_purpose::STANDARD.encode(cred);
let auth_b64 = base64easy::encode(credentials.to_string(), base64easy::EngineKind::Standard);
self.server_outbuf
.extend(format!("{}: Basic {}\r\n", PROXY_AUTHORIZATION, auth_b64).as_bytes());
.extend(format!("{PROXY_AUTHORIZATION}: Basic {auth_b64}\r\n").as_bytes());
}
AuthenticationScheme::None => {}
}
@ -149,7 +150,6 @@ impl HttpConnection {
Ok(())
}
#[async_recursion::async_recursion]
async fn state_change(&mut self) -> Result<()> {
match self.state {
HttpState::ExpectResponseHeaders => {
@ -172,6 +172,8 @@ impl HttpConnection {
return Ok(());
}
let header_size = self.counter;
self.counter = 0;
self.crlf_state = 0;
@ -192,8 +194,10 @@ impl HttpConnection {
if status_code == 200 {
// Connection successful
self.state = HttpState::Established;
self.server_inbuf.clear();
return self.state_change().await;
// The server may have sent a banner already (SMTP, SSH, etc.).
// Therefore, server_inbuf must retain this data.
self.server_inbuf.drain(0..header_size);
return Box::pin(self.state_change()).await;
}
if status_code != 407 {
@ -246,11 +250,11 @@ impl HttpConnection {
}
// The HTTP/1.1 expected to be keep alive waiting for the next frame so, we must
// compute the lenght of the response in order to detect the next frame (response)
// compute the length of the response in order to detect the next frame (response)
// [RFC-9112](https://datatracker.ietf.org/doc/html/rfc9112#body.content-length)
// Transfer-Encoding isn't supported yet
if headers_map.get(&UniCase::new(TRANSFER_ENCODING)).is_some() {
if headers_map.contains_key(&UniCase::new(TRANSFER_ENCODING)) {
unimplemented!("Header Transfer-Encoding not supported");
}
@ -288,7 +292,7 @@ impl HttpConnection {
self.state = HttpState::ExpectResponse;
self.skip = content_length + len;
return self.state_change().await;
return Box::pin(self.state_change()).await;
}
HttpState::ExpectResponse => {
if self.skip > 0 {
@ -305,7 +309,7 @@ impl HttpConnection {
self.send_tunnel_request().await?;
self.state = HttpState::ExpectResponseHeaders;
return self.state_change().await;
return Box::pin(self.state_change()).await;
}
}
HttpState::Established => {
@ -316,7 +320,7 @@ impl HttpConnection {
}
HttpState::Reset => {
self.state = HttpState::ExpectResponseHeaders;
return self.state_change().await;
return Box::pin(self.state_change()).await;
}
_ => {}
}
@ -326,6 +330,10 @@ impl HttpConnection {
#[async_trait::async_trait]
impl ProxyHandler for HttpConnection {
fn get_server_addr(&self) -> SocketAddr {
self.server_addr
}
fn get_session_info(&self) -> SessionInfo {
self.info
}
@ -406,16 +414,12 @@ impl ProxyHandlerManager for HttpManager {
_udp_associate: bool,
) -> std::io::Result<Arc<Mutex<dyn ProxyHandler>>> {
if info.protocol != IpProtocol::Tcp {
return Err(Error::from("Invalid protocol").into());
return Err(Error::from("Protocol not supported by HTTP proxy").into());
}
Ok(Arc::new(Mutex::new(
HttpConnection::new(info, domain_name, self.credentials.clone(), self.digest_state.clone()).await?,
HttpConnection::new(self.server, info, domain_name, self.credentials.clone(), self.digest_state.clone()).await?,
)))
}
fn get_server_addr(&self) -> SocketAddr {
self.server
}
}
impl HttpManager {

View file

@ -1,37 +0,0 @@
#![cfg(target_os = "ios")]
use crate::{
args::{ArgDns, ArgProxy},
ArgVerbosity, Args,
};
use std::os::raw::{c_char, c_int, c_uint};
/// # Safety
///
/// Run the tun2proxy component with some arguments.
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_run(
proxy_url: *const c_char,
tun_fd: c_int,
tun_mtu: c_uint,
dns_strategy: ArgDns,
verbosity: ArgVerbosity,
) -> c_int {
log::set_max_level(verbosity.into());
log::set_boxed_logger(Box::<crate::dump_logger::DumpLogger>::default()).unwrap();
let proxy_url = std::ffi::CStr::from_ptr(proxy_url).to_str().unwrap();
let proxy = ArgProxy::from_url(proxy_url).unwrap();
let args = Args::new(Some(tun_fd), proxy, dns_strategy, verbosity);
crate::api::tun2proxy_internal_run(args, tun_mtu as _)
}
/// # Safety
///
/// Shutdown the tun2proxy component.
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_stop() -> c_int {
crate::api::tun2proxy_internal_stop()
}

View file

@ -1,161 +1,285 @@
#[cfg(feature = "udpgw")]
use crate::udpgw::UdpGwClient;
use crate::{
args::ProxyType,
directions::{IncomingDataEvent, IncomingDirection, OutgoingDirection},
http::HttpManager,
no_proxy::NoProxyManager,
session_info::{IpProtocol, SessionInfo},
virtual_dns::VirtualDns,
};
pub use clap;
use ipstack::stream::{IpStackStream, IpStackTcpStream, IpStackUdpStream};
use ipstack::{IpStackStream, IpStackTcpStream, IpStackUdpStream};
use proxy_handler::{ProxyHandler, ProxyHandlerManager};
use socks::SocksProxyManager;
use std::{collections::VecDeque, future::Future, net::SocketAddr, pin::Pin, sync::Arc};
pub use socks5_impl::protocol::UserKey;
#[cfg(feature = "udpgw")]
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
use std::{
collections::VecDeque,
io::ErrorKind,
net::{IpAddr, SocketAddr},
sync::Arc,
};
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
net::TcpStream,
sync::{
mpsc::{error::SendError, Receiver, Sender},
Mutex,
},
net::{TcpSocket, TcpStream, UdpSocket},
sync::{Mutex, mpsc::Receiver},
};
pub use tokio_util::sync::CancellationToken;
use tproxy_config::is_private_ip;
use udp_stream::UdpStream;
#[cfg(feature = "udpgw")]
use udpgw::{UDPGW_KEEPALIVE_TIME, UDPGW_MAX_CONNECTIONS, UdpGwClientStream, UdpGwResponse};
pub use {
args::{ArgVerbosity, Args},
error::{Error, Result},
args::{ArgDns, ArgProxy, ArgVerbosity, Args, ProxyType},
error::{BoxError, Error, Result},
traffic_status::{TrafficStatus, tun2proxy_set_traffic_status_callback},
};
#[cfg(feature = "mimalloc")]
#[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
pub use general_api::general_run_async;
mod android;
mod api;
mod args;
mod directions;
mod dns;
mod dump_logger;
mod error;
mod general_api;
mod http;
mod ios;
mod no_proxy;
mod proxy_handler;
mod session_info;
pub mod socket_transfer;
mod socks;
mod traffic_status;
#[cfg(feature = "udpgw")]
pub mod udpgw;
mod virtual_dns;
#[doc(hidden)]
pub mod win_svc;
const DNS_PORT: u16 = 53;
const MAX_SESSIONS: u64 = 200;
static TASK_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
use std::sync::atomic::Ordering::Relaxed;
pub struct Builder<D> {
device: D,
mtu: Option<usize>,
args: Args,
#[allow(unused)]
#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)]
#[cfg_attr(
target_os = "linux",
derive(bincode::Encode, bincode::Decode, serde::Serialize, serde::Deserialize)
)]
pub enum SocketProtocol {
Tcp,
Udp,
}
impl<D: AsyncRead + AsyncWrite + Unpin + Send + 'static> Builder<D> {
pub fn new(device: D, args: Args) -> Self {
Builder { device, args, mtu: None }
}
pub fn mtu(mut self, mtu: usize) -> Self {
self.mtu = Some(mtu);
self
}
pub fn build(self) -> Tun2Socks5<impl Future<Output = crate::Result<()>> + Send + 'static> {
let (tx, rx) = tokio::sync::mpsc::channel::<()>(1);
Tun2Socks5(run(self.device, self.mtu.unwrap_or(1500), self.args, rx), tx)
}
#[allow(unused)]
#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)]
#[cfg_attr(
target_os = "linux",
derive(bincode::Encode, bincode::Decode, serde::Serialize, serde::Deserialize)
)]
pub enum SocketDomain {
IpV4,
IpV6,
}
pub struct Tun2Socks5<F: Future>(F, Sender<()>);
impl<F: Future + Send + 'static> Tun2Socks5<F>
where
F::Output: Send,
{
pub fn start(self) -> (JoinHandle<F::Output>, Quit) {
let r = tokio::spawn(self.0);
(JoinHandle(r), Quit(self.1))
}
}
pub struct Quit(Sender<()>);
impl Quit {
pub async fn trigger(&self) -> Result<(), SendError<()>> {
self.0.send(()).await
}
}
#[repr(transparent)]
struct TokioJoinError(tokio::task::JoinError);
impl From<TokioJoinError> for crate::Result<()> {
fn from(value: TokioJoinError) -> Self {
Err(crate::Error::Io(value.0.into()))
}
}
pub struct JoinHandle<R>(tokio::task::JoinHandle<R>);
impl<R: From<TokioJoinError>> Future for JoinHandle<R> {
type Output = R;
fn poll(mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<Self::Output> {
match std::task::ready!(Pin::new(&mut self.0).poll(cx)) {
Ok(r) => std::task::Poll::Ready(r),
Err(e) => std::task::Poll::Ready(TokioJoinError(e).into()),
impl From<IpAddr> for SocketDomain {
fn from(value: IpAddr) -> Self {
match value {
IpAddr::V4(_) => Self::IpV4,
IpAddr::V6(_) => Self::IpV6,
}
}
}
pub async fn run<D>(device: D, mtu: usize, args: Args, mut quit: Receiver<()>) -> crate::Result<()>
struct SocketQueue {
tcp_v4: Mutex<Receiver<TcpSocket>>,
tcp_v6: Mutex<Receiver<TcpSocket>>,
udp_v4: Mutex<Receiver<UdpSocket>>,
udp_v6: Mutex<Receiver<UdpSocket>>,
}
impl SocketQueue {
async fn recv_tcp(&self, domain: SocketDomain) -> Result<TcpSocket, std::io::Error> {
match domain {
SocketDomain::IpV4 => &self.tcp_v4,
SocketDomain::IpV6 => &self.tcp_v6,
}
.lock()
.await
.recv()
.await
.ok_or(ErrorKind::Other.into())
}
async fn recv_udp(&self, domain: SocketDomain) -> Result<UdpSocket, std::io::Error> {
match domain {
SocketDomain::IpV4 => &self.udp_v4,
SocketDomain::IpV6 => &self.udp_v6,
}
.lock()
.await
.recv()
.await
.ok_or(ErrorKind::Other.into())
}
}
async fn create_tcp_stream(socket_queue: &Option<Arc<SocketQueue>>, peer: SocketAddr) -> std::io::Result<TcpStream> {
match &socket_queue {
None => TcpStream::connect(peer).await,
Some(queue) => queue.recv_tcp(peer.ip().into()).await?.connect(peer).await,
}
}
async fn create_udp_stream(socket_queue: &Option<Arc<SocketQueue>>, peer: SocketAddr) -> std::io::Result<UdpStream> {
match &socket_queue {
None => UdpStream::connect(peer).await,
Some(queue) => {
let socket = queue.recv_udp(peer.ip().into()).await?;
socket.connect(peer).await?;
UdpStream::from_tokio(socket, peer).await
}
}
}
/// Run the proxy server
/// # Arguments
/// * `device` - The network device to use
/// * `mtu` - The MTU of the network device
/// * `args` - The arguments to use
/// * `shutdown_token` - The token to exit the server
/// # Returns
/// * The number of sessions while exiting
pub async fn run<D>(device: D, mtu: u16, args: Args, shutdown_token: CancellationToken) -> crate::Result<usize>
where
D: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
log::info!("{} {} starting...", env!("CARGO_PKG_NAME"), version_info!());
log::info!("Proxy {} server: {}", args.proxy.proxy_type, args.proxy.addr);
let server_addr = args.proxy.addr;
let key = args.proxy.credentials.clone();
let dns_addr = args.dns_addr;
let ipv6_enabled = args.ipv6_enabled;
let virtual_dns = if args.dns == args::ArgDns::Virtual {
Some(Arc::new(Mutex::new(VirtualDns::new())))
let virtual_dns = if args.dns == ArgDns::Virtual {
Some(Arc::new(Mutex::new(VirtualDns::new(args.virtual_dns_pool))))
} else {
None
};
#[cfg(target_os = "linux")]
let socket_queue = match args.socket_transfer_fd {
None => None,
Some(fd) => {
use crate::socket_transfer::{reconstruct_socket, reconstruct_transfer_socket, request_sockets};
use tokio::sync::mpsc::channel;
let fd = reconstruct_socket(fd)?;
let socket = reconstruct_transfer_socket(fd)?;
let socket = Arc::new(Mutex::new(socket));
macro_rules! create_socket_queue {
($domain:ident) => {{
const SOCKETS_PER_REQUEST: usize = 64;
let socket = socket.clone();
let (tx, rx) = channel(SOCKETS_PER_REQUEST);
tokio::spawn(async move {
loop {
let sockets =
match request_sockets(socket.lock().await, SocketDomain::$domain, SOCKETS_PER_REQUEST as u32).await {
Ok(sockets) => sockets,
Err(err) => {
log::warn!("Socket allocation request failed: {err}");
continue;
}
};
for s in sockets {
if let Err(_) = tx.send(s).await {
return;
}
}
}
});
Mutex::new(rx)
}};
}
Some(Arc::new(SocketQueue {
tcp_v4: create_socket_queue!(IpV4),
tcp_v6: create_socket_queue!(IpV6),
udp_v4: create_socket_queue!(IpV4),
udp_v6: create_socket_queue!(IpV6),
}))
}
};
#[cfg(not(target_os = "linux"))]
let socket_queue = None;
use socks5_impl::protocol::Version::{V4, V5};
let mgr = match args.proxy.proxy_type {
ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)) as Arc<dyn ProxyHandlerManager>,
ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)) as Arc<dyn ProxyHandlerManager>,
ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)) as Arc<dyn ProxyHandlerManager>,
let mgr: Arc<dyn ProxyHandlerManager> = match args.proxy.proxy_type {
ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)),
ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)),
ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)),
ProxyType::None => Arc::new(NoProxyManager::new()),
};
let mut ipstack_config = ipstack::IpStackConfig::default();
ipstack_config.mtu(mtu as _);
ipstack_config.tcp_timeout(std::time::Duration::from_secs(600)); // 10 minutes
ipstack_config.udp_timeout(std::time::Duration::from_secs(10)); // 10 seconds
ipstack_config.mtu(mtu);
ipstack_config.tcp_timeout(std::time::Duration::from_secs(args.tcp_timeout));
ipstack_config.udp_timeout(std::time::Duration::from_secs(args.udp_timeout));
let mut ip_stack = ipstack::IpStack::new(ipstack_config, device);
#[cfg(feature = "udpgw")]
let udpgw_client = args.udpgw_server.map(|addr| {
log::info!("UDP Gateway enabled, server: {addr}");
use std::time::Duration;
let client = Arc::new(UdpGwClient::new(
mtu,
args.udpgw_connections.unwrap_or(UDPGW_MAX_CONNECTIONS),
args.udpgw_keepalive.map(Duration::from_secs).unwrap_or(UDPGW_KEEPALIVE_TIME),
args.udp_timeout,
addr,
));
let client_keepalive = client.clone();
tokio::spawn(async move {
let _ = client_keepalive.heartbeat_task().await;
});
client
});
let task_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
use std::sync::atomic::Ordering::Relaxed;
loop {
let task_count = task_count.clone();
let virtual_dns = virtual_dns.clone();
let ip_stack_stream = tokio::select! {
_ = quit.recv() => {
log::info!("");
log::info!("Ctrl-C recieved, exiting...");
_ = shutdown_token.cancelled() => {
log::info!("Shutdown received");
break;
}
ip_stack_stream = ip_stack.accept() => {
ip_stack_stream?
}
};
let max_sessions = args.max_sessions;
match ip_stack_stream {
IpStackStream::Tcp(tcp) => {
if TASK_COUNT.load(Relaxed) > MAX_SESSIONS {
log::warn!("Too many sessions that over {MAX_SESSIONS}, dropping new session");
if task_count.load(Relaxed) >= max_sessions {
if args.exit_on_fatal_error {
log::info!("Too many sessions that over {max_sessions}, exiting...");
break;
}
log::warn!("Too many sessions that over {max_sessions}, dropping new session");
continue;
}
log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1);
log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1));
let info = SessionInfo::new(tcp.local_addr(), tcp.peer_addr(), IpProtocol::Tcp);
let domain_name = if let Some(virtual_dns) = &virtual_dns {
let mut virtual_dns = virtual_dns.lock().await;
@ -165,46 +289,53 @@ where
None
};
let proxy_handler = mgr.new_proxy_handler(info, domain_name, false).await?;
let socket_queue = socket_queue.clone();
tokio::spawn(async move {
if let Err(err) = handle_tcp_session(tcp, server_addr, proxy_handler).await {
log::error!("{} error \"{}\"", info, err);
if let Err(err) = handle_tcp_session(tcp, proxy_handler, socket_queue).await {
log::error!("{info} error \"{err}\"");
}
log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
});
}
IpStackStream::Udp(udp) => {
if TASK_COUNT.load(Relaxed) > MAX_SESSIONS {
log::warn!("Too many sessions that over {MAX_SESSIONS}, dropping new session");
if task_count.load(Relaxed) >= max_sessions {
if args.exit_on_fatal_error {
log::info!("Too many sessions that over {max_sessions}, exiting...");
break;
}
log::warn!("Too many sessions that over {max_sessions}, dropping new session");
continue;
}
log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1);
log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1));
let mut info = SessionInfo::new(udp.local_addr(), udp.peer_addr(), IpProtocol::Udp);
if info.dst.port() == DNS_PORT {
if is_private_ip(info.dst.ip()) {
info.dst.set_ip(dns_addr);
info.dst.set_ip(dns_addr); // !!! Here we change the destination address to the remote DNS server !!!
}
if args.dns == args::ArgDns::OverTcp {
if args.dns == ArgDns::OverTcp {
info.protocol = IpProtocol::Tcp;
let proxy_handler = mgr.new_proxy_handler(info, None, false).await?;
let socket_queue = socket_queue.clone();
tokio::spawn(async move {
if let Err(err) = handle_dns_over_tcp_session(udp, server_addr, proxy_handler, ipv6_enabled).await {
log::error!("{} error \"{}\"", info, err);
if let Err(err) = handle_dns_over_tcp_session(udp, proxy_handler, socket_queue, ipv6_enabled).await {
log::error!("{info} error \"{err}\"");
}
log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
});
continue;
}
if args.dns == args::ArgDns::Virtual {
if args.dns == ArgDns::Virtual {
tokio::spawn(async move {
if let Some(virtual_dns) = virtual_dns {
if let Err(err) = handle_virtual_dns_session(udp, virtual_dns).await {
log::error!("{} error \"{}\"", info, err);
log::error!("{info} error \"{err}\"");
}
}
log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
});
continue;
}
assert_eq!(args.dns, args::ArgDns::Direct);
assert_eq!(args.dns, ArgDns::Direct);
}
let domain_name = if let Some(virtual_dns) = &virtual_dns {
let mut virtual_dns = virtual_dns.lock().await;
@ -213,82 +344,301 @@ where
} else {
None
};
let proxy_handler = mgr.new_proxy_handler(info, domain_name, true).await?;
tokio::spawn(async move {
if let Err(err) = handle_udp_associate_session(udp, server_addr, proxy_handler, ipv6_enabled).await {
log::error!("{} error \"{}\"", info, err);
#[cfg(feature = "udpgw")]
if let Some(udpgw) = udpgw_client.clone() {
let tcp_src = match udp.peer_addr() {
SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
SocketAddr::V6(_) => SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 0, 0, 0)),
};
let tcpinfo = SessionInfo::new(tcp_src, udpgw.get_udpgw_server_addr(), IpProtocol::Tcp);
let proxy_handler = mgr.new_proxy_handler(tcpinfo, None, false).await?;
let queue = socket_queue.clone();
tokio::spawn(async move {
let dst = info.dst; // real UDP destination address
let dst_addr = match domain_name {
Some(ref d) => socks5_impl::protocol::Address::from((d.clone(), dst.port())),
None => dst.into(),
};
if let Err(e) = handle_udp_gateway_session(udp, udpgw, &dst_addr, proxy_handler, queue, ipv6_enabled).await {
log::info!("Ending {info} with \"{e}\"");
}
log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
});
continue;
}
match mgr.new_proxy_handler(info, domain_name, true).await {
Ok(proxy_handler) => {
let socket_queue = socket_queue.clone();
tokio::spawn(async move {
let ty = args.proxy.proxy_type;
if let Err(err) = handle_udp_associate_session(udp, ty, proxy_handler, socket_queue, ipv6_enabled).await {
log::info!("Ending {info} with \"{err}\"");
}
log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
});
}
log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
});
Err(e) => {
log::error!("Failed to create UDP connection: {e}");
}
}
}
_ => {
log::trace!("Unknown transport");
IpStackStream::UnknownTransport(u) => {
let len = u.payload().len();
log::info!("#0 unhandled transport - Ip Protocol {:?}, length {}", u.ip_protocol(), len);
continue;
}
IpStackStream::UnknownNetwork(pkt) => {
log::info!("#0 unknown transport - {} bytes", pkt.len());
continue;
}
}
}
Ok(())
Ok(task_count.load(Relaxed))
}
async fn handle_virtual_dns_session(mut udp: IpStackUdpStream, dns: Arc<Mutex<VirtualDns>>) -> crate::Result<()> {
let mut buf = [0_u8; 4096];
loop {
let len = udp.read(&mut buf).await?;
let len = match udp.read(&mut buf).await {
Err(e) => {
// a UDP read failure here is not treated as an error.
log::debug!("Virtual DNS session error: {e}");
break;
}
Ok(len) => len,
};
if len == 0 {
break;
}
let (msg, qname, ip) = dns.lock().await.generate_query(&buf[..len])?;
udp.write_all(&msg).await?;
log::debug!("Virtual DNS query: {} -> {}", qname, ip);
log::debug!("Virtual DNS query: {qname} -> {ip}");
}
Ok(())
}
async fn copy_and_record_traffic<R, W>(reader: &mut R, writer: &mut W, is_tx: bool) -> tokio::io::Result<u64>
where
R: tokio::io::AsyncRead + Unpin + ?Sized,
W: tokio::io::AsyncWrite + Unpin + ?Sized,
{
let mut buf = vec![0; 8192];
let mut total = 0;
loop {
match reader.read(&mut buf).await? {
0 => break, // EOF
n => {
total += n as u64;
let (tx, rx) = if is_tx { (n, 0) } else { (0, n) };
if let Err(e) = crate::traffic_status::traffic_status_update(tx, rx) {
log::debug!("Record traffic status error: {e}");
}
writer.write_all(&buf[..n]).await?;
}
}
}
Ok(total)
}
async fn handle_tcp_session(
tcp_stack: IpStackTcpStream,
server_addr: SocketAddr,
mut tcp_stack: IpStackTcpStream,
proxy_handler: Arc<Mutex<dyn ProxyHandler>>,
socket_queue: Option<Arc<SocketQueue>>,
) -> crate::Result<()> {
let mut server = TcpStream::connect(server_addr).await?;
let (session_info, server_addr) = {
let handler = proxy_handler.lock().await;
let session_info = proxy_handler.lock().await.get_session_info();
log::info!("Beginning {}", session_info);
(handler.get_session_info(), handler.get_server_addr())
};
let _ = handle_proxy_session(&mut server, proxy_handler).await?;
let mut server = create_tcp_stream(&socket_queue, server_addr).await?;
log::info!("Beginning {session_info}");
if let Err(e) = handle_proxy_session(&mut server, proxy_handler).await {
tcp_stack.shutdown().await?;
return Err(e);
}
let (mut t_rx, mut t_tx) = tokio::io::split(tcp_stack);
let (mut s_rx, mut s_tx) = tokio::io::split(server);
let result = tokio::join! {
tokio::io::copy(&mut t_rx, &mut s_tx),
tokio::io::copy(&mut s_rx, &mut t_tx),
};
let result = match result {
(Ok(t), Ok(s)) => Ok((t, s)),
(Err(e), _) | (_, Err(e)) => Err(e),
let res = tokio::join!(
async move {
let r = copy_and_record_traffic(&mut t_rx, &mut s_tx, true).await;
if let Err(err) = s_tx.shutdown().await {
log::trace!("{session_info} s_tx shutdown error {err}");
}
r
},
async move {
let r = copy_and_record_traffic(&mut s_rx, &mut t_tx, false).await;
if let Err(err) = t_tx.shutdown().await {
log::trace!("{session_info} t_tx shutdown error {err}");
}
r
},
);
log::info!("Ending {session_info} with {res:?}");
Ok(())
}
#[cfg(feature = "udpgw")]
async fn handle_udp_gateway_session(
mut udp_stack: IpStackUdpStream,
udpgw_client: Arc<UdpGwClient>,
udp_dst: &socks5_impl::protocol::Address,
proxy_handler: Arc<Mutex<dyn ProxyHandler>>,
socket_queue: Option<Arc<SocketQueue>>,
ipv6_enabled: bool,
) -> crate::Result<()> {
let proxy_server_addr = { proxy_handler.lock().await.get_server_addr() };
let udp_mtu = udpgw_client.get_udp_mtu();
let udp_timeout = udpgw_client.get_udp_timeout();
let mut stream = loop {
match udpgw_client.pop_server_connection_from_queue().await {
Some(stream) => {
if stream.is_closed() {
continue;
} else {
break stream;
}
}
None => {
let mut tcp_server_stream = create_tcp_stream(&socket_queue, proxy_server_addr).await?;
if let Err(e) = handle_proxy_session(&mut tcp_server_stream, proxy_handler).await {
return Err(format!("udpgw connection error: {e}").into());
}
break UdpGwClientStream::new(tcp_server_stream);
}
}
};
log::info!("Ending {} with {:?}", session_info, result);
let tcp_local_addr = stream.local_addr();
let sn = stream.serial_number();
log::info!("[UdpGw] Beginning stream {} {} -> {}", sn, &tcp_local_addr, udp_dst);
let Some(mut reader) = stream.get_reader() else {
return Err("get reader failed".into());
};
let Some(mut writer) = stream.get_writer() else {
return Err("get writer failed".into());
};
let mut tmp_buf = vec![0; udp_mtu.into()];
loop {
tokio::select! {
len = udp_stack.read(&mut tmp_buf) => {
let read_len = match len {
Ok(0) => {
log::info!("[UdpGw] Ending stream {} {} <> {}", sn, &tcp_local_addr, udp_dst);
break;
}
Ok(n) => n,
Err(e) => {
log::info!("[UdpGw] Ending stream {} {} <> {} with udp stack \"{}\"", sn, &tcp_local_addr, udp_dst, e);
break;
}
};
crate::traffic_status::traffic_status_update(read_len, 0)?;
let sn = stream.serial_number();
if let Err(e) = UdpGwClient::send_udpgw_packet(ipv6_enabled, &tmp_buf[0..read_len], udp_dst, sn, &mut writer).await {
log::info!("[UdpGw] Ending stream {} {} <> {} with send_udpgw_packet {}", sn, &tcp_local_addr, udp_dst, e);
break;
}
log::debug!("[UdpGw] stream {} {} -> {} send len {}", sn, &tcp_local_addr, udp_dst, read_len);
stream.update_activity();
}
ret = UdpGwClient::recv_udpgw_packet(udp_mtu, udp_timeout, &mut reader) => {
if let Ok((len, _)) = ret {
crate::traffic_status::traffic_status_update(0, len)?;
}
match ret {
Err(e) => {
log::warn!("[UdpGw] Ending stream {} {} <> {} with recv_udpgw_packet {}", sn, &tcp_local_addr, udp_dst, e);
stream.close();
break;
}
Ok((_, packet)) => match packet {
// we should not receive a keepalive here
UdpGwResponse::KeepAlive => {
log::error!("[UdpGw] Ending stream {} {} <> {} with recv keepalive", sn, &tcp_local_addr, udp_dst);
stream.close();
break;
}
// the server-side UDP session may have timed out; can we continue to receive UDP data?
UdpGwResponse::Error => {
log::info!("[UdpGw] Ending stream {} {} <> {} with recv udp error", sn, &tcp_local_addr, udp_dst);
stream.update_activity();
continue;
}
UdpGwResponse::TcpClose => {
log::error!("[UdpGw] Ending stream {} {} <> {} with tcp closed", sn, &tcp_local_addr, udp_dst);
stream.close();
break;
}
UdpGwResponse::Data(data) => {
use socks5_impl::protocol::StreamOperation;
let len = data.len();
let f = data.header.flags;
log::debug!("[UdpGw] stream {sn} {} <- {} receive {f} len {len}", &tcp_local_addr, udp_dst);
if let Err(e) = udp_stack.write_all(&data.data).await {
log::error!("[UdpGw] Ending stream {} {} <> {} with send_udp_packet {}", sn, &tcp_local_addr, udp_dst, e);
break;
}
}
}
}
stream.update_activity();
}
}
}
if !stream.is_closed() {
udpgw_client.store_server_connection_full(stream, reader, writer).await;
}
Ok(())
}
async fn handle_udp_associate_session(
mut udp_stack: IpStackUdpStream,
server_addr: SocketAddr,
proxy_type: ProxyType,
proxy_handler: Arc<Mutex<dyn ProxyHandler>>,
socket_queue: Option<Arc<SocketQueue>>,
ipv6_enabled: bool,
) -> crate::Result<()> {
use socks5_impl::protocol::{Address, StreamOperation, UdpHeader};
let mut server = TcpStream::connect(server_addr).await?;
let session_info = proxy_handler.lock().await.get_session_info();
let domain_name = proxy_handler.lock().await.get_domain_name();
log::info!("Beginning {}", session_info);
let udp_addr = handle_proxy_session(&mut server, proxy_handler).await?;
let udp_addr = udp_addr.ok_or("udp associate failed")?;
let (session_info, server_addr, domain_name, udp_addr) = {
let handler = proxy_handler.lock().await;
(
handler.get_session_info(),
handler.get_server_addr(),
handler.get_domain_name(),
handler.get_udp_associate(),
)
};
let mut udp_server = UdpStream::connect(udp_addr).await?;
log::info!("Beginning {session_info}");
// `_server` is meaningful here; it must be kept alive the whole time
// to ensure that UDP transmission will not be interrupted accidentally.
let (_server, udp_addr) = match udp_addr {
Some(udp_addr) => (None, udp_addr),
None => {
let mut server = create_tcp_stream(&socket_queue, server_addr).await?;
let udp_addr = handle_proxy_session(&mut server, proxy_handler).await?;
(Some(server), udp_addr.ok_or("udp associate failed")?)
}
};
let mut udp_server = create_udp_stream(&socket_queue, udp_addr).await?;
let mut buf1 = [0_u8; 4096];
let mut buf2 = [0_u8; 4096];
@ -301,18 +651,24 @@ async fn handle_udp_associate_session(
}
let buf1 = &buf1[..len];
let s5addr = if let Some(domain_name) = &domain_name {
Address::DomainAddress(domain_name.clone(), session_info.dst.port())
crate::traffic_status::traffic_status_update(len, 0)?;
if let ProxyType::Socks4 | ProxyType::Socks5 = proxy_type {
let s5addr = if let Some(domain_name) = &domain_name {
Address::DomainAddress(domain_name.clone(), session_info.dst.port())
} else {
session_info.dst.into()
};
// Add SOCKS5 UDP header to the incoming data
let mut s5_udp_data = Vec::<u8>::new();
UdpHeader::new(0, s5addr).write_to_stream(&mut s5_udp_data)?;
s5_udp_data.extend_from_slice(buf1);
udp_server.write_all(&s5_udp_data).await?;
} else {
session_info.dst.into()
};
// Add SOCKS5 UDP header to the incoming data
let mut s5_udp_data = Vec::<u8>::new();
UdpHeader::new(0, s5addr).write_to_stream(&mut s5_udp_data)?;
s5_udp_data.extend_from_slice(buf1);
udp_server.write_all(&s5_udp_data).await?;
udp_server.write_all(buf1).await?;
}
}
len = udp_server.read(&mut buf2) => {
let len = len?;
@ -321,40 +677,51 @@ async fn handle_udp_associate_session(
}
let buf2 = &buf2[..len];
// Remove SOCKS5 UDP header from the server data
let header = UdpHeader::retrieve_from_stream(&mut &buf2[..])?;
let data = &buf2[header.len()..];
crate::traffic_status::traffic_status_update(0, len)?;
let buf = if session_info.dst.port() == DNS_PORT {
let mut message = dns::parse_data_to_dns_message(data, false)?;
if !ipv6_enabled {
dns::remove_ipv6_entries(&mut message);
}
message.to_vec()?
if let ProxyType::Socks4 | ProxyType::Socks5 = proxy_type {
// Remove SOCKS5 UDP header from the server data
let header = UdpHeader::retrieve_from_stream(&mut &buf2[..])?;
let data = &buf2[header.len()..];
let buf = if session_info.dst.port() == DNS_PORT {
let mut message = dns::parse_data_to_dns_message(data, false)?;
if !ipv6_enabled {
dns::remove_ipv6_entries(&mut message);
}
message.to_vec()?
} else {
data.to_vec()
};
udp_stack.write_all(&buf).await?;
} else {
data.to_vec()
};
udp_stack.write_all(&buf).await?;
udp_stack.write_all(buf2).await?;
}
}
}
}
log::info!("Ending {}", session_info);
log::info!("Ending {session_info}");
Ok(())
}
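
    // For clarity (illustrative only): the SOCKS5 UDP encapsulation performed above boils
    // down to prepending a UdpHeader to the payload. A standalone sketch using the same
    // socks5_impl types as this changeset; names here are not part of the diff.
    use socks5_impl::protocol::{StreamOperation, UdpHeader};

    fn wrap_socks5_udp(payload: &[u8], dst: std::net::SocketAddr) -> crate::Result<Vec<u8>> {
        let mut out = Vec::new();
        // RSV/FRAG, ATYP, DST.ADDR and DST.PORT, followed by the original datagram.
        UdpHeader::new(0, dst.into()).write_to_stream(&mut out)?;
        out.extend_from_slice(payload);
        Ok(out)
    }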
async fn handle_dns_over_tcp_session(
mut udp_stack: IpStackUdpStream,
server_addr: SocketAddr,
proxy_handler: Arc<Mutex<dyn ProxyHandler>>,
socket_queue: Option<Arc<SocketQueue>>,
ipv6_enabled: bool,
) -> crate::Result<()> {
let mut server = TcpStream::connect(server_addr).await?;
let (session_info, server_addr) = {
let handler = proxy_handler.lock().await;
let session_info = proxy_handler.lock().await.get_session_info();
log::info!("Beginning {}", session_info);
(handler.get_session_info(), handler.get_server_addr())
};
let mut server = create_tcp_stream(&socket_queue, server_addr).await?;
log::info!("Beginning {session_info}");
let _ = handle_proxy_session(&mut server, proxy_handler).await?;
@ -378,6 +745,8 @@ async fn handle_dns_over_tcp_session(
buf.extend_from_slice(buf1);
server.write_all(&buf).await?;
crate::traffic_status::traffic_status_update(buf.len(), 0)?;
}
len = server.read(&mut buf2) => {
let len = len?;
@ -386,6 +755,8 @@ async fn handle_dns_over_tcp_session(
}
let mut buf = buf2[..len].to_vec();
crate::traffic_status::traffic_status_update(0, len)?;
let mut to_send: VecDeque<Vec<u8>> = VecDeque::new();
loop {
if buf.len() < 2 {
@ -403,7 +774,7 @@ async fn handle_dns_over_tcp_session(
let name = dns::extract_domain_from_dns_message(&message)?;
let ip = dns::extract_ipaddr_from_dns_message(&message);
log::trace!("DNS over TCP query result: {} -> {:?}", name, ip);
log::trace!("DNS over TCP query result: {name} -> {ip:?}");
if !ipv6_enabled {
dns::remove_ipv6_entries(&mut message);
@ -423,15 +794,19 @@ async fn handle_dns_over_tcp_session(
}
}
log::info!("Ending {}", session_info);
log::info!("Ending {session_info}");
Ok(())
}
/// This function is used to handle the business logic of tun2proxy and SOCKS5 server.
/// When handling UDP proxy, the return value UDP associate IP address is the result of this business logic.
/// However, when handling TCP business logic, the return value Ok(None) is meaningless, just indicating that the operation was successful.
async fn handle_proxy_session(server: &mut TcpStream, proxy_handler: Arc<Mutex<dyn ProxyHandler>>) -> crate::Result<Option<SocketAddr>> {
let mut launched = false;
let mut proxy_handler = proxy_handler.lock().await;
let dir = OutgoingDirection::ToServer;
let (mut tx, mut rx) = (0, 0);
loop {
if proxy_handler.connection_established() {
@ -446,6 +821,7 @@ async fn handle_proxy_session(server: &mut TcpStream, proxy_handler: Arc<Mutex<d
}
server.write_all(data).await?;
proxy_handler.consume_data(dir, len);
tx += len;
launched = true;
}
@ -455,6 +831,7 @@ async fn handle_proxy_session(server: &mut TcpStream, proxy_handler: Arc<Mutex<d
if len == 0 {
return Err("server closed accidentially".into());
}
rx += len;
let event = IncomingDataEvent {
direction: IncomingDirection::FromServer,
buffer: &buf[..len],
@ -466,7 +843,9 @@ async fn handle_proxy_session(server: &mut TcpStream, proxy_handler: Arc<Mutex<d
if len > 0 {
server.write_all(data).await?;
proxy_handler.consume_data(dir, len);
tx += len;
}
}
crate::traffic_status::traffic_status_update(tx, rx)?;
Ok(proxy_handler.get_udp_associate())
}

src/no_proxy.rs (new file, 107 lines)
View file

@ -0,0 +1,107 @@
use crate::{
directions::{IncomingDataEvent, IncomingDirection, OutgoingDataEvent, OutgoingDirection},
proxy_handler::{ProxyHandler, ProxyHandlerManager},
session_info::SessionInfo,
};
use std::{collections::VecDeque, net::SocketAddr, sync::Arc};
use tokio::sync::Mutex;
struct NoProxyHandler {
info: SessionInfo,
domain_name: Option<String>,
client_outbuf: VecDeque<u8>,
server_outbuf: VecDeque<u8>,
udp_associate: bool,
}
#[async_trait::async_trait]
impl ProxyHandler for NoProxyHandler {
fn get_server_addr(&self) -> SocketAddr {
self.info.dst
}
fn get_session_info(&self) -> SessionInfo {
self.info
}
fn get_domain_name(&self) -> Option<String> {
self.domain_name.clone()
}
async fn push_data(&mut self, event: IncomingDataEvent<'_>) -> std::io::Result<()> {
let IncomingDataEvent { direction, buffer } = event;
match direction {
IncomingDirection::FromServer => {
self.client_outbuf.extend(buffer.iter());
}
IncomingDirection::FromClient => {
self.server_outbuf.extend(buffer.iter());
}
}
Ok(())
}
fn consume_data(&mut self, dir: OutgoingDirection, size: usize) {
let buffer = match dir {
OutgoingDirection::ToServer => &mut self.server_outbuf,
OutgoingDirection::ToClient => &mut self.client_outbuf,
};
buffer.drain(0..size);
}
fn peek_data(&mut self, dir: OutgoingDirection) -> OutgoingDataEvent {
let buffer = match dir {
OutgoingDirection::ToServer => &mut self.server_outbuf,
OutgoingDirection::ToClient => &mut self.client_outbuf,
};
OutgoingDataEvent {
direction: dir,
buffer: buffer.make_contiguous(),
}
}
fn connection_established(&self) -> bool {
true
}
fn data_len(&self, dir: OutgoingDirection) -> usize {
match dir {
OutgoingDirection::ToServer => self.server_outbuf.len(),
OutgoingDirection::ToClient => self.client_outbuf.len(),
}
}
fn reset_connection(&self) -> bool {
false
}
fn get_udp_associate(&self) -> Option<SocketAddr> {
self.udp_associate.then_some(self.info.dst)
}
}
pub(crate) struct NoProxyManager;
#[async_trait::async_trait]
impl ProxyHandlerManager for NoProxyManager {
async fn new_proxy_handler(
&self,
info: SessionInfo,
domain_name: Option<String>,
udp_associate: bool,
) -> std::io::Result<Arc<Mutex<dyn ProxyHandler>>> {
Ok(Arc::new(Mutex::new(NoProxyHandler {
info,
domain_name,
client_outbuf: VecDeque::default(),
server_outbuf: VecDeque::default(),
udp_associate,
})))
}
}
impl NoProxyManager {
pub(crate) fn new() -> Self {
Self
}
}
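
    // In-crate usage sketch (illustrative only); in this changeset the manager is only
    // constructed through the ProxyType::None arm shown in lib.rs.
    use crate::proxy_handler::{ProxyHandler, ProxyHandlerManager};
    use crate::session_info::SessionInfo;
    use std::sync::Arc;
    use tokio::sync::Mutex;

    async fn direct_handler(info: SessionInfo) -> std::io::Result<Arc<Mutex<dyn ProxyHandler>>> {
        // No proxy involved: the returned handler reports the session destination as the
        // "server" address, so traffic goes straight to the target.
        NoProxyManager::new().new_proxy_handler(info, None, false).await
    }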

View file

@ -7,13 +7,16 @@ use tokio::sync::Mutex;
#[async_trait::async_trait]
pub(crate) trait ProxyHandler: Send + Sync {
fn get_server_addr(&self) -> SocketAddr;
fn get_session_info(&self) -> SessionInfo;
fn get_domain_name(&self) -> Option<String>;
async fn push_data(&mut self, event: IncomingDataEvent<'_>) -> std::io::Result<()>;
fn consume_data(&mut self, dir: OutgoingDirection, size: usize);
fn peek_data(&mut self, dir: OutgoingDirection) -> OutgoingDataEvent;
fn connection_established(&self) -> bool;
#[allow(dead_code)]
fn data_len(&self, dir: OutgoingDirection) -> usize;
#[allow(dead_code)]
fn reset_connection(&self) -> bool;
fn get_udp_associate(&self) -> Option<SocketAddr>;
}
@ -26,5 +29,4 @@ pub(crate) trait ProxyHandlerManager: Send + Sync {
domain_name: Option<String>,
udp_associate: bool,
) -> std::io::Result<Arc<Mutex<dyn ProxyHandler>>>;
fn get_server_addr(&self) -> SocketAddr;
}

View file

@ -16,7 +16,7 @@ impl std::fmt::Display for IpProtocol {
IpProtocol::Tcp => write!(f, "TCP"),
IpProtocol::Udp => write!(f, "UDP"),
IpProtocol::Icmp => write!(f, "ICMP"),
IpProtocol::Other(v) => write!(f, "Other({})", v),
IpProtocol::Other(v) => write!(f, "Other(0x{v:02X})"),
}
}
}

src/socket_transfer.rs (new file, 242 lines)
View file

@ -0,0 +1,242 @@
#![cfg(target_os = "linux")]
use crate::{SocketDomain, SocketProtocol, error};
use nix::{
errno::Errno,
fcntl::{self, FdFlag},
sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, SockType, cmsg_space, getsockopt, recvmsg, sendmsg, sockopt},
};
use serde::{Deserialize, Serialize};
use std::{
io::{ErrorKind, IoSlice, IoSliceMut, Result},
ops::DerefMut,
os::fd::{AsFd, AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd},
};
use tokio::net::{TcpSocket, UdpSocket, UnixDatagram};
const REQUEST_BUFFER_SIZE: usize = 64;
#[derive(bincode::Encode, bincode::Decode, Hash, Copy, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
struct Request {
protocol: SocketProtocol,
domain: SocketDomain,
number: u32,
}
#[derive(bincode::Encode, bincode::Decode, PartialEq, Debug, Hash, Copy, Clone, Eq, Serialize, Deserialize)]
enum Response {
Ok,
}
/// Reconstruct socket from raw `fd`
pub fn reconstruct_socket(fd: RawFd) -> Result<OwnedFd> {
// `fd` is confirmed to be valid so it should be closed
let socket = unsafe { OwnedFd::from_raw_fd(fd) };
// Check if `fd` is valid
let fd_flags = fcntl::fcntl(socket.as_fd(), fcntl::F_GETFD)?;
// Insert CLOEXEC flag to the `fd` to prevent further propagation across `execve(2)` calls
let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?;
if !fd_flags.contains(FdFlag::FD_CLOEXEC) {
fd_flags.insert(FdFlag::FD_CLOEXEC);
fcntl::fcntl(socket.as_fd(), fcntl::F_SETFD(fd_flags))?;
}
Ok(socket)
}
/// Reconstruct transfer socket from `fd`
///
/// Panics if called outside of tokio runtime
pub fn reconstruct_transfer_socket(fd: OwnedFd) -> Result<UnixDatagram> {
// Check if socket of type DATAGRAM
let sock_type = getsockopt(&fd, sockopt::SockType)?;
if !matches!(sock_type, SockType::Datagram) {
return Err(ErrorKind::InvalidInput.into());
}
let std_socket: std::os::unix::net::UnixDatagram = fd.into();
std_socket.set_nonblocking(true)?;
// Fails if tokio context is absent
Ok(UnixDatagram::from_std(std_socket).unwrap())
}
/// Create pair of interconnected sockets one of which is set to stay open across `execve(2)` calls.
pub async fn create_transfer_socket_pair() -> std::io::Result<(UnixDatagram, OwnedFd)> {
let (local, remote) = tokio::net::UnixDatagram::pair()?;
let remote_fd: OwnedFd = remote.into_std().unwrap().into();
// Get `remote_fd` flags
let fd_flags = fcntl::fcntl(remote_fd.as_fd(), fcntl::F_GETFD)?;
// Remove CLOEXEC flag from the `remote_fd` to allow propagating across `execve(2)`
let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?;
fd_flags.remove(FdFlag::FD_CLOEXEC);
fcntl::fcntl(remote_fd.as_fd(), fcntl::F_SETFD(fd_flags))?;
Ok((local, remote_fd))
}
pub trait TransferableSocket: Sized {
fn from_fd(fd: OwnedFd) -> Result<Self>;
fn domain() -> SocketProtocol;
}
impl TransferableSocket for TcpSocket {
fn from_fd(fd: OwnedFd) -> Result<Self> {
// Check if socket is of type STREAM
let sock_type = getsockopt(&fd, sockopt::SockType)?;
if !matches!(sock_type, SockType::Stream) {
return Err(ErrorKind::InvalidInput.into());
}
let std_stream: std::net::TcpStream = fd.into();
std_stream.set_nonblocking(true)?;
Ok(TcpSocket::from_std_stream(std_stream))
}
fn domain() -> SocketProtocol {
SocketProtocol::Tcp
}
}
impl TransferableSocket for UdpSocket {
/// Panics if called outside of tokio runtime
fn from_fd(fd: OwnedFd) -> Result<Self> {
// Check if socket is of type DATAGRAM
let sock_type = getsockopt(&fd, sockopt::SockType)?;
if !matches!(sock_type, SockType::Datagram) {
return Err(ErrorKind::InvalidInput.into());
}
let std_socket: std::net::UdpSocket = fd.into();
std_socket.set_nonblocking(true)?;
Ok(UdpSocket::try_from(std_socket).unwrap())
}
fn domain() -> SocketProtocol {
SocketProtocol::Udp
}
}
/// Send [`Request`] to `socket` and return received [`TransferableSocket`]s
///
/// Panics if called outside of tokio runtime
pub async fn request_sockets<S, T>(mut socket: S, domain: SocketDomain, number: u32) -> error::Result<Vec<T>>
where
S: DerefMut<Target = UnixDatagram>,
T: TransferableSocket,
{
// Borrow socket as mut to prevent multiple simultaneous requests
let socket = socket.deref_mut();
let mut request = [0u8; 1000];
// Send request
let size = bincode::encode_into_slice(
Request {
protocol: T::domain(),
domain,
number,
},
&mut request,
bincode::config::standard(),
)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
socket.send(&request[..size]).await?;
// Receive response
loop {
socket.readable().await?;
let mut buf = [0_u8; REQUEST_BUFFER_SIZE];
let mut iov = [IoSliceMut::new(&mut buf[..])];
let mut cmsg = vec![0; cmsg_space::<RawFd>() * number as usize];
let msg = recvmsg::<()>(socket.as_fd().as_raw_fd(), &mut iov, Some(&mut cmsg), MsgFlags::empty());
let msg = match msg {
Err(Errno::EAGAIN) => continue,
msg => msg?,
};
// Parse response
let response = &msg.iovs().next().unwrap()[..msg.bytes];
let response: Response = bincode::decode_from_slice(response, bincode::config::standard())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?
.0;
if !matches!(response, Response::Ok) {
return Err("Request for new sockets failed".into());
}
// Process received file descriptors
let mut sockets = Vec::<T>::with_capacity(number as usize);
for cmsg in msg.cmsgs()? {
if let ControlMessageOwned::ScmRights(fds) = cmsg {
for fd in fds {
if fd < 0 {
return Err("Received socket is invalid".into());
}
let owned_fd = reconstruct_socket(fd)?;
sockets.push(T::from_fd(owned_fd)?);
}
}
}
return Ok(sockets);
}
}
/// Process [`Request`]s received from `socket`
///
/// Panics if called outside of tokio runtime
pub async fn process_socket_requests(socket: &UnixDatagram) -> error::Result<()> {
loop {
let mut buf = [0_u8; REQUEST_BUFFER_SIZE];
let len = socket.recv(&mut buf[..]).await?;
let request: Request = bincode::decode_from_slice(&buf[..len], bincode::config::standard())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?
.0;
let response = Response::Ok;
let mut buf = [0u8; 1000];
let size = bincode::encode_into_slice(response, &mut buf, bincode::config::standard())
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
let mut owned_fd_buf: Vec<OwnedFd> = Vec::with_capacity(request.number as usize);
for _ in 0..request.number {
let fd = match request.protocol {
SocketProtocol::Tcp => match request.domain {
SocketDomain::IpV4 => tokio::net::TcpSocket::new_v4(),
SocketDomain::IpV6 => tokio::net::TcpSocket::new_v6(),
}
.map(|s| unsafe { OwnedFd::from_raw_fd(s.into_raw_fd()) }),
SocketProtocol::Udp => match request.domain {
SocketDomain::IpV4 => tokio::net::UdpSocket::bind("0.0.0.0:0").await,
SocketDomain::IpV6 => tokio::net::UdpSocket::bind("[::]:0").await,
}
.map(|s| s.into_std().unwrap().into()),
};
match fd {
Err(err) => log::warn!("Failed to allocate socket: {err}"),
Ok(fd) => owned_fd_buf.push(fd),
};
}
socket.writable().await?;
let raw_fd_buf: Vec<RawFd> = owned_fd_buf.iter().map(|fd| fd.as_raw_fd()).collect();
let cmsg = ControlMessage::ScmRights(&raw_fd_buf[..]);
let iov = [IoSlice::new(&buf[..size])];
sendmsg::<()>(socket.as_raw_fd(), &iov, &[cmsg], MsgFlags::empty(), None)?;
}
}
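
    // Hypothetical in-process wiring sketch (not part of this changeset): one end of the
    // pair answers requests with process_socket_requests while the other rebuilds the
    // transfer socket and asks for pre-created sockets. In the real flow the remote fd is
    // inherited across execve by the unprivileged child process.
    async fn socket_transfer_demo() -> crate::error::Result<()> {
        let (local, remote_fd) = create_transfer_socket_pair().await?;
        // Privileged side: keep answering allocation requests.
        tokio::spawn(async move {
            let _ = process_socket_requests(&local).await;
        });
        // Unprivileged side: rebuild the transfer socket and request 4 IPv4 TCP sockets.
        let transfer = reconstruct_transfer_socket(remote_fd)?;
        let transfer = std::sync::Arc::new(tokio::sync::Mutex::new(transfer));
        let sockets: Vec<tokio::net::TcpSocket> =
            request_sockets(transfer.lock().await, SocketDomain::IpV4, 4).await?;
        log::info!("received {} pre-created sockets", sockets.len());
        Ok(())
    }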

View file

@ -4,7 +4,7 @@ use crate::{
proxy_handler::{ProxyHandler, ProxyHandlerManager},
session_info::SessionInfo,
};
use socks5_impl::protocol::{self, handshake, password_method, Address, AuthMethod, StreamOperation, UserKey, Version};
use socks5_impl::protocol::{self, Address, AuthMethod, StreamOperation, UserKey, Version, handshake, password_method};
use std::{collections::VecDeque, net::SocketAddr, sync::Arc};
use tokio::sync::Mutex;
@ -20,6 +20,7 @@ enum SocksState {
}
struct SocksProxyImpl {
server_addr: SocketAddr,
info: SessionInfo,
domain_name: Option<String>,
state: SocksState,
@ -35,6 +36,7 @@ struct SocksProxyImpl {
impl SocksProxyImpl {
fn new(
server_addr: SocketAddr,
info: SessionInfo,
domain_name: Option<String>,
credentials: Option<UserKey>,
@ -42,6 +44,7 @@ impl SocksProxyImpl {
command: protocol::Command,
) -> Result<Self> {
let mut result = Self {
server_addr,
info,
domain_name,
state: SocksState::ClientHello,
@ -75,7 +78,7 @@ impl SocksProxyImpl {
}
}
SocketAddr::V6(addr) => {
return Err(format!("SOCKS4 does not support IPv6: {}", addr).into());
return Err(format!("SOCKS4 does not support IPv6: {addr}").into());
}
}
self.server_outbuf.extend(ip_vec);
@ -133,7 +136,7 @@ impl SocksProxyImpl {
let response = handshake::Response::retrieve_from_stream(&mut self.server_inbuf.clone());
if let Err(e) = response {
if e.kind() == std::io::ErrorKind::UnexpectedEof {
log::trace!("receive_server_hello_socks5 needs more data \"{}\"...", e);
log::trace!("receive_server_hello_socks5 needs more data \"{e}\"...");
return Ok(());
} else {
return Err(e);
@ -178,7 +181,7 @@ impl SocksProxyImpl {
let response = Response::retrieve_from_stream(&mut self.server_inbuf.clone());
if let Err(e) = response {
if e.kind() == std::io::ErrorKind::UnexpectedEof {
log::trace!("receive_auth_data needs more data \"{}\"...", e);
log::trace!("receive_auth_data needs more data \"{e}\"...");
return Ok(());
} else {
return Err(e);
@ -210,7 +213,7 @@ impl SocksProxyImpl {
let response = protocol::Response::retrieve_from_stream(&mut self.server_inbuf.clone());
if let Err(e) = response {
if e.kind() == std::io::ErrorKind::UnexpectedEof {
log::trace!("receive_connection_status needs more data \"{}\"...", e);
log::trace!("receive_connection_status needs more data \"{e}\"...");
return Ok(());
} else {
return Err(e);
@ -260,6 +263,10 @@ impl SocksProxyImpl {
#[async_trait::async_trait]
impl ProxyHandler for SocksProxyImpl {
fn get_server_addr(&self) -> SocketAddr {
self.server_addr
}
fn get_session_info(&self) -> SessionInfo {
self.info
}
@ -339,6 +346,7 @@ impl ProxyHandlerManager for SocksProxyManager {
let command = if udp_associate { UdpAssociate } else { Connect };
let credentials = self.credentials.clone();
Ok(Arc::new(Mutex::new(SocksProxyImpl::new(
self.server,
info,
domain_name,
credentials,
@ -346,10 +354,6 @@ impl ProxyHandlerManager for SocksProxyManager {
command,
)?)))
}
fn get_server_addr(&self) -> SocketAddr {
self.server
}
}
impl SocksProxyManager {

src/traffic_status.rs (new file, 86 lines)
View file

@ -0,0 +1,86 @@
use crate::error::{Error, Result};
use std::os::raw::c_void;
use std::sync::{LazyLock, Mutex};
/// # Safety
///
/// Set the traffic status callback.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_set_traffic_status_callback(
send_interval_secs: u32,
callback: Option<unsafe extern "C" fn(*const TrafficStatus, *mut c_void)>,
ctx: *mut c_void,
) {
if let Ok(mut cb) = TRAFFIC_STATUS_CALLBACK.lock() {
*cb = Some(TrafficStatusCallback(callback, ctx));
} else {
log::error!("set traffic status callback failed");
}
if send_interval_secs > 0 {
SEND_INTERVAL_SECS.store(send_interval_secs as u64, std::sync::atomic::Ordering::Relaxed);
}
}
#[repr(C)]
#[derive(Debug, Default, Copy, Clone)]
pub struct TrafficStatus {
pub tx: u64,
pub rx: u64,
}
#[derive(Clone)]
struct TrafficStatusCallback(Option<unsafe extern "C" fn(*const TrafficStatus, *mut c_void)>, *mut c_void);
impl TrafficStatusCallback {
unsafe fn call(self, info: &TrafficStatus) {
if let Some(cb) = self.0 {
unsafe { cb(info, self.1) };
}
}
}
unsafe impl Send for TrafficStatusCallback {}
unsafe impl Sync for TrafficStatusCallback {}
static TRAFFIC_STATUS_CALLBACK: std::sync::Mutex<Option<TrafficStatusCallback>> = std::sync::Mutex::new(None);
static SEND_INTERVAL_SECS: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1);
static TRAFFIC_STATUS: LazyLock<Mutex<TrafficStatus>> = LazyLock::new(|| Mutex::new(TrafficStatus::default()));
static TIME_STAMP: LazyLock<Mutex<std::time::Instant>> = LazyLock::new(|| Mutex::new(std::time::Instant::now()));
pub(crate) fn traffic_status_update(delta_tx: usize, delta_rx: usize) -> Result<()> {
{
let is_none_or_error = TRAFFIC_STATUS_CALLBACK.lock().map(|guard| guard.is_none()).unwrap_or_else(|e| {
log::error!("Failed to acquire lock: {e}");
true
});
if is_none_or_error {
return Ok(());
}
}
let traffic_status = {
let mut traffic_status = TRAFFIC_STATUS.lock().map_err(|e| Error::from(e.to_string()))?;
traffic_status.tx += delta_tx as u64;
traffic_status.rx += delta_rx as u64;
*traffic_status
};
let old_time = { *TIME_STAMP.lock().map_err(|e| Error::from(e.to_string()))? };
let interval_secs = SEND_INTERVAL_SECS.load(std::sync::atomic::Ordering::Relaxed);
if std::time::Instant::now().duration_since(old_time).as_secs() >= interval_secs {
send_traffic_stat(&traffic_status)?;
{
let mut time_stamp = TIME_STAMP.lock().map_err(|e| Error::from(e.to_string()))?;
*time_stamp = std::time::Instant::now();
}
}
Ok(())
}
fn send_traffic_stat(traffic_status: &TrafficStatus) -> Result<()> {
if let Ok(cb) = TRAFFIC_STATUS_CALLBACK.lock() {
if let Some(cb) = cb.clone() {
unsafe { cb.call(traffic_status) };
}
}
Ok(())
}
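For reference, a minimal sketch of registering this callback from the Rust side; it mirrors the call made in src/win_svc.rs later in this diff and assumes it lives inside the tun2proxy crate where `TrafficStatus` and the FFI function are in scope.

```rust
// Callback invoked by send_traffic_stat() with the cumulative counters.
unsafe extern "C" fn traffic_cb(status: *const TrafficStatus, _ctx: *mut std::os::raw::c_void) {
    // Safety: send_traffic_stat() always passes a valid, non-null pointer.
    let status = unsafe { &*status };
    log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx);
}

fn enable_traffic_logging() {
    // Report at most once per second (the default interval), with no user context.
    unsafe { tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) };
}
```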

578 src/udpgw.rs Normal file

@@ -0,0 +1,578 @@
use crate::error::Result;
use socks5_impl::protocol::{Address, AsyncStreamOperation, BufMut, StreamOperation};
use std::{collections::VecDeque, hash::Hash, net::SocketAddr, sync::atomic::Ordering::Relaxed};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::{
TcpStream,
tcp::{OwnedReadHalf, OwnedWriteHalf},
},
sync::Mutex,
time::{Duration, sleep},
};
pub(crate) const UDPGW_LENGTH_FIELD_SIZE: usize = std::mem::size_of::<u16>();
pub(crate) const UDPGW_MAX_CONNECTIONS: usize = 5;
pub(crate) const UDPGW_KEEPALIVE_TIME: tokio::time::Duration = std::time::Duration::from_secs(30);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct UdpFlag(pub u8);
impl UdpFlag {
pub const ZERO: UdpFlag = UdpFlag(0x00);
pub const KEEPALIVE: UdpFlag = UdpFlag(0x01);
pub const ERR: UdpFlag = UdpFlag(0x20);
pub const DATA: UdpFlag = UdpFlag(0x02);
}
impl std::fmt::Display for UdpFlag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let flag = match self.0 {
0x00 => "ZERO",
0x01 => "KEEPALIVE",
0x20 => "ERR",
0x02 => "DATA",
n => return write!(f, "Unknown UdpFlag(0x{n:02X})"),
};
write!(f, "{flag}")
}
}
impl std::ops::BitAnd for UdpFlag {
type Output = Self;
fn bitand(self, rhs: Self) -> Self::Output {
UdpFlag(self.0 & rhs.0)
}
}
impl std::ops::BitOr for UdpFlag {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
UdpFlag(self.0 | rhs.0)
}
}
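A quick illustration, written as a hypothetical test, of how these bit operations are used by the parsing code further down in this file:

```rust
#[test]
fn udp_flag_bit_ops() {
    // Flags are plain bit masks: compose with `|`, test membership with `&`.
    let flags = UdpFlag::DATA | UdpFlag::KEEPALIVE;
    assert_eq!(flags & UdpFlag::DATA, UdpFlag::DATA); // DATA bit is set
    assert_eq!(flags & UdpFlag::ERR, UdpFlag::ZERO);  // ERR bit is not set
}
```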
/// UDP Gateway Packet Format
///
/// The format is based on the SOCKS5 packet format, with additional flags and connection ID fields.
///
/// `LEN`: This field indicates the length of the packet, not including the length field itself.
///
/// `FLAGS`: This field is used to indicate the packet type. The flags are defined as follows:
/// - `0x01`: Keepalive packet without address and data
/// - `0x20`: Error packet without address and data
/// - `0x02`: Data packet with address and data
///
/// `CONN_ID`: This field is used to indicate the unique connection ID for the packet.
///
/// `ATYP` & `DST.ADDR` & `DST.PORT`: These fields indicate the remote address and port.
/// The address can be an IPv4 address, an IPv6 address, or a domain name, depending on the `ATYP` field.
/// The address format directly follows that of the [SOCKS5](https://datatracker.ietf.org/doc/html/rfc1928#section-4) protocol.
/// - `ATYP`: Address Type, 1 byte, indicating the type of address (0x01 - IPv4, 0x04 - IPv6, or 0x03 - domain name)
/// - `DST.ADDR`: Destination Address. If `ATYP` is 0x01 or 0x04, it is a 4- or 16-byte IP address;
///   if `ATYP` is 0x03, it is a domain name and `DST.ADDR` is a variable-length field:
///   it begins with a 1-byte length field followed by the domain name without null-termination;
///   since the length field is 1 byte, the maximum length of the domain name is 255 bytes.
/// - `DST.PORT`: Destination Port, 2 bytes, the port number of the destination address.
///
/// `DATA`: The data field, a variable length field, the length is determined by the `LEN` field.
///
/// All numeric fields are in big-endian byte order.
///
/// ```plain
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// | LEN | | FLAGS | CONN_ID | | ATYP | DST.ADDR | DST.PORT | | DATA |
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// | 2 | | 1 | 2 | | 1 | Variable | 2 | | Variable |
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Packet {
pub header: UdpgwHeader,
pub address: Option<Address>,
pub data: Vec<u8>,
}
impl std::fmt::Display for Packet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let addr = self.address.as_ref().map_or("None".to_string(), |addr| addr.to_string());
let len = self.data.len();
write!(f, "Packet {{ {}, address: {}, payload length: {} }}", self.header, addr, len)
}
}
impl From<Packet> for Vec<u8> {
fn from(packet: Packet) -> Vec<u8> {
(&packet).into()
}
}
impl From<&Packet> for Vec<u8> {
fn from(packet: &Packet) -> Vec<u8> {
let mut bytes: Vec<u8> = vec![];
packet.write_to_buf(&mut bytes);
bytes
}
}
impl TryFrom<&[u8]> for Packet {
type Error = std::io::Error;
fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
if value.len() < UDPGW_LENGTH_FIELD_SIZE {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut iter = std::io::Cursor::new(value);
use tokio_util::bytes::Buf;
let length = iter.get_u16();
if value.len() < length as usize + UDPGW_LENGTH_FIELD_SIZE {
return Err(std::io::ErrorKind::InvalidData.into());
}
let header = UdpgwHeader::retrieve_from_stream(&mut iter)?;
let address = if header.flags & UdpFlag::DATA != UdpFlag::ZERO {
Some(Address::retrieve_from_stream(&mut iter)?)
} else {
None
};
Ok(Packet::new(header, address, iter.chunk()))
}
}
impl Packet {
pub fn new(header: UdpgwHeader, address: Option<Address>, data: &[u8]) -> Self {
let data = data.to_vec();
Packet { header, address, data }
}
pub fn build_keepalive_packet(conn_id: u16) -> Self {
Packet::new(UdpgwHeader::new(UdpFlag::KEEPALIVE, conn_id), None, &[])
}
pub fn build_error_packet(conn_id: u16) -> Self {
Packet::new(UdpgwHeader::new(UdpFlag::ERR, conn_id), None, &[])
}
pub fn build_packet_from_address(conn_id: u16, remote_addr: &Address, data: &[u8]) -> std::io::Result<Self> {
use socks5_impl::protocol::Address::{DomainAddress, SocketAddress};
let packet = match remote_addr {
SocketAddress(addr) => Packet::build_ip_packet(conn_id, *addr, data),
DomainAddress(domain, port) => Packet::build_domain_packet(conn_id, *port, domain, data)?,
};
Ok(packet)
}
pub fn build_ip_packet(conn_id: u16, remote_addr: SocketAddr, data: &[u8]) -> Self {
let addr: Address = remote_addr.into();
Packet::new(UdpgwHeader::new(UdpFlag::DATA, conn_id), Some(addr), data)
}
pub fn build_domain_packet(conn_id: u16, port: u16, domain: &str, data: &[u8]) -> std::io::Result<Self> {
if domain.len() > 255 {
return Err(std::io::ErrorKind::InvalidInput.into());
}
let addr = Address::from((domain, port));
Ok(Packet::new(UdpgwHeader::new(UdpFlag::DATA, conn_id), Some(addr), data))
}
}
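As a worked example of the wire format described above (a hypothetical test derived from the builders above and the `StreamOperation` impl that follows): a keepalive packet carries no address and no payload, so only the 2-byte length field and the 3-byte header are emitted.

```rust
#[test]
fn keepalive_packet_layout() {
    // LEN = 0x0003 (header only), FLAGS = 0x01 (KEEPALIVE), CONN_ID = 0x0007.
    let pkt = Packet::build_keepalive_packet(7);
    let bytes: Vec<u8> = (&pkt).into();
    assert_eq!(bytes, vec![0x00, 0x03, 0x01, 0x00, 0x07]);
    assert_eq!(pkt.len(), 5); // total on-wire size, including the length field itself
}
```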
impl StreamOperation for Packet {
fn retrieve_from_stream<R>(stream: &mut R) -> std::io::Result<Self>
where
R: std::io::Read,
Self: Sized,
{
let mut buf = [0; UDPGW_LENGTH_FIELD_SIZE];
stream.read_exact(&mut buf)?;
let length = u16::from_be_bytes(buf) as usize;
let header = UdpgwHeader::retrieve_from_stream(stream)?;
let address = if header.flags & UdpFlag::DATA == UdpFlag::DATA {
Some(Address::retrieve_from_stream(stream)?)
} else {
None
};
let read_len = header.len() + address.as_ref().map_or(0, |addr| addr.len());
if length < read_len {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut data = vec![0; length - read_len];
stream.read_exact(&mut data)?;
Ok(Packet::new(header, address, &data))
}
fn write_to_buf<B: BufMut>(&self, buf: &mut B) {
let len = self.len() - UDPGW_LENGTH_FIELD_SIZE;
buf.put_u16(len as u16);
self.header.write_to_buf(buf);
if let Some(addr) = &self.address {
addr.write_to_buf(buf);
}
buf.put_slice(&self.data);
}
fn len(&self) -> usize {
UDPGW_LENGTH_FIELD_SIZE + self.header.len() + self.address.as_ref().map_or(0, |addr| addr.len()) + self.data.len()
}
}
#[async_trait::async_trait]
impl AsyncStreamOperation for Packet {
async fn retrieve_from_async_stream<R>(r: &mut R) -> std::io::Result<Self>
where
R: tokio::io::AsyncRead + Unpin + Send + ?Sized,
Self: Sized,
{
let mut buf = [0; UDPGW_LENGTH_FIELD_SIZE];
r.read_exact(&mut buf).await?;
let length = u16::from_be_bytes(buf) as usize;
let header = UdpgwHeader::retrieve_from_async_stream(r).await?;
let address = if header.flags & UdpFlag::DATA == UdpFlag::DATA {
Some(Address::retrieve_from_async_stream(r).await?)
} else {
None
};
let read_len = header.len() + address.as_ref().map_or(0, |addr| addr.len());
if length < read_len {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut data = vec![0; length - read_len];
r.read_exact(&mut data).await?;
Ok(Packet::new(header, address, &data))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct UdpgwHeader {
pub flags: UdpFlag,
pub conn_id: u16,
}
impl std::fmt::Display for UdpgwHeader {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} conn_id: {}", self.flags, self.conn_id)
}
}
impl StreamOperation for UdpgwHeader {
fn retrieve_from_stream<R>(stream: &mut R) -> std::io::Result<Self>
where
R: std::io::Read,
Self: Sized,
{
let mut buf = [0; UdpgwHeader::static_len()];
stream.read_exact(&mut buf)?;
UdpgwHeader::try_from(&buf[..])
}
fn write_to_buf<B: BufMut>(&self, buf: &mut B) {
let bytes: Vec<u8> = self.into();
buf.put_slice(&bytes);
}
fn len(&self) -> usize {
Self::static_len()
}
}
#[async_trait::async_trait]
impl AsyncStreamOperation for UdpgwHeader {
async fn retrieve_from_async_stream<R>(r: &mut R) -> std::io::Result<Self>
where
R: tokio::io::AsyncRead + Unpin + Send + ?Sized,
Self: Sized,
{
let mut buf = [0; UdpgwHeader::static_len()];
r.read_exact(&mut buf).await?;
UdpgwHeader::try_from(&buf[..])
}
}
impl UdpgwHeader {
pub fn new(flags: UdpFlag, conn_id: u16) -> Self {
UdpgwHeader { flags, conn_id }
}
pub const fn static_len() -> usize {
std::mem::size_of::<u8>() + std::mem::size_of::<u16>()
}
}
impl TryFrom<&[u8]> for UdpgwHeader {
type Error = std::io::Error;
fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
if value.len() < UdpgwHeader::static_len() {
return Err(std::io::ErrorKind::InvalidData.into());
}
let conn_id = u16::from_be_bytes([value[1], value[2]]);
Ok(UdpgwHeader::new(UdpFlag(value[0]), conn_id))
}
}
impl From<&UdpgwHeader> for Vec<u8> {
fn from(header: &UdpgwHeader) -> Vec<u8> {
let mut bytes = vec![0; header.len()];
bytes[0] = header.flags.0;
bytes[1..3].copy_from_slice(&header.conn_id.to_be_bytes());
bytes
}
}
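A small hypothetical round-trip check of the 3-byte header encoding (FLAGS, then CONN_ID in big-endian), consistent with the `TryFrom`/`From` impls above:

```rust
#[test]
fn header_round_trip() {
    let header = UdpgwHeader::new(UdpFlag::DATA, 0x1234);
    let bytes: Vec<u8> = (&header).into();
    assert_eq!(bytes, vec![0x02, 0x12, 0x34]);
    assert_eq!(UdpgwHeader::try_from(&bytes[..]).unwrap(), header);
}
```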
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) enum UdpGwResponse {
KeepAlive,
Error,
TcpClose,
Data(Packet),
}
impl std::fmt::Display for UdpGwResponse {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
UdpGwResponse::KeepAlive => write!(f, "KeepAlive"),
UdpGwResponse::Error => write!(f, "Error"),
UdpGwResponse::TcpClose => write!(f, "TcpClose"),
UdpGwResponse::Data(packet) => write!(f, "Data({packet})"),
}
}
}
static SERIAL_NUMBER: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(1);
#[derive(Debug)]
pub(crate) struct UdpGwClientStream {
local_addr: SocketAddr,
writer: Option<OwnedWriteHalf>,
reader: Option<OwnedReadHalf>,
closed: bool,
last_activity: std::time::Instant,
serial_number: u16,
}
impl UdpGwClientStream {
pub fn close(&mut self) {
self.closed = true;
}
pub fn get_reader(&mut self) -> Option<OwnedReadHalf> {
self.reader.take()
}
pub fn set_reader(&mut self, reader: Option<OwnedReadHalf>) {
self.reader = reader;
}
pub fn set_writer(&mut self, writer: Option<OwnedWriteHalf>) {
self.writer = writer;
}
pub fn get_writer(&mut self) -> Option<OwnedWriteHalf> {
self.writer.take()
}
pub fn local_addr(&self) -> SocketAddr {
self.local_addr
}
pub fn update_activity(&mut self) {
self.last_activity = std::time::Instant::now();
}
pub fn is_closed(&self) -> bool {
self.closed
}
pub fn serial_number(&self) -> u16 {
self.serial_number
}
pub fn new(tcp_server_stream: TcpStream) -> Self {
let default = "0.0.0.0:0".parse::<SocketAddr>().unwrap();
let local_addr = tcp_server_stream.local_addr().unwrap_or(default);
let (reader, writer) = tcp_server_stream.into_split();
let serial_number = SERIAL_NUMBER.fetch_add(1, Relaxed);
UdpGwClientStream {
local_addr,
reader: Some(reader),
writer: Some(writer),
last_activity: std::time::Instant::now(),
closed: false,
serial_number,
}
}
}
#[derive(Debug)]
pub(crate) struct UdpGwClient {
udp_mtu: u16,
max_connections: usize,
udp_timeout: u64,
keepalive_time: Duration,
udpgw_server: SocketAddr,
server_connections: Mutex<VecDeque<UdpGwClientStream>>,
}
impl UdpGwClient {
pub fn new(udp_mtu: u16, max_connections: usize, keepalive_time: Duration, udp_timeout: u64, udpgw_server: SocketAddr) -> Self {
let server_connections = Mutex::new(VecDeque::with_capacity(max_connections));
UdpGwClient {
udp_mtu,
max_connections,
udp_timeout,
udpgw_server,
keepalive_time,
server_connections,
}
}
pub(crate) fn get_udp_mtu(&self) -> u16 {
self.udp_mtu
}
pub(crate) fn get_udp_timeout(&self) -> u64 {
self.udp_timeout
}
pub(crate) async fn pop_server_connection_from_queue(&self) -> Option<UdpGwClientStream> {
self.server_connections.lock().await.pop_front()
}
pub(crate) async fn store_server_connection(&self, stream: UdpGwClientStream) {
if self.server_connections.lock().await.len() < self.max_connections {
self.server_connections.lock().await.push_back(stream);
}
}
pub(crate) async fn store_server_connection_full(&self, mut stream: UdpGwClientStream, reader: OwnedReadHalf, writer: OwnedWriteHalf) {
if self.server_connections.lock().await.len() < self.max_connections {
stream.set_reader(Some(reader));
stream.set_writer(Some(writer));
self.server_connections.lock().await.push_back(stream);
}
}
pub(crate) fn get_udpgw_server_addr(&self) -> SocketAddr {
self.udpgw_server
}
/// Heartbeat task: periodically checks the pooled server connections and sends keepalives to keep idle ones alive.
pub(crate) async fn heartbeat_task(&self) -> std::io::Result<()> {
loop {
sleep(self.keepalive_time).await;
let mut streams = Vec::new();
while let Some(stream) = self.pop_server_connection_from_queue().await {
if !stream.is_closed() {
streams.push(stream);
}
}
let (mut tx, mut rx) = (0, 0);
for mut stream in streams {
if stream.last_activity.elapsed() < self.keepalive_time {
self.store_server_connection(stream).await;
continue;
}
let Some(mut stream_reader) = stream.get_reader() else {
continue;
};
let Some(mut stream_writer) = stream.get_writer() else {
continue;
};
let local_addr = stream_writer.local_addr()?;
let sn = stream.serial_number();
let keepalive_packet: Vec<u8> = Packet::build_keepalive_packet(sn).into();
tx += keepalive_packet.len();
if let Err(e) = stream_writer.write_all(&keepalive_packet).await {
log::warn!("stream {sn} {local_addr:?} send keepalive failed: {e}");
continue;
}
match UdpGwClient::recv_udpgw_packet(self.udp_mtu, self.udp_timeout, &mut stream_reader).await {
Ok((len, UdpGwResponse::KeepAlive)) => {
stream.update_activity();
self.store_server_connection_full(stream, stream_reader, stream_writer).await;
log::trace!("stream {sn} {local_addr:?} send keepalive and recieve it successfully");
rx += len;
}
Ok((len, v)) => {
log::debug!("stream {sn} {local_addr:?} keepalive unexpected response: {v}");
rx += len;
}
Err(e) => log::debug!("stream {sn} {local_addr:?} keepalive no response, error \"{e}\""),
}
}
crate::traffic_status::traffic_status_update(tx, rx)?;
}
}
/// Parses the UDP response data.
pub(crate) fn parse_udp_response(udp_mtu: u16, packet: Packet) -> Result<UdpGwResponse> {
let flags = packet.header.flags;
if flags & UdpFlag::ERR == UdpFlag::ERR {
return Ok(UdpGwResponse::Error);
}
if flags & UdpFlag::KEEPALIVE == UdpFlag::KEEPALIVE {
return Ok(UdpGwResponse::KeepAlive);
}
if packet.data.len() > udp_mtu as usize {
return Err("too much data".into());
}
Ok(UdpGwResponse::Data(packet))
}
/// Receives a UDP gateway packet.
///
/// This function is responsible for receiving packets from the UDP gateway.
///
/// # Arguments
/// - `udp_mtu`: The maximum transmission unit size for UDP packets.
/// - `udp_timeout`: The timeout in seconds for receiving UDP packets.
/// - `stream`: A mutable reference to the UDP gateway client stream reader.
///
/// # Returns
/// - `Result<(usize, UdpGwResponse)>`: the number of bytes read and the parsed UDP gateway response, or an error if one occurs.
pub(crate) async fn recv_udpgw_packet(udp_mtu: u16, udp_timeout: u64, stream: &mut OwnedReadHalf) -> Result<(usize, UdpGwResponse)> {
let packet = tokio::time::timeout(
tokio::time::Duration::from_secs(udp_timeout + 2),
Packet::retrieve_from_async_stream(stream),
)
.await
.map_err(std::io::Error::from)??;
Ok((packet.len(), UdpGwClient::parse_udp_response(udp_mtu, packet)?))
}
/// Sends a UDP gateway packet.
///
/// This function constructs and sends a UDP gateway packet based on the IPv6 enabled status, data length,
/// remote address, domain (if any), connection ID, and the UDP gateway client writer stream.
///
/// # Arguments
///
/// * `ipv6_enabled` - Whether IPv6 is enabled
/// * `data` - The data packet
/// * `remote_addr` - Remote address
/// * `conn_id` - Connection ID
/// * `stream` - UDP gateway client writer stream
///
/// # Returns
///
/// Returns `Ok(())` if the packet is sent successfully, otherwise returns an error.
pub(crate) async fn send_udpgw_packet(
ipv6_enabled: bool,
data: &[u8],
remote_addr: &socks5_impl::protocol::Address,
conn_id: u16,
stream: &mut OwnedWriteHalf,
) -> Result<()> {
if !ipv6_enabled && remote_addr.get_type() == socks5_impl::protocol::AddressType::IPv6 {
return Err("ipv6 not support".into());
}
let out_data: Vec<u8> = Packet::build_packet_from_address(conn_id, remote_addr, data)?.into();
stream.write_all(&out_data).await?;
Ok(())
}
}
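To show how these pieces fit together, a rough sketch (not part of the diff) of forwarding a single UDP payload through the udpgw server; `dst` and `payload` stand in for an intercepted UDP session, connection reuse is ignored, and error handling is reduced to `?`/`unwrap`.

```rust
async fn forward_once(client: &UdpGwClient, dst: &Address, payload: &[u8]) -> Result<()> {
    // Open a fresh TCP connection to the udpgw server (the real code pops one from the pool first).
    let tcp = TcpStream::connect(client.get_udpgw_server_addr()).await?;
    let mut stream = UdpGwClientStream::new(tcp);
    let (mut reader, mut writer) = (stream.get_reader().unwrap(), stream.get_writer().unwrap());
    let conn_id = stream.serial_number();

    // Wrap the payload in a DATA packet and wait for the gateway's reply.
    UdpGwClient::send_udpgw_packet(true, payload, dst, conn_id, &mut writer).await?;
    let (_n, resp) = UdpGwClient::recv_udpgw_packet(client.get_udp_mtu(), client.get_udp_timeout(), &mut reader).await?;
    log::debug!("udpgw response: {resp}");

    // Hand the connection back to the pool so the heartbeat task can keep it alive.
    stream.update_activity();
    client.store_server_connection_full(stream, reader, writer).await;
    Ok(())
}
```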


@@ -1,12 +1,12 @@
use crate::error::Result;
use hashlink::{linked_hash_map::RawEntryMut, LruCache};
use hashlink::{LruCache, linked_hash_map::RawEntryMut};
use std::{
collections::HashMap,
convert::TryInto,
net::{IpAddr, Ipv4Addr, Ipv6Addr},
str::FromStr,
time::{Duration, Instant},
};
use tproxy_config::IpCidr;
const MAPPING_TIMEOUT: u64 = 60; // Mapping timeout in seconds
@@ -19,6 +19,7 @@ struct NameCacheEntry {
/// The IP addresses are in the range of private IP addresses.
/// The DNS server is implemented as an LRU cache.
pub struct VirtualDns {
trailing_dot: bool,
lru_cache: LruCache<IpAddr, NameCacheEntry>,
name_to_ip: HashMap<String, IpAddr>,
network_addr: IpAddr,
@@ -26,35 +27,24 @@ pub struct VirtualDns {
next_addr: IpAddr,
}
impl Default for VirtualDns {
fn default() -> Self {
let start_addr = Ipv4Addr::from_str("198.18.0.0").unwrap();
let prefix_len = 15;
let network_addr = calculate_network_addr(start_addr, prefix_len);
let broadcast_addr = calculate_broadcast_addr(start_addr, prefix_len);
impl VirtualDns {
pub fn new(ip_pool: IpCidr) -> Self {
Self {
next_addr: start_addr.into(),
trailing_dot: false,
next_addr: ip_pool.first_address(),
name_to_ip: HashMap::default(),
network_addr: IpAddr::from(network_addr),
broadcast_addr: IpAddr::from(broadcast_addr),
network_addr: ip_pool.first_address(),
broadcast_addr: ip_pool.last_address(),
lru_cache: LruCache::new_unbounded(),
}
}
}
impl VirtualDns {
pub fn new() -> Self {
VirtualDns::default()
}
/// Returns the DNS response to send back to the client.
pub fn generate_query(&mut self, data: &[u8]) -> Result<(Vec<u8>, String, IpAddr)> {
use crate::dns;
let message = dns::parse_data_to_dns_message(data, false)?;
let qname = dns::extract_domain_from_dns_message(&message)?;
let ip = self.allocate_ip(qname.clone())?;
let ip = self.find_or_allocate_ip(qname.clone())?;
let message = dns::build_dns_response(message, &qname, ip, 5)?;
Ok((message.to_vec()?, qname, ip))
}
@@ -100,36 +90,50 @@ impl VirtualDns {
self.lru_cache.get(addr).map(|entry| &entry.name)
}
fn allocate_ip(&mut self, name: String) -> Result<IpAddr> {
fn find_or_allocate_ip(&mut self, name: String) -> Result<IpAddr> {
// This function is a search and creation function.
// Thus, it is sufficient to canonicalize the name here.
let insert_name = if name.ends_with('.') && !self.trailing_dot {
String::from(name.trim_end_matches('.'))
} else {
name
};
let now = Instant::now();
// Iterate through all entries of the LRU cache and remove those that have expired.
loop {
let (ip, entry) = match self.lru_cache.iter().next() {
None => break,
Some((ip, entry)) => (ip, entry),
};
// The entry has expired.
if now > entry.expiry {
let name = entry.name.clone();
self.lru_cache.remove(&ip.clone());
self.name_to_ip.remove(&name);
continue;
continue; // There might be another expired entry after this one.
}
break;
break; // The entry has not expired and all following entries are newer.
}
if let Some(ip) = self.name_to_ip.get(&name) {
// Return the IP if it is stored inside our LRU cache.
if let Some(ip) = self.name_to_ip.get(&insert_name) {
let ip = *ip;
self.touch_ip(&ip);
return Ok(ip);
}
// Otherwise, store name and IP pair inside the LRU cache.
let started_at = self.next_addr;
loop {
if let RawEntryMut::Vacant(vacant) = self.lru_cache.raw_entry_mut().from_key(&self.next_addr) {
let expiry = Instant::now() + Duration::from_secs(MAPPING_TIMEOUT);
let name0 = name.clone();
vacant.insert(self.next_addr, NameCacheEntry { name, expiry });
let name0 = insert_name.clone();
vacant.insert(self.next_addr, NameCacheEntry { name: insert_name, expiry });
self.name_to_ip.insert(name0, self.next_addr);
return Ok(self.next_addr);
}
@@ -144,30 +148,3 @@ impl VirtualDns {
}
}
}
fn calculate_network_addr(ip: std::net::Ipv4Addr, prefix_len: u8) -> std::net::Ipv4Addr {
let mask = (!0u32) << (32 - prefix_len);
let ip_u32 = u32::from_be_bytes(ip.octets());
std::net::Ipv4Addr::from((ip_u32 & mask).to_be_bytes())
}
fn calculate_broadcast_addr(ip: std::net::Ipv4Addr, prefix_len: u8) -> std::net::Ipv4Addr {
let mask = (!0u32) >> prefix_len;
let ip_u32 = u32::from_be_bytes(ip.octets());
std::net::Ipv4Addr::from((ip_u32 | mask).to_be_bytes())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_cidr_addr() {
let start_addr = Ipv4Addr::from_str("198.18.0.0").unwrap();
let prefix_len = 15;
let network_addr = calculate_network_addr(start_addr, prefix_len);
let broadcast_addr = calculate_broadcast_addr(start_addr, prefix_len);
assert_eq!(network_addr, Ipv4Addr::from_str("198.18.0.0").unwrap());
assert_eq!(broadcast_addr, Ipv4Addr::from_str("198.19.255.255").unwrap());
}
}
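With the constructor change above, callers now supply the address pool explicitly. A hedged sketch of reproducing the previous 198.18.0.0/15 default; it assumes `tproxy_config::IpCidr` can be parsed from CIDR notation and uses a hypothetical `query_bytes` buffer, neither of which is shown in this diff.

```rust
fn example(query_bytes: &[u8]) -> Result<()> {
    // Hypothetical: recreate the old default pool with the new constructor.
    let pool: IpCidr = "198.18.0.0/15".parse().map_err(|_| "invalid CIDR")?;
    let mut dns = VirtualDns::new(pool);

    // DNS query bytes in; synthesized response plus the allocated fake IP out.
    let (response, qname, ip) = dns.generate_query(query_bytes)?;
    log::debug!("mapped {qname} -> {ip}, responding with {} bytes", response.len());
    Ok(())
}
```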

101 src/win_svc.rs Normal file

@@ -0,0 +1,101 @@
#![cfg(windows)]
const SERVICE_NAME: &str = "tun2proxy";
windows_service::define_windows_service!(ffi_service_main, my_service_main);
pub fn start_service() -> Result<(), windows_service::Error> {
// Register generated `ffi_service_main` with the system and start the service,
// blocking this thread until the service is stopped.
windows_service::service_dispatcher::start(SERVICE_NAME, ffi_service_main)?;
Ok(())
}
fn my_service_main(arguments: Vec<std::ffi::OsString>) {
// The entry point where execution will start on a background thread after a call to
// `service_dispatcher::start` from `main`.
if let Err(_e) = run_service(arguments) {
log::error!("Error: {_e:?}");
}
}
fn run_service(_arguments: Vec<std::ffi::OsString>) -> Result<(), crate::BoxError> {
use windows_service::service::ServiceControl;
use windows_service::service_control_handler::{self, ServiceControlHandlerResult};
let shutdown_token = crate::CancellationToken::new();
let shutdown_token_clone = shutdown_token.clone();
let event_handler = move |control_event| -> ServiceControlHandlerResult {
match control_event {
ServiceControl::Stop => {
// Handle stop event and return control back to the system.
shutdown_token_clone.cancel();
ServiceControlHandlerResult::NoError
}
// All services must accept Interrogate even if it's a no-op.
ServiceControl::Interrogate => ServiceControlHandlerResult::NoError,
_ => ServiceControlHandlerResult::NotImplemented,
}
};
// Register system service event handler
let status_handle = service_control_handler::register(SERVICE_NAME, event_handler)?;
let mut next_status = windows_service::service::ServiceStatus {
// Should match the one from system service registry
service_type: windows_service::service::ServiceType::OWN_PROCESS,
// The new state
current_state: windows_service::service::ServiceState::Running,
// Accept stop events when running
controls_accepted: windows_service::service::ServiceControlAccept::STOP,
// Used to report an error when starting or stopping only, otherwise must be zero
exit_code: windows_service::service::ServiceExitCode::Win32(0),
// Only used for pending states, otherwise must be zero
checkpoint: 0,
// Only used for pending states, otherwise must be zero
wait_hint: std::time::Duration::default(),
// Unused for setting status
process_id: None,
};
// Tell the system that the service is running now
status_handle.set_service_status(next_status.clone())?;
// main logic here
{
let args = crate::Args::parse_args();
let default = format!("{:?},trust_dns_proto=warn", args.verbosity);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(async {
unsafe extern "C" fn traffic_cb(status: *const crate::TrafficStatus, _: *mut std::ffi::c_void) {
let status = unsafe { &*status };
log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx);
}
unsafe { crate::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) };
let ret = crate::general_run_async(args.clone(), tun::DEFAULT_MTU, false, shutdown_token).await;
match &ret {
Ok(sessions) => {
if args.exit_on_fatal_error && *sessions >= args.max_sessions {
log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions);
std::process::exit(-1);
}
log::debug!("tun2proxy exited normally, current sessions: {sessions}");
}
Err(err) => log::error!("main loop error: {err}"),
}
Ok::<(), crate::Error>(())
})?;
}
// Tell the system that the service is stopped now
next_status.current_state = windows_service::service::ServiceState::Stopped;
status_handle.set_service_status(next_status)?;
Ok(())
}
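A hypothetical sketch (the CLI wiring is outside this diff) of how the binary's entry point might hand the calling thread over to the dispatcher registered above; it assumes the module is exposed as `win_svc` within the crate.

```rust
#[cfg(windows)]
fn run_as_service() -> Result<(), windows_service::Error> {
    // Blocks until the service control manager stops the service.
    crate::win_svc::start_service()
}
```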


@@ -8,7 +8,7 @@ echo $SCRIPT_DIR
netns="test"
dante="danted"
tun2proxy="${SCRIPT_DIR}/../../target/release/tun2proxy"
tun2proxy="${SCRIPT_DIR}/../../target/release/tun2proxy-bin"
ip netns add "$netns"
@@ -47,4 +47,4 @@ iperf3 -c 10.0.0.4
iperf3 -c 10.0.0.4 -R -P 10
# Clean up
# sudo sh -c "pkill tun2proxy; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"
# sudo sh -c "pkill tun2proxy-bin; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"

3 tests/requirements.txt Normal file

@@ -0,0 +1,3 @@
requests
python-dotenv
psutil

78 tests/tests.py Normal file

@@ -0,0 +1,78 @@
import glob
import itertools
import os
import subprocess
import time
import unittest
import psutil
import dotenv
import requests
dotenv.load_dotenv()
def get_ip(version=None):
"""provider = 'https://%swtfismyip.com/text'
prefix = {
None: '',
4: 'ipv4.',
6: 'ipv6.'
}[version]"""
provider = 'https://%sipify.org'
prefix = {
None: 'api64.',
4: 'api4.',
6: 'api6.'
}[version]
result = requests.Session().get(provider % prefix).text.strip()
return result
def get_tool_path():
default = glob.glob(os.path.join(os.path.dirname(__file__), '..', 'target', '*', 'tun2proxy-bin'))
default = default[0] if len(default) > 0 else 'tun2proxy-bin'
return os.environ.get('TOOL_PATH', default)
def sudo_kill_process_and_children(proc):
try:
for child in psutil.Process(proc.pid).children(recursive=True):
if child.name() == 'tun2proxy-bin':
subprocess.run(['sudo', 'kill', str(child.pid)])
subprocess.run(['sudo', 'kill', str(proc.pid)])
except psutil.NoSuchProcess:
pass
class Tun2ProxyTest(unittest.TestCase):
@staticmethod
def _test(ip_version, dns, proxy_var):
ip_noproxy = get_ip(ip_version)
additional = ['-6'] if ip_version == 6 else []
p = subprocess.Popen(
['sudo', get_tool_path(), "--proxy", os.getenv(proxy_var), '--setup', '-v', 'trace', '--dns', dns, *additional])
try:
time.sleep(1)
ip_withproxy = get_ip(ip_version)
assert ip_noproxy != ip_withproxy
except Exception as e:
raise e
finally:
sudo_kill_process_and_children(p)
p.terminate()
p.wait()
@classmethod
def add_tests(cls):
ip_options = [None, 4]
if bool(int(os.environ.get('IPV6', 1))):
ip_options.append(6)
for ip_version, dns, proxy_var in itertools.product(ip_options, ['virtual', 'over-tcp'],
['SOCKS5_PROXY', 'HTTP_PROXY']):
setattr(cls, 'test_ipv%s_dns%s_proxy%s' % (ip_version, dns, proxy_var),
# Bind the loop variables eagerly; a bare closure would capture only the final combination.
lambda self, ip_version=ip_version, dns=dns, proxy_var=proxy_var: cls._test(ip_version, dns, proxy_var))
if __name__ == '__main__':
Tun2ProxyTest.add_tests()
unittest.main()