Compare commits

..

111 commits

Author SHA1 Message Date
ssrlive
1880396822 use ctrlc2 async feature
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-06-14 13:05:54 +08:00
Paper-Dragon
8b4ecabd8f
build image based on alpine/musl (#212)
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-06-11 14:12:02 +08:00
B. Blechschmidt
fbc47a3001 fix(ci): account for change in load_dotenv
Some checks are pending
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Push or PR / Check semver (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-06-11 00:21:03 +02:00
ssrlive
88d31ce168 Bump version 0.7.10
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-06-03 14:05:28 +08:00
dependabot[bot]
ddebf5ee50
Update tun requirement from 0.7 to 0.8 (#209)
Some checks are pending
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Push or PR / Check semver (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-06-03 12:02:31 +08:00
ssrlive
8cdb4f535d
Significant change in --setup parameter (#207)
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-06-02 10:31:44 +08:00
ssrlive
6a5692cea0 refine code
Some checks failed
Push or PR / Check semver (push) Has been cancelled
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-05-21 15:45:48 +08:00
ssrlive
3dc8f222cb Bump version 0.7.9
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-05-08 10:26:00 +08:00
ssrlive
7c32b62727 Exclude dependabot[bot] in Integration Tests
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-05-02 16:49:34 +08:00
dependabot[bot]
cf4a565f93
Update socks5-impl requirement from 0.6 to 0.7 (#201)
Some checks failed
Push or PR / Check semver (push) Has been cancelled
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-05-01 08:30:43 +08:00
ssrlive
54f7dbc81b update nix deps
Some checks are pending
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / Check semver (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-04-30 10:59:48 +08:00
ssrlive
b71f479bf3 close-stale-issues.yml
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-04-23 13:58:13 +08:00
ssrlive
2ead13a3f4 version_info & about_info 2025-04-22 14:58:52 +08:00
ssrlive
88423039c6 make TASK_COUNT as local task_count variable
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-04-20 19:56:36 +08:00
ssrlive
7121a80300 Bump version 0.7.8
Some checks are pending
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Push or PR / Check semver (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-04-19 17:50:56 +08:00
ssrlive
9e75475a23 force exit process while fatal error
Some checks are pending
Push or PR / Check semver (push) Waiting to run
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-04-18 16:09:35 +08:00
ssrlive
7657f1603f Bump version 0.7.7
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-03-28 20:23:47 +08:00
ssrlive
a380817951 update hickory-proto (DNS parser)
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-03-19 08:36:29 +08:00
ssrlive
a2399c8b28 log ipstack info adjusted
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-03-12 11:18:47 +08:00
ssrlive
61bbafcf82 version_info method
Some checks are pending
Push or PR / build_n_test (macos-latest) (push) Waiting to run
Push or PR / build_n_test (ubuntu-latest) (push) Waiting to run
Push or PR / build_n_test (windows-latest) (push) Waiting to run
Push or PR / build_n_test_android (push) Waiting to run
Push or PR / build_n_test_ios (push) Waiting to run
Push or PR / Check semver (push) Waiting to run
Integration Tests / Proxy Tests (push) Waiting to run
2025-03-11 12:41:57 +08:00
ssrlive
ca7cd25c4e Bump version 0.7.6
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-03-07 14:15:14 +08:00
ssrlive
68716bdc9f update deps 2025-03-07 14:07:55 +08:00
ssrlive
e556f7657b Bump version 0.7.5
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-02-27 14:56:14 +08:00
ssrlive
fd7dca9988 unsafe_in_unsafe issues 2025-02-27 14:40:09 +08:00
ssrlive
9a018f2393 update ipstack
Some checks failed
Push or PR / build_n_test (macos-latest) (push) Has been cancelled
Push or PR / build_n_test (ubuntu-latest) (push) Has been cancelled
Push or PR / build_n_test (windows-latest) (push) Has been cancelled
Push or PR / build_n_test_android (push) Has been cancelled
Push or PR / build_n_test_ios (push) Has been cancelled
Push or PR / Check semver (push) Has been cancelled
Integration Tests / Proxy Tests (push) Has been cancelled
2025-02-19 21:45:23 +08:00
ssrlive
c5d907551b ubuntu-20.04 used in publish script 2025-02-12 20:54:53 +08:00
ssrlive
6b038c2a80 Bump version 0.7.4 2025-02-12 18:09:37 +08:00
ssrlive
5287bef3c0 PI issues for macOS 2025-01-10 18:48:32 +08:00
ssrlive
04db15f553 Bump version 0.7.3 2025-01-07 21:15:44 +08:00
Ahmed Elsayed
f8c902b61c
use shlex instead of split whitespaces. (#179) 2025-01-07 21:03:25 +08:00
ssrlive
8ba2c1a2b7 Bump version 0.7.2 2025-01-03 15:30:41 +08:00
ssrlive
e939f5f3dc remove mod mobile 2025-01-03 15:22:14 +08:00
ssrlive
ecd1ab80bf base64 removed 2025-01-03 12:32:28 +08:00
Mostafa Kazemi
51de01854b
Fix typo in comment (#178) 2025-01-03 11:00:19 +08:00
ssrlive
bac54ec56c Bump version 0.7.1 2025-01-03 02:26:51 +08:00
ssrlive
6034870264 rename desktop_run_async to general_run_async 2025-01-03 02:05:50 +08:00
ssrlive
e933e5d4c0 iOS & Android testing suits 2025-01-03 00:41:29 +08:00
ssrlive
7136e2a20c refactor desktop_run_async 2025-01-02 23:44:57 +08:00
ssrlive
2a8e31225c refine clap::Parser 2025-01-02 17:08:19 +08:00
ssrlive
ea5ee834db Bump version 0.6.7 2024-12-27 17:15:48 +08:00
ssrlive
4d4a0ce85c minor changes 2024-12-26 20:38:13 +08:00
ssrlive
258637a52e upgrade dependencies 2024-12-17 11:35:58 +08:00
ssrlive
a01de17b36 minor changes 2024-11-26 13:32:39 +08:00
Paper-Dragon
724557b30e
docker-compose.yaml support (#166) 2024-11-26 13:12:04 +08:00
ssrlive
7a7293effd Refine code 2024-11-26 12:58:26 +08:00
ssrlive
46bf4434ef Bump version 0.6.6 2024-11-26 12:28:03 +08:00
ssrlive
d37cb44b62 Fix #165 2024-11-26 12:17:16 +08:00
ssrlive
987635d3dc Contributors in README 2024-11-24 15:49:10 +08:00
ssrlive
ebd3128778 Bump version 0.6.5 2024-11-21 14:15:37 +08:00
ssrlive
ee4df8f97b cbindgen issues 2024-11-17 17:54:30 +08:00
ssrlive
7314906841 mask_socket_addr function 2024-11-11 15:11:06 +08:00
ssrlive
23d4e59367 minor changes 2024-11-11 11:51:28 +08:00
ssrlive
28d54be638 Bump version 0.6.4 2024-11-09 18:30:56 +08:00
dependabot[bot]
8c98d1dc74
Update thiserror requirement from 1 to 2 (#162) 2024-11-07 10:37:46 +08:00
ssrlive
1a508918a2
Auto merge script 2024-11-05 17:31:33 +08:00
ssrlive
c2382ee29b minor changes 2024-11-04 22:08:21 +08:00
ssrlive
21355e37da Bump version 0.6.3 2024-11-03 10:36:07 +08:00
ssrlive
e8143a691b remove useless is_in_heartbeat in udpgw 2024-11-02 17:16:54 +08:00
ssrlive
53f60ffda6 readme on udpgw 2024-11-02 13:55:47 +08:00
ssrlive
9088cf6fe5 minor changes 2024-11-02 07:25:46 +08:00
ssrlive
d7e3913450 Bump version 0.6.2 2024-11-01 15:08:49 +08:00
ssrlive
52d814ce79 refine udpgw 2024-11-01 15:02:18 +08:00
ssrlive
b4142453fd Bump version 0.6.1 2024-10-30 19:10:04 +08:00
ssrlive
0aad0d1709 refactor udpgw 2024-10-30 19:00:28 +08:00
ssrlive
3fb02f0fc7 switch to tun crate instead of tun2 2024-10-28 14:03:35 +08:00
ssrlive
b9cf06da33 refine code 2024-10-27 15:27:50 +08:00
ssrlive
2ade72e79d publish version 0.6.0 2024-10-26 11:04:26 +08:00
ssrlive
e3cc5ea1ce fix daemonize issues 2024-10-26 09:51:00 +08:00
sujiacong
b6bb9bedfc
support udp gateway mode (#155) 2024-10-26 02:15:52 +08:00
ssrlive
f823202b33 Bump version 0.5.4 2024-10-10 00:35:30 +08:00
ssrlive
9aa2afb0fd Fix daemonize issues 2024-10-09 23:54:25 +08:00
ssrlive
918e6137ab Bump version 0.5.3 2024-10-09 17:00:40 +08:00
ssrlive
d093973160 refine ctrl-c logic 2024-10-09 16:58:37 +08:00
ssrlive
4ef71a5b4c --max-sessions option 2024-10-09 16:57:14 +08:00
ssrlive
b03032b8cd Bump version 0.5.2 2024-10-07 13:34:48 +08:00
ssrlive
c991006f4c --exit-on-fatal-error option 2024-10-07 13:29:32 +08:00
ssrlive
fe32a65291 Bump version 0.5.1 2024-10-03 08:50:51 +08:00
ssrlive
93e15e0a8b build x86_64-win7-windows-msvc target 2024-10-03 08:50:23 +08:00
ssrlive
b74aeab182 target armv7-unknown-linux-musleabi 2024-09-30 17:12:46 +08:00
ssrlive
c9b24a865c minor changes 2024-09-29 18:17:45 +08:00
ssrlive
2396d769d2 Bump version 0.5.0 2024-09-26 12:29:56 +08:00
ssrlive
b24d48a042 testing python script issues 2024-09-26 11:51:52 +08:00
ssrlive
6c8ae7a33f rename target from 'tun2proxy' to 'tun2proxy-bin'
make rust compiler happy
2024-09-26 10:54:54 +08:00
ssrlive
77d651dc70 minor changes 2024-09-26 10:06:52 +08:00
ssrlive
febd654f35 CI testing scripts 2024-09-23 16:01:34 +08:00
ssrlive
143f203fde
Bump version 0.4.5 2024-09-16 17:18:14 +08:00
Paper-Dragon
a5bc8f49b4
multi-arch build docker images (#141) 2024-09-16 17:10:56 +08:00
ssrlive
1ccba18273 Bump version 0.4.4 2024-09-14 22:12:10 +08:00
ssrlive
607d709c03 Apply daemonize for unix 2024-09-14 22:02:05 +08:00
ssrlive
e817257866 refine code 2024-09-14 21:38:15 +08:00
ssrlive
c583e884b5 Bump version 0.4.3 2024-09-14 16:10:33 +08:00
ssrlive
1e6c6f4f66 Fix #144 2024-09-14 16:08:54 +08:00
ssrlive
c167f45a5e Bump version 0.4.2 2024-09-14 10:11:25 +08:00
ssrlive
02b15951b6 update Semver checking script 2024-09-14 10:05:47 +08:00
ssrlive
6dadc1504a Support windows service, fix #143 2024-09-14 09:55:27 +08:00
ssrlive
187e251142 Bump version 0.4.1 2024-09-02 23:22:11 +08:00
ssrlive
15646925a7 issues of parameter constraint for 'tun' 2024-08-29 10:01:58 +08:00
ssrlive
beb3d364a8 fix windows issues 2024-08-29 09:23:33 +08:00
B. Blechschmidt
8334acd085 Update version 2024-08-28 23:17:59 +02:00
B. Blechschmidt
1e7f649192 Remove unused import with updated dependencies 2024-08-28 23:12:21 +02:00
B. Blechschmidt
8c28f2e000 Implement --virtual-dns-pool 2024-08-28 23:06:37 +02:00
ssrlive
3f76ccec97 Apply mimalloc to iOS only 2024-08-03 18:21:02 +08:00
ssrlive
f787ff6d23 rust toolchain version issues 2024-08-02 10:35:05 +08:00
ssrlive
1dd6746bbc mimalloc usage 2024-08-02 10:18:11 +08:00
ssrlive
6567b6bc00 LazyLock usage 2024-07-27 00:08:07 +08:00
ssrlive
016aaa6128 Bump version 0.3.1 2024-07-24 21:11:23 +08:00
ssrlive
824b443d2b Bump version 0.3.0 2024-07-22 09:32:02 +08:00
ssrlive
06ed994655 Check semver script 2024-07-20 12:55:01 +08:00
ssrlive
e879599e6b close_fd_on_drop issues 2024-07-20 12:28:52 +08:00
ssrlive
0ca92dcdc2 minor changes 2024-07-18 19:35:14 +08:00
BlackbirdBop
635c7e557f
Make close_fd_on_drop configurable (#132) 2024-07-18 19:01:11 +08:00
43 changed files with 2104 additions and 616 deletions

20
.github/workflows/auto-merge.yaml vendored Normal file
View file

@ -0,0 +1,20 @@
name: Dependabot Auto Merge
on:
pull_request_target:
types: [labeled]
jobs:
auto:
if: github.actor == 'dependabot[bot]'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Auto approve pull request, then squash and merge
uses: ahmadnassri/action-dependabot-auto-merge@v2
with:
# target: minor
# here `PAT_REPO_ADMIN` is a user's passkey provided by github.
github-token: ${{ secrets.PAT_REPO_ADMIN }}

View file

@ -0,0 +1,26 @@
name: Close stale issues and PRs
on:
schedule:
- cron: "0 0 * * *" # run a cron job every day at midnight
jobs:
stale:
runs-on: ubuntu-latest
steps:
- name: Close stale issues and PRs
uses: actions/stale@v9
with:
stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
days-before-issue-stale: 30
days-before-pr-stale: 45
days-before-issue-close: 5
days-before-pr-close: 10
stale-issue-label: 'no-issue-activity'
exempt-issue-labels: 'keep-open,awaiting-approval,work-in-progress'
stale-pr-label: 'no-pr-activity'
exempt-pr-labels: 'awaiting-approval,work-in-progress'
# only-labels: 'awaiting-feedback,awaiting-answers'

View file

@ -23,6 +23,15 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v4 uses: actions/checkout@v4
# Add support for more platforms with QEMU (optional)
# https://github.com/docker/setup-qemu-action
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
- name: Log in to the Container registry - name: Log in to the Container registry
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
@ -30,19 +39,33 @@ jobs:
registry: ${{ env.REGISTRY }} registry: ${{ env.REGISTRY }}
username: ${{ github.actor }} username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
- name: Extract metadata (tags, labels) for Docker - name: Extract metadata (tags, labels) for Docker
id: meta id: meta
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with: with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
# It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
- name: Build and push Docker image - name: Build gnu and push Docker image
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
with: with:
platforms: linux/amd64,linux/arm64
context: . context: .
file: Dockerfile.ubuntu
push: true push: true
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}-ubuntu
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}
- name: Build musl and push Docker image
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
with:
platforms: linux/amd64
context: .
file: Dockerfile.alpine
push: true
tags: ${{ steps.meta.outputs.tags }}-alpine
labels: ${{ steps.meta.outputs.labels }}

View file

@ -1,4 +1,5 @@
on: on:
workflow_dispatch:
push: push:
tags: tags:
- "v*.*.*" - "v*.*.*"
@ -15,28 +16,35 @@ jobs:
attestations: write attestations: write
strategy: strategy:
fail-fast: false
matrix: matrix:
target: target:
- x86_64-unknown-linux-gnu - x86_64-unknown-linux-gnu
- x86_64-unknown-linux-musl - x86_64-unknown-linux-musl
- i686-unknown-linux-musl - i686-unknown-linux-musl
- aarch64-unknown-linux-gnu - aarch64-unknown-linux-gnu
- armv7-unknown-linux-gnueabihf - armv7-unknown-linux-musleabi
- armv7-unknown-linux-musleabihf
- x86_64-apple-darwin - x86_64-apple-darwin
- aarch64-apple-darwin - aarch64-apple-darwin
- x86_64-pc-windows-msvc - x86_64-pc-windows-msvc
- i686-pc-windows-msvc - i686-pc-windows-msvc
- aarch64-pc-windows-msvc
- x86_64-win7-windows-msvc
- i686-win7-windows-msvc
include: include:
- target: x86_64-unknown-linux-gnu - target: x86_64-unknown-linux-gnu
host_os: ubuntu-latest host_os: ubuntu-22.04
- target: x86_64-unknown-linux-musl - target: x86_64-unknown-linux-musl
host_os: ubuntu-latest host_os: ubuntu-latest
- target: i686-unknown-linux-musl - target: i686-unknown-linux-musl
host_os: ubuntu-latest host_os: ubuntu-latest
- target: aarch64-unknown-linux-gnu - target: aarch64-unknown-linux-gnu
host_os: ubuntu-latest host_os: ubuntu-latest
- target: armv7-unknown-linux-gnueabihf - target: armv7-unknown-linux-musleabi
host_os: ubuntu-latest
- target: armv7-unknown-linux-musleabihf
host_os: ubuntu-latest host_os: ubuntu-latest
- target: x86_64-apple-darwin - target: x86_64-apple-darwin
host_os: macos-latest host_os: macos-latest
@ -46,44 +54,60 @@ jobs:
host_os: windows-latest host_os: windows-latest
- target: i686-pc-windows-msvc - target: i686-pc-windows-msvc
host_os: windows-latest host_os: windows-latest
- target: aarch64-pc-windows-msvc
host_os: windows-latest
- target: x86_64-win7-windows-msvc
host_os: windows-latest
- target: i686-win7-windows-msvc
host_os: windows-latest
runs-on: ${{ matrix.host_os }} runs-on: ${{ matrix.host_os }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Prepare - name: Prepare
shell: bash shell: bash
run: | run: |
mkdir mypubdir4 mkdir mypubdir4
rustup target add ${{ matrix.target }} if [[ "${{ matrix.target }}" != "x86_64-win7-windows-msvc" && "${{ matrix.target }}" != "i686-win7-windows-msvc" ]]; then
rustup target add ${{ matrix.target }}
fi
cargo install cbindgen cargo install cbindgen
if [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then if [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" && "${{ matrix.host_os }}" != "ubuntu-22.04" ]]; then
sudo .github/workflows/install-cross.sh sudo .github/workflows/install-cross.sh
fi fi
- name: Build - name: Build
if: ${{ !cancelled() }}
shell: bash shell: bash
run: | run: |
if [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then if [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" && "${{ matrix.host_os }}" != "ubuntu-22.04" ]]; then
cross build --all-features --release --target ${{ matrix.target }} cross build --all-features --release --target ${{ matrix.target }}
else else
cargo build --all-features --release --target ${{ matrix.target }} if [[ "${{ matrix.target }}" == "x86_64-win7-windows-msvc" || "${{ matrix.target }}" == "i686-win7-windows-msvc" ]]; then
rustup toolchain install nightly
rustup component add rust-src --toolchain nightly
cargo +nightly build --release -Z build-std --target ${{ matrix.target }}
else
cargo build --all-features --release --target ${{ matrix.target }}
fi
fi fi
cbindgen --config cbindgen.toml -l C --cpp-compat -o target/tun2proxy-ffi.h cbindgen --config cbindgen.toml -o target/tun2proxy.h
if [[ "${{ matrix.host_os }}" == "windows-latest" ]]; then if [[ "${{ matrix.host_os }}" == "windows-latest" ]]; then
powershell -Command "(Get-Item README.md).LastWriteTime = Get-Date" powershell -Command "(Get-Item README.md).LastWriteTime = Get-Date"
powershell -Command "(Get-Item target/${{ matrix.target }}/release/wintun.dll).LastWriteTime = Get-Date" powershell -Command "(Get-Item target/${{ matrix.target }}/release/wintun.dll).LastWriteTime = Get-Date"
powershell Compress-Archive -Path target/${{ matrix.target }}/release/tun2proxy.exe, README.md, target/tun2proxy-ffi.h, target/${{ matrix.target }}/release/tun2proxy.dll, target/${{ matrix.target }}/release/wintun.dll -DestinationPath mypubdir4/tun2proxy-${{ matrix.target }}.zip powershell Compress-Archive -Path target/${{ matrix.target }}/release/tun2proxy-bin.exe, target/${{ matrix.target }}/release/udpgw-server.exe, README.md, target/tun2proxy.h, target/${{ matrix.target }}/release/tun2proxy.dll, target/${{ matrix.target }}/release/wintun.dll -DestinationPath mypubdir4/tun2proxy-${{ matrix.target }}.zip
elif [[ "${{ matrix.host_os }}" == "macos-latest" ]]; then elif [[ "${{ matrix.host_os }}" == "macos-latest" ]]; then
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy README.md target/tun2proxy-ffi.h target/${{ matrix.target }}/release/libtun2proxy.dylib zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy-bin target/${{ matrix.target }}/release/udpgw-server README.md target/tun2proxy.h target/${{ matrix.target }}/release/libtun2proxy.dylib
if [[ "${{ matrix.target }}" == "x86_64-apple-darwin" ]]; then if [[ "${{ matrix.target }}" == "x86_64-apple-darwin" ]]; then
./build-aarch64-apple-ios.sh ./build-aarch64-apple-ios.sh
zip -r mypubdir4/tun2proxy-aarch64-apple-ios-xcframework.zip ./tun2proxy.xcframework/ zip -r mypubdir4/tun2proxy-aarch64-apple-ios-xcframework.zip ./tun2proxy.xcframework/
./build-apple.sh ./build-apple.sh
zip -r mypubdir4/tun2proxy-apple-xcframework.zip ./tun2proxy.xcframework/ zip -r mypubdir4/tun2proxy-apple-xcframework.zip ./tun2proxy.xcframework/
fi fi
elif [[ "${{ matrix.host_os }}" == "ubuntu-latest" ]]; then elif [[ "${{ contains(matrix.host_os, 'ubuntu') }}" == "true" ]]; then
zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy README.md target/tun2proxy-ffi.h target/${{ matrix.target }}/release/libtun2proxy.so zip -j mypubdir4/tun2proxy-${{ matrix.target }}.zip target/${{ matrix.target }}/release/tun2proxy-bin target/${{ matrix.target }}/release/udpgw-server README.md target/tun2proxy.h target/${{ matrix.target }}/release/libtun2proxy.so
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-gnu" ]]; then if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-gnu" ]]; then
./build-android.sh ./build-android.sh
cp ./tun2proxy-android-libs.zip ./mypubdir4/ cp ./tun2proxy-android-libs.zip ./mypubdir4/
@ -91,20 +115,26 @@ jobs:
fi fi
- name: Upload artifacts - name: Upload artifacts
if: ${{ !cancelled() }}
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: bin-${{ matrix.target }} name: bin-${{ matrix.target }}
path: mypubdir4/* path: mypubdir4/*
- name: Generate artifact attestation - name: Generate artifact attestation
if: ${{ !cancelled() }}
uses: actions/attest-build-provenance@v1 uses: actions/attest-build-provenance@v1
with: with:
subject-path: mypubdir4/* subject-path: mypubdir4/*
- name: Publish - name: Publish
if: ${{ !cancelled() }}
uses: softprops/action-gh-release@v1 uses: softprops/action-gh-release@v1
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with: with:
files: mypubdir4/* files: mypubdir4/*
- name: Abort on error
if: ${{ failure() }}
run: echo "Some of jobs failed" && false

View file

@ -1,7 +1,13 @@
name: Push or PR name: Push or PR
on: on:
[push, pull_request] workflow_dispatch:
push:
branches:
- '**'
pull_request:
branches:
- '**'
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
@ -16,7 +22,8 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: rustfmt - name: rustfmt
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
@ -32,8 +39,75 @@ jobs:
- name: Build - name: Build
if: ${{ !cancelled() }} if: ${{ !cancelled() }}
run: cargo build --verbose --tests --all-features run: |
cargo build --verbose --tests --all-features
cargo clean
cargo build --verbose
- name: Abort on error - name: Abort on error
if: ${{ failure() }} if: ${{ failure() }}
run: echo "Some of jobs failed" && false run: echo "Some of jobs failed" && false
build_n_test_android:
strategy:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install cargo ndk and rust compiler for android target
if: ${{ !cancelled() }}
run: |
cargo install --locked cargo-ndk
rustup target add x86_64-linux-android
- name: clippy
if: ${{ !cancelled() }}
run: cargo ndk -t x86_64 clippy --all-features -- -D warnings
- name: Build
if: ${{ !cancelled() }}
run: |
cargo ndk -t x86_64 rustc --verbose --all-features --lib --crate-type=cdylib
- name: Abort on error
if: ${{ failure() }}
run: echo "Android build job failed" && false
build_n_test_ios:
strategy:
fail-fast: false
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install cargo lipo and rust compiler for ios target
if: ${{ !cancelled() }}
run: |
cargo install --locked cargo-lipo
rustup target add x86_64-apple-ios aarch64-apple-ios
- name: clippy
if: ${{ !cancelled() }}
run: cargo clippy --target x86_64-apple-ios --all-features -- -D warnings
- name: Build
if: ${{ !cancelled() }}
run: |
cargo lipo --verbose --all-features
- name: Abort on error
if: ${{ failure() }}
run: echo "iOS build job failed" && false
semver:
name: Check semver
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Check semver
if: ${{ !cancelled() }}
uses: obi1kenobi/cargo-semver-checks-action@v2
- name: Abort on error
if: ${{ failure() }}
run: echo "Semver check failed" && false

View file

@ -12,22 +12,34 @@ jobs:
proxy_tests: proxy_tests:
name: Proxy Tests name: Proxy Tests
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'safe to test') if: (github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'safe to test')) && github.actor != 'dependabot[bot]' && github.actor != 'github-actions[bot]'
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@stable
with:
profile: minimal
toolchain: stable
override: true
- name: Populate .env - name: Populate .env
env: env:
DOTENV: ${{ secrets.DOTENV }} DOTENV: ${{ secrets.DOTENV }}
run: echo "$DOTENV" > .env run: |
echo "$DOTENV" > tests/.env
ln -s tests/.env
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: '3.x'
- name: Create virtual environment
run: python -m venv venv
- name: Activate virtual environment and install dependencies
run: |
source venv/bin/activate
pip install -r tests/requirements.txt
- name: Build project
run: cargo build --release
- name: Run tests - name: Run tests
run: >- run: |
pwd; source venv/bin/activate
ls -la; python tests/tests.py
sudo python -m pip install -r tests/requirements.txt;
cargo build --release;
sudo python tests/tests.py

1
.gitignore vendored
View file

@ -4,6 +4,7 @@ tun2proxy.xcframework/
.env .env
project.xcworkspace/ project.xcworkspace/
xcuserdata/ xcuserdata/
.vs/
.vscode/ .vscode/
.VSCodeCounter/ .VSCodeCounter/
build/ build/

View file

@ -1,63 +1,82 @@
[package] [package]
name = "tun2proxy" name = "tun2proxy"
version = "0.2.24" version = "0.7.10"
edition = "2021" edition = "2024"
license = "MIT" license = "MIT"
repository = "https://github.com/blechschmidt/tun2proxy" repository = "https://github.com/tun2proxy/tun2proxy"
homepage = "https://github.com/blechschmidt/tun2proxy" homepage = "https://github.com/tun2proxy/tun2proxy"
authors = ["B. Blechschmidt", "ssrlive"] authors = ["B. Blechschmidt", "ssrlive"]
description = "Tunnel interface to proxy" description = "Tunnel interface to proxy"
readme = "README.md" readme = "README.md"
rust-version = "1.77" rust-version = "1.85"
[lib] [lib]
crate-type = ["staticlib", "cdylib", "lib"] crate-type = ["staticlib", "cdylib", "lib"]
[[bin]]
name = "tun2proxy-bin"
path = "src/bin/main.rs"
[[bin]]
name = "udpgw-server"
path = "src/bin/udpgw_server.rs"
required-features = ["udpgw"]
[features]
default = ["udpgw"]
udpgw = []
[dependencies] [dependencies]
async-trait = "0.1" async-trait = "0.1"
base64 = { version = "0.22" } base64easy = "0.1"
chrono = "0.4" chrono = "0.4"
clap = { version = "4", features = ["derive", "wrap_help", "color"] } clap = { version = "4", features = ["derive", "wrap_help", "color"] }
ctrlc2 = { version = "3", features = ["tokio", "termination"] } ctrlc2 = { version = "3.6.5", features = ["async", "termination"] }
digest_auth = "0.3" digest_auth = "0.3"
dotenvy = "0.15" dotenvy = "0.15"
env_logger = "0.11" env_logger = "0.11"
hashlink = "0.9" hashlink = "0.10"
hickory-proto = "0.25"
httparse = "1" httparse = "1"
ipstack = { version = "0.0.10" } ipstack = { version = "0.4" }
lazy_static = "1"
log = { version = "0.4", features = ["std"] } log = { version = "0.4", features = ["std"] }
mimalloc = { version = "0.1", default-features = false, optional = true }
percent-encoding = "2" percent-encoding = "2"
socks5-impl = { version = "0.5" } shlex = "1.3.0"
thiserror = "1" socks5-impl = { version = "0.7", default-features = false, features = [
"tokio",
] }
thiserror = "2"
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
tokio-util = "0.7" tokio-util = "0.7"
tproxy-config = { version = "6", default-features = false } tproxy-config = { version = "6", default-features = false }
trust-dns-proto = "0.23" tun = { version = "0.8", features = ["async"] }
tun2 = { version = "2", features = ["async"] }
udp-stream = { version = "0.0.12", default-features = false } udp-stream = { version = "0.0.12", default-features = false }
unicase = "2" unicase = "2"
url = "2" url = "2"
[target.'cfg(target_os="android")'.dependencies]
android_logger = "0.15"
jni = { version = "0.21", default-features = false }
[target.'cfg(target_os="linux")'.dependencies] [target.'cfg(target_os="linux")'.dependencies]
bincode = "2"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
bincode = "1"
nix = { version = "0.29", default-features = false, features = [ [target.'cfg(target_os="windows")'.dependencies]
windows-service = "0.8"
[target.'cfg(unix)'.dependencies]
daemonize = "0.5"
nix = { version = "0.30", default-features = false, features = [
"fs", "fs",
"socket", "socket",
"uio", "uio",
] } ] }
[target.'cfg(target_os="android")'.dependencies]
android_logger = "0.14"
jni = { version = "0.21", default-features = false }
[build-dependencies] [build-dependencies]
chrono = "0.4"
serde_json = "1" serde_json = "1"
[[bin]] # [profile.release]
name = "tun2proxy" # strip = "symbols"
path = "src/bin/main.rs"
[profile.release]
strip = "symbols"

18
Dockerfile.alpine Normal file
View file

@ -0,0 +1,18 @@
####################################################################################################
## Builder
####################################################################################################
FROM rust:latest AS builder
WORKDIR /worker
COPY ./ .
RUN rustup target add x86_64-unknown-linux-musl
RUN cargo build --release --target x86_64-unknown-linux-musl
####################################################################################################
## Final image
####################################################################################################
FROM alpine:latest
RUN apk add --no-cache iproute2
COPY --from=builder /worker/target/x86_64-unknown-linux-musl/release/tun2proxy-bin /usr/bin/tun2proxy-bin
ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]

View file

@ -5,7 +5,7 @@ FROM rust:latest AS builder
WORKDIR /worker WORKDIR /worker
COPY ./ . COPY ./ .
RUN cargo build --release --target x86_64-unknown-linux-gnu RUN cargo build --release
#################################################################################################### ####################################################################################################
@ -15,6 +15,6 @@ FROM ubuntu:latest
RUN apt update && apt install -y iproute2 && apt clean all RUN apt update && apt install -y iproute2 && apt clean all
COPY --from=builder /worker/target/x86_64-unknown-linux-gnu/release/tun2proxy /usr/bin/tun2proxy COPY --from=builder /worker/target/release/tun2proxy-bin /usr/bin/tun2proxy-bin
ENTRYPOINT ["/usr/bin/tun2proxy", "--setup"] ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]

116
README.md
View file

@ -1,13 +1,15 @@
[![tun2proxy](https://socialify.git.ci/tun2proxy/tun2proxy/image?description=1&language=1&name=1&stargazers=1&theme=Light)](https://github.com/tun2proxy/tun2proxy)
# tun2proxy # tun2proxy
A tunnel interface for HTTP and SOCKS proxies on Linux, Android, macOS, iOS and Windows. A tunnel interface for HTTP and SOCKS proxies on Linux, Android, macOS, iOS and Windows.
[![Crates.io](https://img.shields.io/crates/v/tun2proxy.svg)](https://crates.io/crates/tun2proxy) [![Crates.io](https://img.shields.io/crates/v/tun2proxy.svg)](https://crates.io/crates/tun2proxy)
![tun2proxy](https://docs.rs/tun2proxy/badge.svg) [![tun2proxy](https://docs.rs/tun2proxy/badge.svg)](https://docs.rs/tun2proxy)
[![Documentation](https://img.shields.io/badge/docs-release-brightgreen.svg?style=flat)](https://docs.rs/tun2proxy) [![Documentation](https://img.shields.io/badge/docs-release-brightgreen.svg?style=flat)](https://docs.rs/tun2proxy)
[![Download](https://img.shields.io/crates/d/tun2proxy.svg)](https://crates.io/crates/tun2proxy) [![Download](https://img.shields.io/crates/d/tun2proxy.svg)](https://crates.io/crates/tun2proxy)
[![License](https://img.shields.io/crates/l/tun2proxy.svg?style=flat)](https://github.com/blechschmidt/tun2proxy/blob/master/LICENSE) [![License](https://img.shields.io/crates/l/tun2proxy.svg?style=flat)](https://github.com/tun2proxy/tun2proxy/blob/master/LICENSE)
> Additional information can be found in the [wiki](https://github.com/blechschmidt/tun2proxy/wiki) > Additional information can be found in the [wiki](https://github.com/tun2proxy/tun2proxy/wiki)
## Features ## Features
- HTTP proxy support (unauthenticated, basic and digest auth) - HTTP proxy support (unauthenticated, basic and digest auth)
@ -15,9 +17,10 @@ A tunnel interface for HTTP and SOCKS proxies on Linux, Android, macOS, iOS and
- SOCKS4a and SOCKS5h support (through the virtual DNS feature) - SOCKS4a and SOCKS5h support (through the virtual DNS feature)
- Minimal configuration setup for routing all traffic - Minimal configuration setup for routing all traffic
- IPv4 and IPv6 support - IPv4 and IPv6 support
- GFW evasion mechanism for certain use cases (see [issue #35](https://github.com/blechschmidt/tun2proxy/issues/35)) - GFW evasion mechanism for certain use cases (see [issue #35](https://github.com/tun2proxy/tun2proxy/issues/35))
- SOCKS5 UDP support - SOCKS5 UDP support
- Native support for proxying DNS over TCP - Native support for proxying DNS over TCP
- UdpGW (UDP gateway) support for UDP over TCP, see the [wiki](https://github.com/tun2proxy/tun2proxy/wiki/UDP-gateway-feature) for more information
## Build ## Build
Clone the repository and `cd` into the project folder. Then run the following: Clone the repository and `cd` into the project folder. Then run the following:
@ -35,7 +38,7 @@ To build an XCFramework for macOS and iOS, run the following:
### Install from binary ### Install from binary
Download the binary from [releases](https://github.com/blechschmidt/tun2proxy/releases) and put it in your `PATH`. Download the binary from [releases](https://github.com/tun2proxy/tun2proxy/releases) and put it in your `PATH`.
<details> <details>
<summary>Authenticity Verification</summary> <summary>Authenticity Verification</summary>
@ -66,7 +69,7 @@ describing the manual setup, except that a bind mount is used to overlay the `/e
You would then run the tool as follows: You would then run the tool as follows:
```bash ```bash
sudo ./target/release/tun2proxy --setup --proxy "socks5://1.2.3.4:1080" sudo ./target/release/tun2proxy-bin --setup --proxy "socks5://1.2.3.4:1080"
``` ```
Apart from SOCKS5, SOCKS4 and HTTP are supported. Apart from SOCKS5, SOCKS4 and HTTP are supported.
@ -105,7 +108,7 @@ sudo ip route add 8000::/1 dev tun0
# Make sure that DNS queries are routed through the tunnel. # Make sure that DNS queries are routed through the tunnel.
sudo sh -c "echo nameserver 198.18.0.1 > /etc/resolv.conf" sudo sh -c "echo nameserver 198.18.0.1 > /etc/resolv.conf"
./target/release/tun2proxy --tun tun0 --proxy "$PROXY_TYPE://$PROXY_IP:$PROXY_PORT" ./target/release/tun2proxy-bin --tun tun0 --proxy "$PROXY_TYPE://$PROXY_IP:$PROXY_PORT"
``` ```
This tool implements a virtual DNS feature that is used by switch `--dns virtual`. When a DNS packet to port 53 is detected, an IP This tool implements a virtual DNS feature that is used by switch `--dns virtual`. When a DNS packet to port 53 is detected, an IP
@ -126,39 +129,51 @@ sudo ip link del tun0
``` ```
Tunnel interface to proxy. Tunnel interface to proxy.
Usage: tun2proxy [OPTIONS] --proxy <URL> [ADMIN_COMMAND]... Usage: tun2proxy-bin [OPTIONS] --proxy <URL> [ADMIN_COMMAND]...
Arguments: Arguments:
[ADMIN_COMMAND]... Specify a command to run with root-like capabilities in the new namespace when using `--unshare`. [ADMIN_COMMAND]... Specify a command to run with root-like capabilities in the new namespace when using `--unshare`. This could be
This could be useful to start additional daemons, e.g. `openvpn` instance useful to start additional daemons, e.g. `openvpn` instance
Options: Options:
-p, --proxy <URL> Proxy URL in the form proto://[username[:password]@]host:port, where proto is one of -p, --proxy <URL> Proxy URL in the form proto://[username[:password]@]host:port, where proto is one of
socks4, socks5, http. For example: socks5://myname:password@127.0.0.1:1080 socks4, socks5, http. Username and password are encoded in percent encoding. For example:
-t, --tun <name> Name of the tun interface, such as tun0, utun4, etc. If this option is not provided, the socks5://myname:pass%40word@127.0.0.1:1080
OS will generate a random one -t, --tun <name> Name of the tun interface, such as tun0, utun4, etc. If this option is not provided, the
--tun-fd <fd> File descriptor of the tun interface OS will generate a random one
--unshare Create a tun interface in a newly created unprivileged namespace while maintaining proxy --tun-fd <fd> File descriptor of the tun interface
connectivity via the global network namespace --close-fd-on-drop <true or false> Set whether to close the received raw file descriptor on drop or not. This setting is
-6, --ipv6-enabled IPv6 enabled dependent on [tun_fd] [possible values: true, false]
-s, --setup Routing and system setup, which decides whether to setup the routing and system --unshare Create a tun interface in a newly created unprivileged namespace while maintaining proxy
configuration. This option is only available on Linux and requires root-like privileges. connectivity via the global network namespace
See `capabilities(7)` --unshare-pidfile <UNSHARE_PIDFILE> Create a pidfile of `unshare` process when using `--unshare`
-d, --dns <strategy> DNS handling strategy [default: direct] [possible values: virtual, over-tcp, direct] -6, --ipv6-enabled IPv6 enabled
--dns-addr <IP> DNS resolver address [default: 8.8.8.8] -s, --setup Routing and system setup, which decides whether to setup the routing and system
-b, --bypass <IP/CIDR> IPs used in routing setup which should bypass the tunnel, in the form of IP or IP/CIDR. configuration. This option requires root-like privileges on every platform.
Multiple IPs can be specified, e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8 It is very important on Linux, see `capabilities(7)`
--tcp-timeout <seconds> TCP timeout in seconds [default: 600] -d, --dns <strategy> DNS handling strategy [default: direct] [possible values: virtual, over-tcp, direct]
--udp-timeout <seconds> UDP timeout in seconds [default: 10] --dns-addr <IP> DNS resolver address [default: 8.8.8.8]
-v, --verbosity <level> Verbosity level [default: info] [possible values: off, error, warn, info, debug, trace] --virtual-dns-pool <CIDR> IP address pool to be used by virtual DNS in CIDR notation [default: 198.18.0.0/15]
-h, --help Print help -b, --bypass <IP/CIDR> IPs used in routing setup which should bypass the tunnel, in the form of IP or IP/CIDR.
-V, --version Print version Multiple IPs can be specified, e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8
--tcp-timeout <seconds> TCP timeout in seconds [default: 600]
--udp-timeout <seconds> UDP timeout in seconds [default: 10]
-v, --verbosity <level> Verbosity level [default: info] [possible values: off, error, warn, info, debug, trace]
--daemonize Daemonize for unix family or run as Windows service
--exit-on-fatal-error Exit immediately when fatal error occurs, useful for running as a service
--max-sessions <number> Maximum number of sessions to be handled concurrently [default: 200]
--udpgw-server <IP:PORT> UDP gateway server address, forwards UDP packets via specified TCP server
--udpgw-connections <number> Max connections for the UDP gateway, default value is 5
--udpgw-keepalive <seconds> Keepalive interval in seconds for the UDP gateway, default value is 30
-h, --help Print help
-V, --version Print version
``` ```
Currently, tun2proxy supports HTTP, SOCKS4/SOCKS4a and SOCKS5. A proxy is supplied to the `--proxy` argument in the Currently, tun2proxy supports HTTP, SOCKS4/SOCKS4a and SOCKS5. A proxy is supplied to the `--proxy` argument in the
URL format. For example, an HTTP proxy at `1.2.3.4:3128` with a username of `john.doe` and a password of `secret` is URL format. For example, an HTTP proxy at `1.2.3.4:3128` with a username of `john.doe` and a password of `secret` is
supplied as `--proxy http://john.doe:secret@1.2.3.4:3128`. This works analogously to curl's `--proxy` argument. supplied as `--proxy http://john.doe:secret@1.2.3.4:3128`. This works analogously to curl's `--proxy` argument.
## Docker Support ## Container Support
### Docker
Tun2proxy can serve as a proxy for other Docker containers. To make use of that feature, first build the image: Tun2proxy can serve as a proxy for other Docker containers. To make use of that feature, first build the image:
```bash ```bash
@ -173,7 +188,7 @@ docker run -d \
--sysctl net.ipv6.conf.default.disable_ipv6=0 \ --sysctl net.ipv6.conf.default.disable_ipv6=0 \
--cap-add NET_ADMIN \ --cap-add NET_ADMIN \
--name tun2proxy \ --name tun2proxy \
tun2proxy --proxy proto://[username[:password]@]host:port tun2proxy-bin --proxy proto://[username[:password]@]host:port
``` ```
You can then provide the running container's network to another worker container by sharing the network namespace (like kubernetes sidecar): You can then provide the running container's network to another worker container by sharing the network namespace (like kubernetes sidecar):
@ -183,6 +198,36 @@ docker run -it \
--network "container:tun2proxy" \ --network "container:tun2proxy" \
ubuntu:latest ubuntu:latest
``` ```
### Docker Compose
Write a `docker-compose.yaml` file with the following content:
```yaml
services:
tun2proxy:
volumes:
- /dev/net/tun:/dev/net/tun
sysctls:
- net.ipv6.conf.default.disable_ipv6=0
cap_add:
- NET_ADMIN
container_name: tun2proxy
image: ghcr.io/tun2proxy/tun2proxy:latest-ubuntu
command: --proxy proto://[username[:password]@]host:port
alpine:
stdin_open: true
tty: true
network_mode: container:tun2proxy
image: alpine:latest
command: apk add curl && curl ifconfig.icu && sleep 10
```
Then run the compose file
```bash
docker compose up -d tun2proxy
docker compose up alpine
```
## Configuration Tips ## Configuration Tips
### DNS ### DNS
@ -204,3 +249,10 @@ asked to open connections to IPv6 destinations. In such a case, you can disable
either through `sysctl -w net.ipv6.conf.all.disable_ipv6=1` and `sysctl -w net.ipv6.conf.default.disable_ipv6=1` either through `sysctl -w net.ipv6.conf.all.disable_ipv6=1` and `sysctl -w net.ipv6.conf.default.disable_ipv6=1`
or through `ip -6 route del default`, which causes the `libc` resolver (and other software) to not issue DNS AAAA or through `ip -6 route del default`, which causes the `libc` resolver (and other software) to not issue DNS AAAA
requests for IPv6 addresses. requests for IPv6 addresses.
## Contributors ✨
Thanks goes to these wonderful people:
<a href="https://github.com/tun2proxy/tun2proxy/graphs/contributors">
<img src="https://contrib.rocks/image?repo=tun2proxy/tun2proxy" />
</a>

View file

@ -5,16 +5,15 @@ rustup target add aarch64-apple-ios
cargo install cbindgen cargo install cbindgen
echo "Building target aarch64-apple-ios..." echo "Building target aarch64-apple-ios..."
cargo build --target aarch64-apple-ios cargo build --target aarch64-apple-ios --features mimalloc
echo "Generating includes..." echo "Generating includes..."
mkdir -p target/include/ mkdir -p target/include/
rm -rf target/include/* rm -rf target/include/*
cbindgen --config cbindgen.toml -l C --cpp-compat -o target/include/tun2proxy.h cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy { framework module tun2proxy {
umbrella header "tun2proxy.h" umbrella header "tun2proxy.h"
export * export *
module * { export * } module * { export * }
} }

View file

@ -5,16 +5,15 @@ rustup target add aarch64-apple-ios
cargo install cbindgen cargo install cbindgen
echo "Building target aarch64-apple-ios..." echo "Building target aarch64-apple-ios..."
cargo build --release --target aarch64-apple-ios cargo build --release --target aarch64-apple-ios --features mimalloc
echo "Generating includes..." echo "Generating includes..."
mkdir -p target/include/ mkdir -p target/include/
rm -rf target/include/* rm -rf target/include/*
cbindgen --config cbindgen.toml -l C --cpp-compat -o target/include/tun2proxy.h cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy { framework module tun2proxy {
umbrella header "tun2proxy.h" umbrella header "tun2proxy.h"
export * export *
module * { export * } module * { export * }
} }

View file

@ -108,7 +108,7 @@ function build_android() {
cp $BASE/target/$target/${mode2}/lib${name}.a $android_libs/${target_dir}/lib${name}.a cp $BASE/target/$target/${mode2}/lib${name}.a $android_libs/${target_dir}/lib${name}.a
done done
cbindgen -c $BASE/cbindgen.toml -l C --cpp-compat -o $android_libs/$name.h cbindgen -c $BASE/cbindgen.toml -o $android_libs/$name.h
} }
function main() { function main() {

View file

@ -13,7 +13,7 @@ echo "cargo build --release --target aarch64-apple-darwin"
cargo build --release --target aarch64-apple-darwin cargo build --release --target aarch64-apple-darwin
echo "cargo build --release --target aarch64-apple-ios" echo "cargo build --release --target aarch64-apple-ios"
cargo build --release --target aarch64-apple-ios cargo build --release --target aarch64-apple-ios --features mimalloc
echo "cargo build --release --target x86_64-apple-ios" echo "cargo build --release --target x86_64-apple-ios"
cargo build --release --target x86_64-apple-ios cargo build --release --target x86_64-apple-ios
@ -24,11 +24,10 @@ cargo build --release --target aarch64-apple-ios-sim
echo "Generating includes..." echo "Generating includes..."
mkdir -p target/include/ mkdir -p target/include/
rm -rf target/include/* rm -rf target/include/*
cbindgen --config cbindgen.toml -l C --cpp-compat -o target/include/tun2proxy.h cbindgen --config cbindgen.toml -o target/include/tun2proxy.h
cat > target/include/tun2proxy.modulemap <<EOF cat > target/include/tun2proxy.modulemap <<EOF
framework module tun2proxy { framework module tun2proxy {
umbrella header "tun2proxy.h" umbrella header "tun2proxy.h"
export * export *
module * { export * } module * { export * }
} }

View file

@ -1,12 +1,21 @@
fn main() -> Result<(), Box<dyn std::error::Error>> { fn main() -> Result<(), Box<dyn std::error::Error>> {
if let Ok(git_hash) = get_git_hash() {
// Set the environment variables
println!("cargo:rustc-env=GIT_HASH={}", git_hash.trim());
}
// Get the build time
let build_time = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string();
println!("cargo:rustc-env=BUILD_TIME={}", build_time);
#[cfg(target_os = "windows")] #[cfg(target_os = "windows")]
if let Ok(cargo_target_dir) = get_cargo_target_dir() { if let Ok(cargo_target_dir) = get_cargo_target_dir() {
let mut f = std::fs::File::create(cargo_target_dir.join("build.log"))?; let mut f = std::fs::File::create(cargo_target_dir.join("build.log"))?;
use std::io::Write; use std::io::Write;
f.write_all(format!("CARGO_TARGET_DIR: '{}'\r\n", cargo_target_dir.display()).as_bytes())?; f.write_all(format!("CARGO_TARGET_DIR: '{}'\r\n", cargo_target_dir.display()).as_bytes())?;
// The wintun crate's root directory // The wintun-bindings crate's root directory
let crate_dir = get_crate_dir("wintun")?; let crate_dir = get_crate_dir("wintun-bindings")?;
// The path to the DLL file, relative to the crate root, depending on the target architecture // The path to the DLL file, relative to the crate root, depending on the target architecture
let dll_path = get_wintun_bin_relative_path()?; let dll_path = get_wintun_bin_relative_path()?;
@ -85,3 +94,10 @@ fn get_crate_dir(crate_name: &str) -> Result<std::path::PathBuf, Box<dyn std::er
} }
Ok(crate_dir.ok_or("crate_dir")?) Ok(crate_dir.ok_or("crate_dir")?)
} }
fn get_git_hash() -> std::io::Result<String> {
use std::process::Command;
let git_hash = Command::new("git").args(["rev-parse", "--short", "HEAD"]).output()?.stdout;
let git_hash = String::from_utf8(git_hash).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
Ok(git_hash)
}
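For context, a minimal consumer-side sketch (not part of the diff; the helper below is hypothetical) of how the GIT_HASH and BUILD_TIME variables emitted by this build script can be read at compile time. option_env! is used for GIT_HASH because it is only set when `git rev-parse` succeeds:

// Hypothetical sketch: reading the variables emitted by the build script above.
fn full_version() -> String {
    let hash = option_env!("GIT_HASH").unwrap_or("unknown");
    let time = option_env!("BUILD_TIME").unwrap_or("unknown");
    format!("{} ({} {})", env!("CARGO_PKG_VERSION"), hash, time)
}

fn main() {
    println!("version: {}", full_version());
}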

View file

@ -1,15 +1,19 @@
language = "C"
cpp_compat = true
[export] [export]
include = [ include = [
"tun2proxy_run_with_cli",
"tun2proxy_with_fd_run", "tun2proxy_with_fd_run",
"tun2proxy_with_name_run", "tun2proxy_with_name_run",
"tun2proxy_with_name_stop", "tun2proxy_stop",
"tun2proxy_with_fd_stop",
"tun2proxy_set_log_callback", "tun2proxy_set_log_callback",
"tun2proxy_set_traffic_status_callback", "tun2proxy_set_traffic_status_callback",
] ]
exclude = [ exclude = [
"Java_com_github_shadowsocks_bg_Tun2proxy_run", "Java_com_github_shadowsocks_bg_Tun2proxy_run",
"Java_com_github_shadowsocks_bg_Tun2proxy_stop", "Java_com_github_shadowsocks_bg_Tun2proxy_stop",
"UdpFlag",
] ]
[export.rename] [export.rename]

View file

@ -8,7 +8,7 @@ echo $SCRIPT_DIR
netns="test" netns="test"
dante="danted" dante="danted"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy" tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
ip netns add "$netns" ip netns add "$netns"
@ -51,4 +51,4 @@ sleep 3
iperf3 -c 10.0.0.4 -P 10 iperf3 -c 10.0.0.4 -P 10
# Clean up # Clean up
# sudo sh -c "pkill tun2proxy; pkill iperf3; pkill danted; ip link del tun0; ip netns del test" # sudo sh -c "pkill tun2proxy-bin; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"

View file

@ -30,7 +30,7 @@ function core_function() {
else else
trap 'echo "" && echo "tun2proxy exited with code: $?" && restore' EXIT trap 'echo "" && echo "tun2proxy exited with code: $?" && restore' EXIT
local SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" local SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
local APP_BIN_PATH="${SCRIPT_DIR}/../target/release/tun2proxy" local APP_BIN_PATH="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
"${APP_BIN_PATH}" --tun tun0 --proxy "${PROXY_TYPE}://${PROXY_IP}:${PROXY_PORT}" -v trace "${APP_BIN_PATH}" --tun tun0 --proxy "${PROXY_TYPE}://${PROXY_IP}:${PROXY_PORT}" -v trace
fi fi
} }

View file

@ -29,7 +29,7 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
netns="test" netns="test"
dante="danted" dante="danted"
tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy" tun2proxy="${SCRIPT_DIR}/../target/release/tun2proxy-bin"
ip netns add "$netns" ip netns add "$netns"
@ -80,4 +80,4 @@ sleep 3
rperf -c 10.0.0.4 -v trace -P 1 -u -r rperf -c 10.0.0.4 -v trace -P 1 -u -r
# Clean up # Clean up
# sudo sh -c "pkill tun2proxy; pkill rperf; pkill danted; ip link del tun0; ip netns del test" # sudo sh -c "pkill tun2proxy-bin; pkill rperf; pkill danted; ip link del tun0; ip netns del test"

View file

@ -1,25 +1,33 @@
#![cfg(target_os = "android")] #![cfg(target_os = "android")]
use crate::{ use crate::{
Args,
args::ArgProxy, args::ArgProxy,
error::{Error, Result}, error::{Error, Result},
Args,
}; };
use jni::{ use jni::{
objects::{JClass, JString},
sys::{jchar, jint},
JNIEnv, JNIEnv,
objects::{JClass, JString},
sys::{jboolean, jchar, jint},
}; };
/// # Safety /// # Safety
/// ///
/// Running tun2proxy /// Running tun2proxy with some arguments
#[no_mangle] /// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun_fd: the tun file descriptor, it will be owned by tun2proxy
/// - close_fd_on_drop: whether to close the tun_fd on drop
/// - tun_mtu: the tun mtu
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[unsafe(no_mangle)]
pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_run( pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_run(
mut env: JNIEnv, mut env: JNIEnv,
_clazz: JClass, _clazz: JClass,
proxy_url: JString, proxy_url: JString,
tun_fd: jint, tun_fd: jint,
close_fd_on_drop: jboolean,
tun_mtu: jchar, tun_mtu: jchar,
verbosity: jint, verbosity: jint,
dns_strategy: jint, dns_strategy: jint,
@ -36,18 +44,23 @@ pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_run(
); );
let proxy_url = get_java_string(&mut env, &proxy_url).unwrap(); let proxy_url = get_java_string(&mut env, &proxy_url).unwrap();
let proxy = ArgProxy::try_from(proxy_url.as_str()).unwrap(); let proxy = ArgProxy::try_from(proxy_url.as_str()).unwrap();
let close_fd_on_drop = close_fd_on_drop != 0;
let mut args = Args::default(); let mut args = Args::default();
args.proxy(proxy).tun_fd(Some(tun_fd)).dns(dns).verbosity(verbosity); args.proxy(proxy)
crate::mobile_api::mobile_run(args, tun_mtu, false) .tun_fd(Some(tun_fd))
.close_fd_on_drop(close_fd_on_drop)
.dns(dns)
.verbosity(verbosity);
crate::general_api::general_run_for_api(args, tun_mtu, false)
} }
/// # Safety /// # Safety
/// ///
/// Shutdown tun2proxy /// Shutdown tun2proxy
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_stop(_env: JNIEnv, _: JClass) -> jint { pub unsafe extern "C" fn Java_com_github_shadowsocks_bg_Tun2proxy_stop(_env: JNIEnv, _: JClass) -> jint {
crate::mobile_api::mobile_stop() crate::general_api::tun2proxy_stop_internal()
} }
fn get_java_string(env: &mut JNIEnv, string: &JString) -> Result<String, Error> { fn get_java_string(env: &mut JNIEnv, string: &JString) -> Result<String, Error> {
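As an aside on the attribute change above: the switch from #[no_mangle] to #[unsafe(no_mangle)] matches the unsafe-attribute syntax required by the Rust 2024 edition. A standalone illustration (the exported symbol name below is made up):

// Illustrative only; not part of the crate.
// #[no_mangle]        // accepted on pre-2024 editions
#[unsafe(no_mangle)]   // required spelling on the 2024 edition
pub extern "C" fn tun2proxy_demo_export() -> i32 {
    0
}

fn main() {
    assert_eq!(tun2proxy_demo_export(), 0);
}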

View file

@ -1,48 +0,0 @@
#![cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
use crate::{
args::{ArgDns, ArgProxy},
ArgVerbosity, Args,
};
use std::os::raw::{c_char, c_int, c_ushort};
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun_fd: the tun file descriptor, it will be owned by tun2proxy
/// - packet_information: whether exists packet information in tun_fd
/// - tun_mtu: the tun mtu
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_with_fd_run(
proxy_url: *const c_char,
tun_fd: c_int,
packet_information: bool,
tun_mtu: c_ushort,
dns_strategy: ArgDns,
verbosity: ArgVerbosity,
) -> c_int {
log::set_max_level(verbosity.into());
if let Err(err) = log::set_boxed_logger(Box::<crate::dump_logger::DumpLogger>::default()) {
log::warn!("failed to set logger: {:?}", err);
}
let proxy_url = std::ffi::CStr::from_ptr(proxy_url).to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let mut args = Args::default();
args.proxy(proxy).tun_fd(Some(tun_fd)).dns(dns_strategy).verbosity(verbosity);
crate::mobile_api::mobile_run(args, tun_mtu, packet_information)
}
/// # Safety
///
/// Shutdown the tun2proxy component.
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_with_fd_stop() -> c_int {
crate::mobile_api::mobile_stop()
}

View file

@ -6,9 +6,21 @@ use tproxy_config::IpCidr;
use std::ffi::OsString; use std::ffi::OsString;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::str::FromStr;
#[macro_export]
macro_rules! version_info {
() => {
concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_HASH"), " ", env!("BUILD_TIME"), ")")
};
}
fn about_info() -> &'static str {
concat!("Tunnel interface to proxy.\nVersion: ", version_info!())
}
#[derive(Debug, Clone, clap::Parser)] #[derive(Debug, Clone, clap::Parser)]
#[command(author, version, about = "Tunnel interface to proxy.", long_about = None)] #[command(author, version = version_info!(), about = about_info(), long_about = None)]
pub struct Args { pub struct Args {
/// Proxy URL in the form proto://[username[:password]@]host:port, /// Proxy URL in the form proto://[username[:password]@]host:port,
/// where proto is one of socks4, socks5, http. /// where proto is one of socks4, socks5, http.
@ -19,13 +31,21 @@ pub struct Args {
/// Name of the tun interface, such as tun0, utun4, etc. /// Name of the tun interface, such as tun0, utun4, etc.
/// If this option is not provided, the OS will generate a random one. /// If this option is not provided, the OS will generate a random one.
#[arg(short, long, value_name = "name", conflicts_with = "tun_fd", value_parser = validate_tun)] #[arg(short, long, value_name = "name", value_parser = validate_tun)]
#[cfg_attr(unix, arg(conflicts_with = "tun_fd"))]
pub tun: Option<String>, pub tun: Option<String>,
/// File descriptor of the tun interface /// File descriptor of the tun interface
#[cfg(unix)]
#[arg(long, value_name = "fd", conflicts_with = "tun")] #[arg(long, value_name = "fd", conflicts_with = "tun")]
pub tun_fd: Option<i32>, pub tun_fd: Option<i32>,
/// Set whether to close the received raw file descriptor on drop or not.
/// This setting is dependent on [tun_fd].
#[cfg(unix)]
#[arg(long, value_name = "true or false", conflicts_with = "tun", requires = "tun_fd")]
pub close_fd_on_drop: Option<bool>,
/// Create a tun interface in a newly created unprivileged namespace /// Create a tun interface in a newly created unprivileged namespace
/// while maintaining proxy connectivity via the global network namespace. /// while maintaining proxy connectivity via the global network namespace.
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
@ -56,8 +76,9 @@ pub struct Args {
pub ipv6_enabled: bool, pub ipv6_enabled: bool,
/// Routing and system setup, which decides whether to setup the routing and system configuration. /// Routing and system setup, which decides whether to setup the routing and system configuration.
/// This option is only available on Linux and requires root-like privileges. See `capabilities(7)`. /// This option requires root-like privileges on every platform.
#[arg(short, long, default_value = if cfg!(target_os = "linux") { "false" } else { "true" })] /// It is very important on Linux, see `capabilities(7)`.
#[arg(short, long)]
pub setup: bool, pub setup: bool,
/// DNS handling strategy /// DNS handling strategy
@ -68,6 +89,10 @@ pub struct Args {
#[arg(long, value_name = "IP", default_value = "8.8.8.8")] #[arg(long, value_name = "IP", default_value = "8.8.8.8")]
pub dns_addr: IpAddr, pub dns_addr: IpAddr,
/// IP address pool to be used by virtual DNS in CIDR notation.
#[arg(long, value_name = "CIDR", default_value = "198.18.0.0/15")]
pub virtual_dns_pool: IpCidr,
/// IPs used in routing setup which should bypass the tunnel, /// IPs used in routing setup which should bypass the tunnel,
/// in the form of IP or IP/CIDR. Multiple IPs can be specified, /// in the form of IP or IP/CIDR. Multiple IPs can be specified,
/// e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8 /// e.g. --bypass 3.4.5.0/24 --bypass 5.6.7.8
@ -85,6 +110,33 @@ pub struct Args {
/// Verbosity level /// Verbosity level
#[arg(short, long, value_name = "level", value_enum, default_value = "info")] #[arg(short, long, value_name = "level", value_enum, default_value = "info")]
pub verbosity: ArgVerbosity, pub verbosity: ArgVerbosity,
/// Daemonize for unix family or run as Windows service
#[arg(long)]
pub daemonize: bool,
/// Exit immediately when fatal error occurs, useful for running as a service
#[arg(long)]
pub exit_on_fatal_error: bool,
/// Maximum number of sessions to be handled concurrently
#[arg(long, value_name = "number", default_value = "200")]
pub max_sessions: usize,
/// UDP gateway server address, forwards UDP packets via specified TCP server
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "IP:PORT")]
pub udpgw_server: Option<SocketAddr>,
/// Max connections for the UDP gateway, default value is 5
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "number", requires = "udpgw_server")]
pub udpgw_connections: Option<usize>,
/// Keepalive interval in seconds for the UDP gateway, default value is 30
#[cfg(feature = "udpgw")]
#[arg(long, value_name = "seconds", requires = "udpgw_server")]
pub udpgw_keepalive: Option<u64>,
} }
fn validate_tun(p: &str) -> Result<String> { fn validate_tun(p: &str) -> Result<String> {
@ -104,7 +156,10 @@ impl Default for Args {
Args { Args {
proxy: ArgProxy::default(), proxy: ArgProxy::default(),
tun: None, tun: None,
#[cfg(unix)]
tun_fd: None, tun_fd: None,
#[cfg(unix)]
close_fd_on_drop: None,
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
unshare: false, unshare: false,
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
@ -121,6 +176,16 @@ impl Default for Args {
tcp_timeout: 600, tcp_timeout: 600,
udp_timeout: 10, udp_timeout: 10,
verbosity: ArgVerbosity::Info, verbosity: ArgVerbosity::Info,
virtual_dns_pool: IpCidr::from_str("198.18.0.0/15").unwrap(),
daemonize: false,
exit_on_fatal_error: false,
max_sessions: 200,
#[cfg(feature = "udpgw")]
udpgw_server: None,
#[cfg(feature = "udpgw")]
udpgw_connections: None,
#[cfg(feature = "udpgw")]
udpgw_keepalive: None,
} }
} }
} }
@ -128,8 +193,7 @@ impl Default for Args {
impl Args { impl Args {
#[allow(clippy::let_and_return)] #[allow(clippy::let_and_return)]
pub fn parse_args() -> Self { pub fn parse_args() -> Self {
use clap::Parser; let args = <Self as ::clap::Parser>::parse();
let args = Self::parse();
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
if !args.setup && args.tun.is_none() { if !args.setup && args.tun.is_none() {
eprintln!("Missing required argument, '--tun' must be present when '--setup' is not used."); eprintln!("Missing required argument, '--tun' must be present when '--setup' is not used.");
@ -148,11 +212,30 @@ impl Args {
self self
} }
#[cfg(feature = "udpgw")]
pub fn udpgw_server(&mut self, udpgw: SocketAddr) -> &mut Self {
self.udpgw_server = Some(udpgw);
self
}
#[cfg(feature = "udpgw")]
pub fn udpgw_connections(&mut self, udpgw_connections: usize) -> &mut Self {
self.udpgw_connections = Some(udpgw_connections);
self
}
#[cfg(unix)]
pub fn tun_fd(&mut self, tun_fd: Option<i32>) -> &mut Self { pub fn tun_fd(&mut self, tun_fd: Option<i32>) -> &mut Self {
self.tun_fd = tun_fd; self.tun_fd = tun_fd;
self self
} }
#[cfg(unix)]
pub fn close_fd_on_drop(&mut self, close_fd_on_drop: bool) -> &mut Self {
self.close_fd_on_drop = Some(close_fd_on_drop);
self
}
pub fn verbosity(&mut self, verbosity: ArgVerbosity) -> &mut Self { pub fn verbosity(&mut self, verbosity: ArgVerbosity) -> &mut Self {
self.verbosity = verbosity; self.verbosity = verbosity;
self self
@ -323,17 +406,11 @@ impl TryFrom<&str> for ArgProxy {
let e = format!("`{s}` does not contain a host"); let e = format!("`{s}` does not contain a host");
let host = url.host_str().ok_or(Error::from(e))?; let host = url.host_str().ok_or(Error::from(e))?;
let mut url_host = String::from(host);
let e = format!("`{s}` does not contain a port"); let e = format!("`{s}` does not contain a port");
let port = url.port().ok_or(Error::from(&e))?; let port = url.port_or_known_default().ok_or(Error::from(&e))?;
url_host.push(':');
url_host.push_str(port.to_string().as_str());
let e = format!("`{host}` could not be resolved"); let e2 = format!("`{host}` does not resolve to a usable IP address");
let mut addr_iter = url_host.to_socket_addrs().map_err(|_| Error::from(&e))?; let addr = (host, port).to_socket_addrs()?.next().ok_or(Error::from(&e2))?;
let e = format!("`{host}` does not resolve to a usable IP address");
let addr = addr_iter.next().ok_or(Error::from(&e))?;
let credentials = if url.username() == "" && url.password().is_none() { let credentials = if url.username() == "" && url.password().is_none() {
None None
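To illustrate the port-resolution change above, a small sketch assuming the proxy URL is parsed with the url crate, as the method names suggest (the addresses are examples only): port() is None when the URL omits the port, while port_or_known_default() falls back to the scheme's well-known port, so a proxy URL like http://host now parses.

use url::Url;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // No explicit port: the old url.port() path would have rejected this URL.
    let u = Url::parse("http://user:pass@example.com")?;
    assert_eq!(u.port(), None);
    assert_eq!(u.port_or_known_default(), Some(80));

    // Explicit port: both accessors agree.
    let u = Url::parse("socks5://127.0.0.1:1080")?;
    assert_eq!(u.port_or_known_default(), Some(1080));
    Ok(())
}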

View file

@ -1,16 +1,43 @@
use tun2proxy::{Args, BoxError}; use tun2proxy::{ArgVerbosity, Args, BoxError};
#[tokio::main] fn main() -> Result<(), BoxError> {
async fn main() -> Result<(), BoxError> {
dotenvy::dotenv().ok(); dotenvy::dotenv().ok();
let args = Args::parse_args(); let args = Args::parse_args();
// let default = format!("{}={:?},trust_dns_proto=warn", module_path!(), args.verbosity); #[cfg(unix)]
let default = format!("{:?},trust_dns_proto=warn", args.verbosity); if args.daemonize {
let stdout = std::fs::File::create("/tmp/tun2proxy.out")?;
let stderr = std::fs::File::create("/tmp/tun2proxy.err")?;
let daemonize = daemonize::Daemonize::new()
.working_directory("/tmp")
.umask(0o777)
.stdout(stdout)
.stderr(stderr)
.privileged_action(|| "Executed before drop privileges");
let _ = daemonize.start()?;
}
#[cfg(target_os = "windows")]
if args.daemonize {
tun2proxy::win_svc::start_service()?;
return Ok(());
}
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(main_async(args))
}
async fn main_async(args: Args) -> Result<(), BoxError> {
let ipstack = match args.verbosity {
ArgVerbosity::Trace => ArgVerbosity::Debug,
_ => args.verbosity,
};
let default = format!("{:?},hickory_proto=warn,ipstack={:?}", args.verbosity, ipstack);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init(); env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
let shutdown_token = tokio_util::sync::CancellationToken::new(); let shutdown_token = tokio_util::sync::CancellationToken::new();
let join_handle = tokio::spawn({ let main_loop_handle = tokio::spawn({
let args = args.clone();
let shutdown_token = shutdown_token.clone(); let shutdown_token = shutdown_token.clone();
async move { async move {
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
@ -18,29 +45,43 @@ async fn main() -> Result<(), BoxError> {
if let Err(err) = namespace_proxy_main(args, shutdown_token).await { if let Err(err) = namespace_proxy_main(args, shutdown_token).await {
log::error!("namespace proxy error: {}", err); log::error!("namespace proxy error: {}", err);
} }
return; return Ok(0);
} }
unsafe extern "C" fn traffic_cb(status: *const tun2proxy::TrafficStatus, _: *mut std::ffi::c_void) { unsafe extern "C" fn traffic_cb(status: *const tun2proxy::TrafficStatus, _: *mut std::ffi::c_void) {
let status = &*status; let status = unsafe { &*status };
log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx); log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx);
} }
unsafe { tun2proxy::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) }; unsafe { tun2proxy::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) };
if let Err(err) = tun2proxy::desktop_run_async(args, shutdown_token).await { let ret = tun2proxy::general_run_async(args, tun::DEFAULT_MTU, cfg!(target_os = "macos"), shutdown_token).await;
log::error!("main loop error: {}", err); if let Err(err) = &ret {
log::error!("main loop error: {err}");
} }
ret
} }
}); });
ctrlc2::set_async_handler(async move { let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let ctrlc_fired_clone = ctrlc_fired.clone();
let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || {
log::info!("Ctrl-C received, exiting..."); log::info!("Ctrl-C received, exiting...");
ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst);
shutdown_token.cancel(); shutdown_token.cancel();
}) true
.await; })?;
if let Err(err) = join_handle.await { let tasks = main_loop_handle.await??;
log::error!("main_entry error {}", err);
if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) {
log::info!("Ctrl-C fired, waiting for the handler to finish...");
ctrlc_handel.await?;
}
if args.exit_on_fatal_error && tasks >= args.max_sessions {
// Because the `main_async` function may be stuck in an `await` state, we need to exit the process forcefully
log::info!("Internal fatal error, max sessions reached ({tasks}/{})", args.max_sessions);
std::process::exit(-1);
} }
Ok(()) Ok(())
@ -51,7 +92,7 @@ async fn namespace_proxy_main(
_args: Args, _args: Args,
_shutdown_token: tokio_util::sync::CancellationToken, _shutdown_token: tokio_util::sync::CancellationToken,
) -> Result<std::process::ExitStatus, tun2proxy::Error> { ) -> Result<std::process::ExitStatus, tun2proxy::Error> {
use nix::fcntl::{open, OFlag}; use nix::fcntl::{OFlag, open};
use nix::sys::stat::Mode; use nix::sys::stat::Mode;
use std::os::fd::AsRawFd; use std::os::fd::AsRawFd;
@ -61,7 +102,7 @@ async fn namespace_proxy_main(
let child = tokio::process::Command::new("unshare") let child = tokio::process::Command::new("unshare")
.args("--user --map-current-user --net --mount --keep-caps --kill-child --fork".split(' ')) .args("--user --map-current-user --net --mount --keep-caps --kill-child --fork".split(' '))
.arg(format!("/proc/self/fd/{}", fd)) .arg(format!("/proc/self/fd/{}", fd.as_raw_fd()))
.arg("--socket-transfer-fd") .arg("--socket-transfer-fd")
.arg(remote_fd.as_raw_fd().to_string()) .arg(remote_fd.as_raw_fd().to_string())
.args(std::env::args().skip(1)) .args(std::env::args().skip(1))
@ -82,7 +123,7 @@ async fn namespace_proxy_main(
log::info!("The tun proxy is running in unprivileged mode. See `namespaces(7)`."); log::info!("The tun proxy is running in unprivileged mode. See `namespaces(7)`.");
log::info!(""); log::info!("");
log::info!("If you need to run a process that relies on root-like capabilities (e.g. `openvpn`)"); log::info!("If you need to run a process that relies on root-like capabilities (e.g. `openvpn`)");
log::info!("Use `tun2proxy --unshare --setup [...] -- openvpn --config [...]`"); log::info!("Use `tun2proxy-bin --unshare --setup [...] -- openvpn --config [...]`");
log::info!(""); log::info!("");
log::info!("To run a new process in the created namespace (e.g. a flatpak app)"); log::info!("To run a new process in the created namespace (e.g. a flatpak app)");
log::info!( log::info!(
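The Ctrl-C handling introduced above follows a common shutdown pattern; here is a condensed, self-contained sketch (the ctrlc2 and tokio-util calls are mirrored from the diff, the loop body is a stand-in):

use std::sync::{
    Arc,
    atomic::{AtomicBool, Ordering},
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let shutdown_token = tokio_util::sync::CancellationToken::new();
    let ctrlc_fired = Arc::new(AtomicBool::new(false));

    let fired = ctrlc_fired.clone();
    let token = shutdown_token.clone();
    let ctrlc_handle = ctrlc2::AsyncCtrlC::new(move || {
        fired.store(true, Ordering::SeqCst);
        token.cancel();
        true
    })?;

    // Stand-in for the real main loop: just wait until the token is cancelled.
    shutdown_token.cancelled().await;

    if ctrlc_fired.load(Ordering::SeqCst) {
        // Let the Ctrl-C handler task finish before exiting.
        ctrlc_handle.await?;
    }
    Ok(())
}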

src/bin/udpgw_server.rs (new file, 273 lines)
View file

@ -0,0 +1,273 @@
use socks5_impl::protocol::AsyncStreamOperation;
use std::net::SocketAddr;
use tokio::{
io::AsyncWriteExt,
net::{
UdpSocket,
tcp::{ReadHalf, WriteHalf},
},
sync::mpsc::{Receiver, Sender},
};
use tun2proxy::{
ArgVerbosity, BoxError, Error, Result,
udpgw::{Packet, UdpFlag},
};
pub(crate) const CLIENT_DISCONNECT_TIMEOUT: tokio::time::Duration = std::time::Duration::from_secs(60);
#[derive(Debug, Clone)]
pub struct Client {
addr: SocketAddr,
last_activity: std::time::Instant,
}
impl Client {
pub fn new(addr: SocketAddr) -> Self {
let last_activity = std::time::Instant::now();
Self { addr, last_activity }
}
}
fn about_info() -> &'static str {
concat!("UDP Gateway Server for tun2proxy\nVersion: ", tun2proxy::version_info!())
}
#[derive(Debug, Clone, clap::Parser)]
#[command(author, version = tun2proxy::version_info!(), about = about_info(), long_about = None)]
pub struct UdpGwArgs {
/// UDP gateway listen address
#[arg(short, long, value_name = "IP:PORT", default_value = "127.0.0.1:7300")]
pub listen_addr: SocketAddr,
/// UDP mtu
#[arg(short = 'm', long, value_name = "udp mtu", default_value = "10240")]
pub udp_mtu: u16,
/// UDP timeout in seconds
#[arg(short = 't', long, value_name = "seconds", default_value = "3")]
pub udp_timeout: u64,
/// Daemonize for unix family or run as Windows service
#[cfg(unix)]
#[arg(short, long)]
pub daemonize: bool,
/// Verbosity level
#[arg(short, long, value_name = "level", value_enum, default_value = "info")]
pub verbosity: ArgVerbosity,
}
impl UdpGwArgs {
pub fn parse_args() -> Self {
<Self as ::clap::Parser>::parse()
}
}
async fn send_error_response(tx: Sender<Packet>, conn_id: u16) {
let error_packet = Packet::build_error_packet(conn_id);
if let Err(e) = tx.send(error_packet).await {
log::error!("send error response error {:?}", e);
}
}
async fn send_keepalive_response(tx: Sender<Packet>, conn_id: u16) {
let keepalive_packet = Packet::build_keepalive_packet(conn_id);
if let Err(e) = tx.send(keepalive_packet).await {
log::error!("send keepalive response error {:?}", e);
}
}
/// Send data field of packet from client to destination server and receive response,
/// then wrap response data to the packet's data field and send packet back to client.
async fn process_udp(udp_mtu: u16, udp_timeout: u64, tx: Sender<Packet>, mut packet: Packet) -> Result<()> {
let Some(dst_addr) = &packet.address else {
return Err(std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "udp request address is None").into());
};
use std::net::ToSocketAddrs;
let Some(dst_addr) = dst_addr.to_socket_addrs()?.next() else {
return Err(std::io::Error::new(std::io::ErrorKind::AddrNotAvailable, "to_socket_addrs").into());
};
let std_sock = match dst_addr {
std::net::SocketAddr::V6(_) => std::net::UdpSocket::bind("[::]:0")?,
std::net::SocketAddr::V4(_) => std::net::UdpSocket::bind("0.0.0.0:0")?,
};
std_sock.set_nonblocking(true)?;
#[cfg(unix)]
nix::sys::socket::setsockopt(&std_sock, nix::sys::socket::sockopt::ReuseAddr, &true)?;
let socket = UdpSocket::from_std(std_sock)?;
// 1. send udp data to destination server
socket.send_to(&packet.data, &dst_addr).await?;
// 2. receive response from destination server
let mut buf = vec![0u8; udp_mtu as usize];
let (len, _addr) = tokio::time::timeout(tokio::time::Duration::from_secs(udp_timeout), socket.recv_from(&mut buf))
.await
.map_err(std::io::Error::from)??;
packet.data = buf[..len].to_vec();
// 3. send response back to client
use std::io::{Error, ErrorKind::BrokenPipe};
tx.send(packet).await.map_err(|e| Error::new(BrokenPipe, e))?;
Ok(())
}
fn mask_ip(ip: &str) -> String {
if ip.len() <= 2 {
return ip.to_string();
}
let mut masked_ip = String::new();
for (i, c) in ip.chars().enumerate() {
if i == 0 || i == ip.len() - 1 || c == '.' || c == ':' {
masked_ip.push(c);
} else {
masked_ip.push('*');
}
}
masked_ip
}
fn mask_socket_addr(socket_addr: std::net::SocketAddr) -> String {
match socket_addr {
std::net::SocketAddr::V4(addr) => {
let masked_ip = mask_ip(&addr.ip().to_string());
format!("{}:{}", masked_ip, addr.port())
}
std::net::SocketAddr::V6(addr) => {
let masked_ip = mask_ip(&addr.ip().to_string());
format!("[{}]:{}", masked_ip, addr.port())
}
}
}
async fn process_client_udp_req(args: &UdpGwArgs, tx: Sender<Packet>, mut client: Client, mut reader: ReadHalf<'_>) -> std::io::Result<()> {
let udp_timeout = args.udp_timeout;
let udp_mtu = args.udp_mtu;
let masked_addr = mask_socket_addr(client.addr);
loop {
let masked_addr = masked_addr.clone();
// 1. read udpgw packet from client
let res = tokio::time::timeout(tokio::time::Duration::from_secs(2), Packet::retrieve_from_async_stream(&mut reader)).await;
let packet = match res {
Ok(Ok(packet)) => packet,
Ok(Err(e)) => {
log::debug!("client {} retrieve_from_async_stream \"{}\"", masked_addr, e);
break;
}
Err(e) => {
if client.last_activity.elapsed() >= CLIENT_DISCONNECT_TIMEOUT {
log::debug!("client {} last_activity elapsed \"{e}\"", masked_addr);
break;
}
continue;
}
};
client.last_activity = std::time::Instant::now();
let flags = packet.header.flags;
let conn_id = packet.header.conn_id;
if flags & UdpFlag::KEEPALIVE == UdpFlag::KEEPALIVE {
log::trace!("client {} send keepalive", masked_addr);
// 2. if keepalive packet, do nothing, send keepalive response to client
send_keepalive_response(tx.clone(), conn_id).await;
continue;
}
log::trace!("client {} received udp data {}", masked_addr, packet);
// 3. process client udpgw packet in a new task
let tx = tx.clone();
tokio::spawn(async move {
if let Err(e) = process_udp(udp_mtu, udp_timeout, tx.clone(), packet).await {
send_error_response(tx, conn_id).await;
log::debug!("client {} process udp function \"{e}\"", masked_addr);
}
});
}
Ok(())
}
async fn write_to_client(addr: SocketAddr, mut writer: WriteHalf<'_>, mut rx: Receiver<Packet>) -> std::io::Result<()> {
let masked_addr = mask_socket_addr(addr);
loop {
use std::io::{Error, ErrorKind::BrokenPipe};
let packet = rx.recv().await.ok_or(Error::new(BrokenPipe, "recv error"))?;
log::trace!("send response to client {} with {}", masked_addr, packet);
let data: Vec<u8> = packet.into();
let _r = writer.write(&data).await?;
}
}
async fn main_async(args: UdpGwArgs) -> Result<(), BoxError> {
log::info!("{} {} starting...", module_path!(), tun2proxy::version_info!());
log::info!("UDP Gateway Server running at {}", args.listen_addr);
let shutdown_token = tokio_util::sync::CancellationToken::new();
let main_loop_handle = tokio::spawn(run(args, shutdown_token.clone()));
let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let ctrlc_fired_clone = ctrlc_fired.clone();
let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || {
log::info!("Ctrl-C received, exiting...");
ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst);
shutdown_token.cancel();
true
})?;
let _ = main_loop_handle.await?;
if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) {
log::info!("Ctrl-C fired, waiting for the handler to finish...");
ctrlc_handel.await?;
}
Ok(())
}
pub async fn run(args: UdpGwArgs, shutdown_token: tokio_util::sync::CancellationToken) -> crate::Result<()> {
let tcp_listener = tokio::net::TcpListener::bind(args.listen_addr).await?;
loop {
let (mut tcp_stream, addr) = tokio::select! {
v = tcp_listener.accept() => v?,
_ = shutdown_token.cancelled() => break,
};
let client = Client::new(addr);
let masked_addr = mask_socket_addr(addr);
log::info!("client {} connected", masked_addr);
let params = args.clone();
tokio::spawn(async move {
let (tx, rx) = tokio::sync::mpsc::channel::<Packet>(100);
let (tcp_read_stream, tcp_write_stream) = tcp_stream.split();
let res = tokio::select! {
v = process_client_udp_req(&params, tx, client, tcp_read_stream) => v,
v = write_to_client(addr, tcp_write_stream, rx) => v,
};
log::info!("client {} disconnected with {:?}", masked_addr, res);
});
}
Ok::<(), Error>(())
}
fn main() -> Result<(), BoxError> {
dotenvy::dotenv().ok();
let args = UdpGwArgs::parse_args();
let default = format!("{:?}", args.verbosity);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
#[cfg(unix)]
if args.daemonize {
let stdout = std::fs::File::create("/tmp/udpgw.out")?;
let stderr = std::fs::File::create("/tmp/udpgw.err")?;
let daemonize = daemonize::Daemonize::new()
.working_directory("/tmp")
.umask(0o777)
.stdout(stdout)
.stderr(stderr)
.privileged_action(|| "Executed before drop privileges");
let _ = daemonize
.start()
.map_err(|e| format!("Failed to daemonize process, error:{:?}", e))?;
}
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(main_async(args))
}
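For clarity, a tiny self-contained check of the address-masking helper introduced in this file (the function body is copied from the diff; the sample addresses and expected strings are illustrative, worked out by hand):

fn mask_ip(ip: &str) -> String {
    if ip.len() <= 2 {
        return ip.to_string();
    }
    let mut masked_ip = String::new();
    for (i, c) in ip.chars().enumerate() {
        if i == 0 || i == ip.len() - 1 || c == '.' || c == ':' {
            masked_ip.push(c);
        } else {
            masked_ip.push('*');
        }
    }
    masked_ip
}

fn main() {
    // Only the first and last characters and the separators stay visible.
    assert_eq!(mask_ip("192.168.1.100"), "1**.***.*.**0");
    assert_eq!(mask_ip("::1"), "::1"); // separators and endpoints are kept as-is
    println!("mask_ip behaves as expected");
}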

View file

@ -1,179 +0,0 @@
#![cfg(any(target_os = "windows", target_os = "macos", target_os = "linux"))]
use crate::{
args::{ArgDns, ArgProxy},
ArgVerbosity, Args,
};
use std::os::raw::{c_char, c_int};
use tproxy_config::{TproxyArgs, TUN_GATEWAY, TUN_IPV4, TUN_NETMASK};
use tun2::{AbstractDevice, DEFAULT_MTU as MTU};
static TUN_QUIT: std::sync::Mutex<Option<tokio_util::sync::CancellationToken>> = std::sync::Mutex::new(None);
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun: the tun device name, e.g. "utun5"
/// - bypass: the bypass IP/CIDR, e.g. "123.45.67.0/24"
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - root_privilege: whether to run with root privilege
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_with_name_run(
proxy_url: *const c_char,
tun: *const c_char,
bypass: *const c_char,
dns_strategy: ArgDns,
_root_privilege: bool,
verbosity: ArgVerbosity,
) -> c_int {
let shutdown_token = tokio_util::sync::CancellationToken::new();
{
if let Ok(mut lock) = TUN_QUIT.lock() {
if lock.is_some() {
return -1;
}
*lock = Some(shutdown_token.clone());
} else {
return -2;
}
}
log::set_max_level(verbosity.into());
if let Err(err) = log::set_boxed_logger(Box::<crate::dump_logger::DumpLogger>::default()) {
log::warn!("set logger error: {}", err);
}
let proxy_url = std::ffi::CStr::from_ptr(proxy_url).to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let tun = std::ffi::CStr::from_ptr(tun).to_str().unwrap().to_string();
let mut args = Args::default();
args.proxy(proxy).tun(tun).dns(dns_strategy).verbosity(verbosity);
#[cfg(target_os = "linux")]
args.setup(_root_privilege);
if let Ok(bypass) = std::ffi::CStr::from_ptr(bypass).to_str() {
args.bypass(bypass.parse().unwrap());
}
let main_loop = async move {
if let Err(err) = desktop_run_async(args, shutdown_token).await {
log::error!("main loop error: {}", err);
return Err(err);
}
Ok(())
};
let exit_code = match tokio::runtime::Builder::new_multi_thread().enable_all().build() {
Err(_e) => -3,
Ok(rt) => match rt.block_on(main_loop) {
Ok(_) => 0,
Err(_e) => -4,
},
};
exit_code
}
/// Run the tun2proxy component with some arguments.
pub async fn desktop_run_async(args: Args, shutdown_token: tokio_util::sync::CancellationToken) -> std::io::Result<()> {
let bypass_ips = args.bypass.clone();
let mut tun_config = tun2::Configuration::default();
tun_config.address(TUN_IPV4).netmask(TUN_NETMASK).mtu(MTU).up();
tun_config.destination(TUN_GATEWAY);
if let Some(tun_fd) = args.tun_fd {
tun_config.raw_fd(tun_fd);
} else if let Some(ref tun) = args.tun {
tun_config.tun_name(tun);
}
#[cfg(target_os = "linux")]
tun_config.platform_config(|cfg| {
#[allow(deprecated)]
cfg.packet_information(true);
cfg.ensure_root_privileges(args.setup);
});
#[cfg(target_os = "windows")]
tun_config.platform_config(|cfg| {
cfg.device_guid(12324323423423434234_u128);
});
#[allow(unused_variables)]
let mut tproxy_args = TproxyArgs::new()
.tun_dns(args.dns_addr)
.proxy_addr(args.proxy.addr)
.bypass_ips(&bypass_ips)
.ipv6_default_route(args.ipv6_enabled);
#[allow(unused_mut, unused_assignments, unused_variables)]
let mut setup = true;
let device = tun2::create_as_async(&tun_config)?;
if let Ok(tun_name) = device.as_ref().tun_name() {
tproxy_args = tproxy_args.tun_name(&tun_name);
}
// TproxyState implements the Drop trait to restore network configuration,
// so we we need to assign it to a variable, even if it is not used.
let mut _restore: Option<tproxy_config::TproxyState> = None;
#[cfg(target_os = "linux")]
{
setup = args.setup;
}
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if setup {
_restore = Some(tproxy_config::tproxy_setup(&tproxy_args)?);
}
#[cfg(target_os = "linux")]
{
let mut admin_command_args = args.admin_command.iter();
if let Some(command) = admin_command_args.next() {
let child = tokio::process::Command::new(command)
.args(admin_command_args)
.kill_on_drop(true)
.spawn();
match child {
Err(err) => {
log::warn!("Failed to start admin process: {err}");
}
Ok(mut child) => {
tokio::spawn(async move {
if let Err(err) = child.wait().await {
log::warn!("Admin process terminated: {err}");
}
});
}
};
}
}
let join_handle = tokio::spawn(crate::run(device, MTU, args, shutdown_token));
join_handle.await.map_err(std::io::Error::from)??;
Ok::<(), std::io::Error>(())
}
/// # Safety
///
/// Shutdown the tun2proxy component.
#[no_mangle]
pub unsafe extern "C" fn tun2proxy_with_name_stop() -> c_int {
if let Ok(mut lock) = TUN_QUIT.lock() {
if let Some(shutdown_token) = lock.take() {
shutdown_token.cancel();
return 0;
}
}
-1
}

View file

@ -1,22 +1,16 @@
use std::{net::IpAddr, str::FromStr}; use hickory_proto::{
use trust_dns_proto::op::MessageType; op::{Message, MessageType, ResponseCode},
use trust_dns_proto::{ rr::{
op::{Message, ResponseCode}, Name, RData, Record,
rr::{record_type::RecordType, Name, RData, Record}, rdata::{A, AAAA},
},
}; };
use std::{net::IpAddr, str::FromStr};
pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u32) -> Result<Message, String> { pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u32) -> Result<Message, String> {
let record = match ip { let record = match ip {
IpAddr::V4(ip) => { IpAddr::V4(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::A(A(ip))),
let mut record = Record::with(Name::from_str(domain)?, RecordType::A, ttl); IpAddr::V6(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::AAAA(AAAA(ip))),
record.set_data(Some(RData::A(ip.into())));
record
}
IpAddr::V6(ip) => {
let mut record = Record::with(Name::from_str(domain)?, RecordType::AAAA, ttl);
record.set_data(Some(RData::AAAA(ip.into())));
record
}
}; };
// We must indicate that this message is a response. Otherwise, implementations may not // We must indicate that this message is a response. Otherwise, implementations may not
@ -28,9 +22,7 @@ pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u
} }
pub fn remove_ipv6_entries(message: &mut Message) { pub fn remove_ipv6_entries(message: &mut Message) {
message message.answers_mut().retain(|answer| !matches!(answer.data(), RData::AAAA(_)));
.answers_mut()
.retain(|answer| !matches!(answer.data(), Some(RData::AAAA(_))));
} }
pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result<IpAddr, String> { pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result<IpAddr, String> {
@ -39,7 +31,7 @@ pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result<IpAddr, Stri
} }
let mut cname = None; let mut cname = None;
for answer in message.answers() { for answer in message.answers() {
match answer.data().ok_or("DNS response not contains answer data")? { match answer.data() {
RData::A(addr) => { RData::A(addr) => {
return Ok(IpAddr::V4((*addr).into())); return Ok(IpAddr::V4((*addr).into()));
} }
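A minimal sketch of the hickory-proto API this rewrite relies on (domain, TTL, and address are placeholder values; the same crate version as in the diff is assumed):

use hickory_proto::rr::{Name, RData, Record, rdata::A};
use std::{net::Ipv4Addr, str::FromStr};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Record::from_rdata replaces the old Record::with + set_data two-step.
    let name = Name::from_str("example.com.")?;
    let record = Record::from_rdata(name, 60, RData::A(A(Ipv4Addr::new(203, 0, 113, 10))));
    assert_eq!(record.ttl(), 60);
    // data() now yields &RData directly instead of Option<&RData>.
    assert!(matches!(record.data(), RData::A(_)));
    Ok(())
}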

View file

@ -9,7 +9,7 @@ pub(crate) static DUMP_CALLBACK: Mutex<Option<DumpCallback>> = Mutex::new(None);
/// # Safety /// # Safety
/// ///
/// set dump log info callback. /// set dump log info callback.
#[no_mangle] #[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_set_log_callback( pub unsafe extern "C" fn tun2proxy_set_log_callback(
callback: Option<unsafe extern "C" fn(ArgVerbosity, *const c_char, *mut c_void)>, callback: Option<unsafe extern "C" fn(ArgVerbosity, *const c_char, *mut c_void)>,
ctx: *mut c_void, ctx: *mut c_void,
@ -23,7 +23,7 @@ pub struct DumpCallback(Option<unsafe extern "C" fn(ArgVerbosity, *const c_char,
impl DumpCallback { impl DumpCallback {
unsafe fn call(self, dump_level: ArgVerbosity, info: *const c_char) { unsafe fn call(self, dump_level: ArgVerbosity, info: *const c_char) {
if let Some(cb) = self.0 { if let Some(cb) = self.0 {
cb(dump_level, info, self.1); unsafe { cb(dump_level, info, self.1) };
} }
} }
} }

View file

@ -6,7 +6,7 @@ pub enum Error {
#[error(transparent)] #[error(transparent)]
Io(#[from] std::io::Error), Io(#[from] std::io::Error),
#[cfg(target_os = "linux")] #[cfg(unix)]
#[error("nix::errno::Errno {0:?}")] #[error("nix::errno::Errno {0:?}")]
NixErrno(#[from] nix::errno::Errno), NixErrno(#[from] nix::errno::Errno),
@ -23,10 +23,10 @@ pub enum Error {
TryFromSlice(#[from] std::array::TryFromSliceError), TryFromSlice(#[from] std::array::TryFromSliceError),
#[error("IpStackError {0:?}")] #[error("IpStackError {0:?}")]
IpStack(#[from] ipstack::IpStackError), IpStack(#[from] Box<ipstack::IpStackError>),
#[error("DnsProtoError {0:?}")] #[error("DnsProtoError {0:?}")]
DnsProto(#[from] trust_dns_proto::error::ProtoError), DnsProto(#[from] hickory_proto::ProtoError),
#[error("httparse::Error {0:?}")] #[error("httparse::Error {0:?}")]
Httparse(#[from] httparse::Error), Httparse(#[from] httparse::Error),
@ -43,10 +43,12 @@ pub enum Error {
#[error("std::num::ParseIntError {0:?}")] #[error("std::num::ParseIntError {0:?}")]
IntParseError(#[from] std::num::ParseIntError), IntParseError(#[from] std::num::ParseIntError),
}
#[cfg(target_os = "linux")] impl From<ipstack::IpStackError> for Error {
#[error("bincode::Error {0:?}")] fn from(err: ipstack::IpStackError) -> Self {
BincodeError(#[from] bincode::Error), Self::IpStack(Box::new(err))
}
} }
impl From<&str> for Error { impl From<&str> for Error {
@ -71,7 +73,7 @@ impl From<Error> for std::io::Error {
fn from(err: Error) -> Self { fn from(err: Error) -> Self {
match err { match err {
Error::Io(err) => err, Error::Io(err) => err,
_ => std::io::Error::new(std::io::ErrorKind::Other, err), _ => std::io::Error::other(err),
} }
} }
} }
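The IpStackError change above boxes the variant's payload; a generic sketch of that pattern follows (the types are made up for illustration). Boxing keeps the error enum pointer-sized, while a hand-written From impl preserves the `?` conversion that #[from] used to derive:

// Stand-in for a large third-party error type.
#[derive(Debug)]
struct BigThirdPartyError([u8; 256]);

#[derive(Debug)]
enum Error {
    Big(Box<BigThirdPartyError>),
}

// Manual From impl, analogous to `impl From<ipstack::IpStackError> for Error` above.
impl From<BigThirdPartyError> for Error {
    fn from(err: BigThirdPartyError) -> Self {
        Self::Big(Box::new(err))
    }
}

fn fallible() -> Result<(), Error> {
    let res: Result<(), BigThirdPartyError> = Err(BigThirdPartyError([0; 256]));
    res?; // `?` still converts through the From impl
    Ok(())
}

fn main() {
    // The enum stays small no matter how large the boxed payload is.
    assert!(std::mem::size_of::<Error>() <= std::mem::size_of::<usize>() * 2);
    assert!(fallible().is_err());
}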

src/general_api.rs (new file, 261 lines)
View file

@ -0,0 +1,261 @@
use crate::{
ArgVerbosity, Args,
args::{ArgDns, ArgProxy},
};
use std::os::raw::{c_char, c_int, c_ushort};
static TUN_QUIT: std::sync::Mutex<Option<tokio_util::sync::CancellationToken>> = std::sync::Mutex::new(None);
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun: the tun device name, e.g. "utun5"
/// - bypass: the bypass IP/CIDR, e.g. "123.45.67.0/24"
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - root_privilege: whether to run with root privilege
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_with_name_run(
proxy_url: *const c_char,
tun: *const c_char,
bypass: *const c_char,
dns_strategy: ArgDns,
_root_privilege: bool,
verbosity: ArgVerbosity,
) -> c_int {
let proxy_url = unsafe { std::ffi::CStr::from_ptr(proxy_url) }.to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let tun = unsafe { std::ffi::CStr::from_ptr(tun) }.to_str().unwrap().to_string();
let mut args = Args::default();
if let Ok(bypass) = unsafe { std::ffi::CStr::from_ptr(bypass) }.to_str() {
args.bypass(bypass.parse().unwrap());
}
args.proxy(proxy).tun(tun).dns(dns_strategy).verbosity(verbosity);
#[cfg(target_os = "linux")]
args.setup(_root_privilege);
general_run_for_api(args, tun::DEFAULT_MTU, false)
}
/// # Safety
///
/// Run the tun2proxy component with some arguments.
/// Parameters:
/// - proxy_url: the proxy url, e.g. "socks5://127.0.0.1:1080"
/// - tun_fd: the tun file descriptor, it will be owned by tun2proxy
/// - close_fd_on_drop: whether to close the tun_fd on drop
/// - packet_information: indicates whether packet information is present in packets from the TUN device
/// - tun_mtu: the tun mtu
/// - dns_strategy: the dns strategy, see ArgDns enum
/// - verbosity: the verbosity level, see ArgVerbosity enum
#[cfg(unix)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_with_fd_run(
proxy_url: *const c_char,
tun_fd: c_int,
close_fd_on_drop: bool,
packet_information: bool,
tun_mtu: c_ushort,
dns_strategy: ArgDns,
verbosity: ArgVerbosity,
) -> c_int {
let proxy_url = unsafe { std::ffi::CStr::from_ptr(proxy_url) }.to_str().unwrap();
let proxy = ArgProxy::try_from(proxy_url).unwrap();
let mut args = Args::default();
args.proxy(proxy)
.tun_fd(Some(tun_fd))
.close_fd_on_drop(close_fd_on_drop)
.dns(dns_strategy)
.verbosity(verbosity);
general_run_for_api(args, tun_mtu, packet_information)
}
/// # Safety
/// Run the tun2proxy component with command line arguments
/// Parameters:
/// - cli_args: The command line arguments,
/// e.g. `tun2proxy-bin --setup --proxy socks5://127.0.0.1:1080 --bypass 98.76.54.0/24 --dns over-tcp --verbosity trace`
/// - tun_mtu: The MTU of the TUN device, e.g. 1500
/// - packet_information: Whether packet information is present in packets from the TUN device
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_run_with_cli_args(cli_args: *const c_char, tun_mtu: c_ushort, packet_information: bool) -> c_int {
let Ok(cli_args) = unsafe { std::ffi::CStr::from_ptr(cli_args) }.to_str() else {
log::error!("Failed to convert CLI arguments to string");
return -5;
};
let Some(args) = shlex::split(cli_args) else {
log::error!("Failed to split CLI arguments");
return -6;
};
let args = <Args as ::clap::Parser>::parse_from(args);
general_run_for_api(args, tun_mtu, packet_information)
}
pub fn general_run_for_api(args: Args, tun_mtu: u16, packet_information: bool) -> c_int {
log::set_max_level(args.verbosity.into());
if let Err(err) = log::set_boxed_logger(Box::<crate::dump_logger::DumpLogger>::default()) {
log::debug!("set logger error: {}", err);
}
let shutdown_token = tokio_util::sync::CancellationToken::new();
if let Ok(mut lock) = TUN_QUIT.lock() {
if lock.is_some() {
log::error!("tun2proxy already started");
return -1;
}
*lock = Some(shutdown_token.clone());
} else {
log::error!("failed to lock tun2proxy quit token");
return -2;
}
let Ok(rt) = tokio::runtime::Builder::new_multi_thread().enable_all().build() else {
log::error!("failed to create tokio runtime");
return -3;
};
match rt.block_on(async move {
let ret = general_run_async(args.clone(), tun_mtu, packet_information, shutdown_token).await;
match &ret {
Ok(sessions) => {
if args.exit_on_fatal_error && *sessions >= args.max_sessions {
log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions);
std::process::exit(-1);
}
log::debug!("tun2proxy exited normally, current sessions: {sessions}");
}
Err(err) => log::error!("main loop error: {err}"),
}
ret
}) {
Ok(_) => 0,
Err(e) => {
log::error!("failed to run tun2proxy with error: {:?}", e);
-4
}
}
}
/// Run the tun2proxy component with some arguments.
pub async fn general_run_async(
args: Args,
tun_mtu: u16,
_packet_information: bool,
shutdown_token: tokio_util::sync::CancellationToken,
) -> std::io::Result<usize> {
let mut tun_config = tun::Configuration::default();
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
{
use tproxy_config::{TUN_GATEWAY, TUN_IPV4, TUN_NETMASK};
tun_config.address(TUN_IPV4).netmask(TUN_NETMASK).mtu(tun_mtu).up();
tun_config.destination(TUN_GATEWAY);
}
#[cfg(unix)]
if let Some(fd) = args.tun_fd {
tun_config.raw_fd(fd);
if let Some(v) = args.close_fd_on_drop {
tun_config.close_fd_on_drop(v);
};
} else if let Some(ref tun) = args.tun {
tun_config.tun_name(tun);
}
#[cfg(windows)]
if let Some(ref tun) = args.tun {
tun_config.tun_name(tun);
}
#[cfg(target_os = "linux")]
tun_config.platform_config(|cfg| {
#[allow(deprecated)]
cfg.packet_information(true);
cfg.ensure_root_privileges(args.setup);
});
#[cfg(target_os = "windows")]
tun_config.platform_config(|cfg| {
cfg.device_guid(12324323423423434234_u128);
});
#[cfg(any(target_os = "ios", target_os = "macos"))]
tun_config.platform_config(|cfg| {
cfg.packet_information(_packet_information);
});
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
#[allow(unused_variables)]
let mut tproxy_args = tproxy_config::TproxyArgs::new()
.tun_dns(args.dns_addr)
.proxy_addr(args.proxy.addr)
.bypass_ips(&args.bypass)
.ipv6_default_route(args.ipv6_enabled);
let device = tun::create_as_async(&tun_config)?;
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if let Ok(tun_name) = tun::AbstractDevice::tun_name(&*device) {
// Above line is equivalent to: `use tun::AbstractDevice; if let Ok(tun_name) = device.tun_name() {`
tproxy_args = tproxy_args.tun_name(&tun_name);
}
// TproxyState implements the Drop trait to restore network configuration,
// so we need to assign it to a variable, even if it is not used.
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
let mut _restore: Option<tproxy_config::TproxyState> = None;
#[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))]
if args.setup {
_restore = Some(tproxy_config::tproxy_setup(&tproxy_args)?);
}
#[cfg(target_os = "linux")]
{
let mut admin_command_args = args.admin_command.iter();
if let Some(command) = admin_command_args.next() {
let child = tokio::process::Command::new(command)
.args(admin_command_args)
.kill_on_drop(true)
.spawn();
match child {
Err(err) => {
log::warn!("Failed to start admin process: {err}");
}
Ok(mut child) => {
tokio::spawn(async move {
if let Err(err) = child.wait().await {
log::warn!("Admin process terminated: {err}");
}
});
}
};
}
}
let join_handle = tokio::spawn(crate::run(device, tun_mtu, args, shutdown_token));
Ok(join_handle.await.map_err(std::io::Error::from)??)
}
/// # Safety
///
/// Shutdown the tun2proxy component.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_stop() -> c_int {
tun2proxy_stop_internal()
}
pub(crate) fn tun2proxy_stop_internal() -> c_int {
if let Ok(mut lock) = TUN_QUIT.lock() {
if let Some(shutdown_token) = lock.take() {
shutdown_token.cancel();
return 0;
}
}
-1
}
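To show the mechanics behind tun2proxy_run_with_cli_args, a self-contained sketch (DemoArgs is a made-up stand-in for the real Args struct): shlex splits the single C string into shell-style words, and clap's parse_from consumes them, treating the first word as the program name.

use clap::Parser;

#[derive(Debug, Parser)]
struct DemoArgs {
    /// Proxy URL, e.g. socks5://127.0.0.1:1080
    #[arg(long)]
    proxy: String,
    /// Verbosity level
    #[arg(short, long, default_value = "info")]
    verbosity: String,
}

fn main() {
    let cli = "demo --proxy socks5://127.0.0.1:1080 -v trace";
    let words = shlex::split(cli).expect("failed to split CLI arguments");
    let args = DemoArgs::parse_from(words);
    assert_eq!(args.proxy, "socks5://127.0.0.1:1080");
    assert_eq!(args.verbosity, "trace");
    println!("{args:?}");
}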

View file

@ -4,11 +4,10 @@ use crate::{
proxy_handler::{ProxyHandler, ProxyHandlerManager}, proxy_handler::{ProxyHandler, ProxyHandlerManager},
session_info::{IpProtocol, SessionInfo}, session_info::{IpProtocol, SessionInfo},
}; };
use base64::Engine;
use httparse::Response; use httparse::Response;
use socks5_impl::protocol::UserKey; use socks5_impl::protocol::UserKey;
use std::{ use std::{
collections::{hash_map::RandomState, HashMap, VecDeque}, collections::{HashMap, VecDeque, hash_map::RandomState},
iter::FromIterator, iter::FromIterator,
net::SocketAddr, net::SocketAddr,
str, str,
@ -141,8 +140,7 @@ impl HttpConnection {
.extend(format!("{}: {}\r\n", PROXY_AUTHORIZATION, response.to_header_string()).as_bytes()); .extend(format!("{}: {}\r\n", PROXY_AUTHORIZATION, response.to_header_string()).as_bytes());
} }
AuthenticationScheme::Basic => { AuthenticationScheme::Basic => {
let cred = format!("{}:{}", credentials.username, credentials.password); let auth_b64 = base64easy::encode(credentials.to_string(), base64easy::EngineKind::Standard);
let auth_b64 = base64::engine::general_purpose::STANDARD.encode(cred);
self.server_outbuf self.server_outbuf
.extend(format!("{}: Basic {}\r\n", PROXY_AUTHORIZATION, auth_b64).as_bytes()); .extend(format!("{}: Basic {}\r\n", PROXY_AUTHORIZATION, auth_b64).as_bytes());
} }
@ -252,7 +250,7 @@ impl HttpConnection {
} }
// The HTTP/1.1 expected to be keep alive waiting for the next frame so, we must // The HTTP/1.1 expected to be keep alive waiting for the next frame so, we must
// compute the lenght of the response in order to detect the next frame (response) // compute the length of the response in order to detect the next frame (response)
// [RFC-9112](https://datatracker.ietf.org/doc/html/rfc9112#body.content-length) // [RFC-9112](https://datatracker.ietf.org/doc/html/rfc9112#body.content-length)
// Transfer-Encoding isn't supported yet // Transfer-Encoding isn't supported yet
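A small sketch of the Basic proxy-authorization header produced above (the base64easy call mirrors the diff; the credentials are dummies and the expected encoding was computed by hand):

fn main() {
    let cred = "user:pass"; // "<username>:<password>" per RFC 7617
    let auth_b64 = base64easy::encode(cred.to_string(), base64easy::EngineKind::Standard);
    assert_eq!(auth_b64, "dXNlcjpwYXNz");
    let header = format!("Proxy-Authorization: Basic {auth_b64}\r\n");
    println!("{header}");
}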

View file

@ -1,3 +1,5 @@
#[cfg(feature = "udpgw")]
use crate::udpgw::UdpGwClient;
use crate::{ use crate::{
directions::{IncomingDataEvent, IncomingDirection, OutgoingDirection}, directions::{IncomingDataEvent, IncomingDirection, OutgoingDirection},
http::HttpManager, http::HttpManager,
@ -5,10 +7,12 @@ use crate::{
session_info::{IpProtocol, SessionInfo}, session_info::{IpProtocol, SessionInfo},
virtual_dns::VirtualDns, virtual_dns::VirtualDns,
}; };
use ipstack::stream::{IpStackStream, IpStackTcpStream, IpStackUdpStream}; use ipstack::{IpStackStream, IpStackTcpStream, IpStackUdpStream};
use proxy_handler::{ProxyHandler, ProxyHandlerManager}; use proxy_handler::{ProxyHandler, ProxyHandlerManager};
use socks::SocksProxyManager; use socks::SocksProxyManager;
pub use socks5_impl::protocol::UserKey; pub use socks5_impl::protocol::UserKey;
#[cfg(feature = "udpgw")]
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
use std::{ use std::{
collections::VecDeque, collections::VecDeque,
io::ErrorKind, io::ErrorKind,
@ -18,55 +22,54 @@ use std::{
use tokio::{ use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}, io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt},
net::{TcpSocket, TcpStream, UdpSocket}, net::{TcpSocket, TcpStream, UdpSocket},
sync::{mpsc::Receiver, Mutex}, sync::{Mutex, mpsc::Receiver},
}; };
pub use tokio_util::sync::CancellationToken; pub use tokio_util::sync::CancellationToken;
use tproxy_config::is_private_ip; use tproxy_config::is_private_ip;
use udp_stream::UdpStream; use udp_stream::UdpStream;
#[cfg(feature = "udpgw")]
use udpgw::{UDPGW_KEEPALIVE_TIME, UDPGW_MAX_CONNECTIONS, UdpGwClientStream, UdpGwResponse};
pub use { pub use {
args::{ArgDns, ArgProxy, ArgVerbosity, Args, ProxyType}, args::{ArgDns, ArgProxy, ArgVerbosity, Args, ProxyType},
error::{BoxError, Error, Result}, error::{BoxError, Error, Result},
traffic_status::{tun2proxy_set_traffic_status_callback, TrafficStatus}, traffic_status::{TrafficStatus, tun2proxy_set_traffic_status_callback},
}; };
#[cfg(any(target_os = "windows", target_os = "macos", target_os = "linux"))] #[cfg(feature = "mimalloc")]
pub use desktop_api::desktop_run_async; #[global_allocator]
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[cfg(any(target_os = "ios", target_os = "android"))] pub use general_api::general_run_async;
pub use mobile_api::{desktop_run_async, mobile_run, mobile_stop};
#[cfg(target_os = "macos")]
pub use mobile_api::{mobile_run, mobile_stop};
mod android; mod android;
mod apple;
mod args; mod args;
mod desktop_api;
mod directions; mod directions;
mod dns; mod dns;
mod dump_logger; mod dump_logger;
mod error; mod error;
mod general_api;
mod http; mod http;
mod mobile_api;
mod no_proxy; mod no_proxy;
mod proxy_handler; mod proxy_handler;
mod session_info; mod session_info;
pub mod socket_transfer; pub mod socket_transfer;
mod socks; mod socks;
mod traffic_status; mod traffic_status;
#[cfg(feature = "udpgw")]
pub mod udpgw;
mod virtual_dns; mod virtual_dns;
#[doc(hidden)]
pub mod win_svc;
const DNS_PORT: u16 = 53;
-const MAX_SESSIONS: u64 = 200;
-static TASK_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
-use std::sync::atomic::Ordering::Relaxed;
#[allow(unused)]
#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)]
-#[cfg_attr(target_os = "linux", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(
+    target_os = "linux",
+    derive(bincode::Encode, bincode::Decode, serde::Serialize, serde::Deserialize)
+)]
pub enum SocketProtocol {
    Tcp,
    Udp,
@ -74,7 +77,10 @@ pub enum SocketProtocol {
#[allow(unused)]
#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)]
-#[cfg_attr(target_os = "linux", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(
+    target_os = "linux",
+    derive(bincode::Encode, bincode::Decode, serde::Serialize, serde::Deserialize)
+)]
pub enum SocketDomain {
    IpV4,
    IpV6,
@ -145,11 +151,13 @@ async fn create_udp_stream(socket_queue: &Option<Arc<SocketQueue>>, peer: Socket
/// * `mtu` - The MTU of the network device
/// * `args` - The arguments to use
/// * `shutdown_token` - The token to exit the server
-pub async fn run<D>(device: D, mtu: u16, args: Args, shutdown_token: CancellationToken) -> crate::Result<()>
+/// # Returns
+/// * The number of sessions while exiting
+pub async fn run<D>(device: D, mtu: u16, args: Args, shutdown_token: CancellationToken) -> crate::Result<usize>
where
    D: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
-    log::info!("{} {} starting...", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
+    log::info!("{} {} starting...", env!("CARGO_PKG_NAME"), version_info!());
    log::info!("Proxy {} server: {}", args.proxy.proxy_type, args.proxy.addr);
    let server_addr = args.proxy.addr;
@ -157,7 +165,7 @@ where
    let dns_addr = args.dns_addr;
    let ipv6_enabled = args.ipv6_enabled;
    let virtual_dns = if args.dns == ArgDns::Virtual {
-        Some(Arc::new(Mutex::new(VirtualDns::new())))
+        Some(Arc::new(Mutex::new(VirtualDns::new(args.virtual_dns_pool))))
    } else {
        None
    };
@ -213,11 +221,11 @@ where
    let socket_queue = None;
    use socks5_impl::protocol::Version::{V4, V5};
-    let mgr = match args.proxy.proxy_type {
-        ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)) as Arc<dyn ProxyHandlerManager>,
-        ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)) as Arc<dyn ProxyHandlerManager>,
-        ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)) as Arc<dyn ProxyHandlerManager>,
-        ProxyType::None => Arc::new(NoProxyManager::new()) as Arc<dyn ProxyHandlerManager>,
+    let mgr: Arc<dyn ProxyHandlerManager> = match args.proxy.proxy_type {
+        ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)),
+        ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)),
+        ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)),
+        ProxyType::None => Arc::new(NoProxyManager::new()),
    };
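As an aside (this snippet is illustrative and not part of the diff): giving the binding the explicit type `Arc<dyn ProxyHandlerManager>` is what lets every match arm drop its per-arm `as Arc<dyn ProxyHandlerManager>` cast, because the unsized coercion from the concrete `Arc<...>` now happens at the binding. A minimal sketch of the same pattern with stand-in types:

```rust
use std::sync::Arc;

trait Manager {}        // stand-in for ProxyHandlerManager
struct Socks;           // stand-in for SocksProxyManager
struct Http;            // stand-in for HttpManager
impl Manager for Socks {}
impl Manager for Http {}

fn pick(use_socks: bool) -> Arc<dyn Manager> {
    // The annotation on `mgr` drives the Arc<Concrete> -> Arc<dyn Manager> coercion,
    // so the individual branches need no explicit cast.
    let mgr: Arc<dyn Manager> = if use_socks { Arc::new(Socks) } else { Arc::new(Http) };
    mgr
}

fn main() {
    let _ = pick(true);
}
```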
    let mut ipstack_config = ipstack::IpStackConfig::default();
@ -227,7 +235,29 @@ where
    let mut ip_stack = ipstack::IpStack::new(ipstack_config, device);
+    #[cfg(feature = "udpgw")]
+    let udpgw_client = args.udpgw_server.map(|addr| {
+        log::info!("UDP Gateway enabled, server: {}", addr);
+        use std::time::Duration;
+        let client = Arc::new(UdpGwClient::new(
+            mtu,
+            args.udpgw_connections.unwrap_or(UDPGW_MAX_CONNECTIONS),
+            args.udpgw_keepalive.map(Duration::from_secs).unwrap_or(UDPGW_KEEPALIVE_TIME),
+            args.udp_timeout,
+            addr,
+        ));
+        let client_keepalive = client.clone();
+        tokio::spawn(async move {
+            let _ = client_keepalive.heartbeat_task().await;
+        });
+        client
+    });
+    let task_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0));
+    use std::sync::atomic::Ordering::Relaxed;
    loop {
+        let task_count = task_count.clone();
        let virtual_dns = virtual_dns.clone();
        let ip_stack_stream = tokio::select! {
            _ = shutdown_token.cancelled() => {
@ -238,13 +268,18 @@ where
                ip_stack_stream?
            }
        };
+        let max_sessions = args.max_sessions;
        match ip_stack_stream {
            IpStackStream::Tcp(tcp) => {
-                if TASK_COUNT.load(Relaxed) > MAX_SESSIONS {
-                    log::warn!("Too many sessions that over {MAX_SESSIONS}, dropping new session");
+                if task_count.load(Relaxed) >= max_sessions {
+                    if args.exit_on_fatal_error {
+                        log::info!("Too many sessions that over {max_sessions}, exiting...");
+                        break;
+                    }
+                    log::warn!("Too many sessions that over {max_sessions}, dropping new session");
                    continue;
                }
-                log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1);
+                log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1));
                let info = SessionInfo::new(tcp.local_addr(), tcp.peer_addr(), IpProtocol::Tcp);
                let domain_name = if let Some(virtual_dns) = &virtual_dns {
                    let mut virtual_dns = virtual_dns.lock().await;
@ -259,19 +294,23 @@ where
                    if let Err(err) = handle_tcp_session(tcp, proxy_handler, socket_queue).await {
                        log::error!("{} error \"{}\"", info, err);
                    }
-                    log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
+                    log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
                });
            }
            IpStackStream::Udp(udp) => {
-                if TASK_COUNT.load(Relaxed) > MAX_SESSIONS {
-                    log::warn!("Too many sessions that over {MAX_SESSIONS}, dropping new session");
+                if task_count.load(Relaxed) >= max_sessions {
+                    if args.exit_on_fatal_error {
+                        log::info!("Too many sessions that over {max_sessions}, exiting...");
+                        break;
+                    }
+                    log::warn!("Too many sessions that over {max_sessions}, dropping new session");
                    continue;
                }
-                log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1);
+                log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1));
                let mut info = SessionInfo::new(udp.local_addr(), udp.peer_addr(), IpProtocol::Udp);
                if info.dst.port() == DNS_PORT {
                    if is_private_ip(info.dst.ip()) {
-                        info.dst.set_ip(dns_addr);
+                        info.dst.set_ip(dns_addr); // !!! Here we change the destination address to remote DNS server!!!
                    }
                    if args.dns == ArgDns::OverTcp {
                        info.protocol = IpProtocol::Tcp;
@ -281,7 +320,7 @@ where
                        if let Err(err) = handle_dns_over_tcp_session(udp, proxy_handler, socket_queue, ipv6_enabled).await {
                            log::error!("{} error \"{}\"", info, err);
                        }
-                        log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
+                        log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
                    });
                    continue;
                }
@ -292,7 +331,7 @@ where
                        log::error!("{} error \"{}\"", info, err);
                    }
                }
-                log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
+                log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
                });
                continue;
            }
@ -305,6 +344,28 @@ where
                } else {
                    None
                };
+                #[cfg(feature = "udpgw")]
+                if let Some(udpgw) = udpgw_client.clone() {
+                    let tcp_src = match udp.peer_addr() {
+                        SocketAddr::V4(_) => SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0)),
+                        SocketAddr::V6(_) => SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::UNSPECIFIED, 0, 0, 0)),
+                    };
+                    let tcpinfo = SessionInfo::new(tcp_src, udpgw.get_udpgw_server_addr(), IpProtocol::Tcp);
+                    let proxy_handler = mgr.new_proxy_handler(tcpinfo, None, false).await?;
+                    let queue = socket_queue.clone();
+                    tokio::spawn(async move {
+                        let dst = info.dst; // real UDP destination address
+                        let dst_addr = match domain_name {
+                            Some(ref d) => socks5_impl::protocol::Address::from((d.clone(), dst.port())),
+                            None => dst.into(),
+                        };
+                        if let Err(e) = handle_udp_gateway_session(udp, udpgw, &dst_addr, proxy_handler, queue, ipv6_enabled).await {
+                            log::info!("Ending {} with \"{}\"", info, e);
+                        }
+                        log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
+                    });
+                    continue;
+                }
                match mgr.new_proxy_handler(info, domain_name, true).await {
                    Ok(proxy_handler) => {
                        let socket_queue = socket_queue.clone();
@ -313,7 +374,7 @@ where
                        if let Err(err) = handle_udp_associate_session(udp, ty, proxy_handler, socket_queue, ipv6_enabled).await {
                            log::info!("Ending {} with \"{}\"", info, err);
                        }
-                        log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1);
+                        log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1));
                    });
                }
                Err(e) => {
@ -332,7 +393,7 @@ where
            }
        }
    }
-    Ok(())
+    Ok(task_count.load(Relaxed))
}
async fn handle_virtual_dns_session(mut udp: IpStackUdpStream, dns: Arc<Mutex<VirtualDns>>) -> crate::Result<()> {
@ -423,6 +484,128 @@ async fn handle_tcp_session(
    Ok(())
}
#[cfg(feature = "udpgw")]
async fn handle_udp_gateway_session(
mut udp_stack: IpStackUdpStream,
udpgw_client: Arc<UdpGwClient>,
udp_dst: &socks5_impl::protocol::Address,
proxy_handler: Arc<Mutex<dyn ProxyHandler>>,
socket_queue: Option<Arc<SocketQueue>>,
ipv6_enabled: bool,
) -> crate::Result<()> {
let proxy_server_addr = { proxy_handler.lock().await.get_server_addr() };
let udp_mtu = udpgw_client.get_udp_mtu();
let udp_timeout = udpgw_client.get_udp_timeout();
let mut stream = loop {
match udpgw_client.pop_server_connection_from_queue().await {
Some(stream) => {
if stream.is_closed() {
continue;
} else {
break stream;
}
}
None => {
let mut tcp_server_stream = create_tcp_stream(&socket_queue, proxy_server_addr).await?;
if let Err(e) = handle_proxy_session(&mut tcp_server_stream, proxy_handler).await {
return Err(format!("udpgw connection error: {}", e).into());
}
break UdpGwClientStream::new(tcp_server_stream);
}
}
};
let tcp_local_addr = stream.local_addr();
let sn = stream.serial_number();
log::info!("[UdpGw] Beginning stream {} {} -> {}", sn, &tcp_local_addr, udp_dst);
let Some(mut reader) = stream.get_reader() else {
return Err("get reader failed".into());
};
let Some(mut writer) = stream.get_writer() else {
return Err("get writer failed".into());
};
let mut tmp_buf = vec![0; udp_mtu.into()];
loop {
tokio::select! {
len = udp_stack.read(&mut tmp_buf) => {
let read_len = match len {
Ok(0) => {
log::info!("[UdpGw] Ending stream {} {} <> {}", sn, &tcp_local_addr, udp_dst);
break;
}
Ok(n) => n,
Err(e) => {
log::info!("[UdpGw] Ending stream {} {} <> {} with udp stack \"{}\"", sn, &tcp_local_addr, udp_dst, e);
break;
}
};
crate::traffic_status::traffic_status_update(read_len, 0)?;
let sn = stream.serial_number();
if let Err(e) = UdpGwClient::send_udpgw_packet(ipv6_enabled, &tmp_buf[0..read_len], udp_dst, sn, &mut writer).await {
log::info!("[UdpGw] Ending stream {} {} <> {} with send_udpgw_packet {}", sn, &tcp_local_addr, udp_dst, e);
break;
}
log::debug!("[UdpGw] stream {} {} -> {} send len {}", sn, &tcp_local_addr, udp_dst, read_len);
stream.update_activity();
}
ret = UdpGwClient::recv_udpgw_packet(udp_mtu, udp_timeout, &mut reader) => {
if let Ok((len, _)) = ret {
crate::traffic_status::traffic_status_update(0, len)?;
}
match ret {
Err(e) => {
log::warn!("[UdpGw] Ending stream {} {} <> {} with recv_udpgw_packet {}", sn, &tcp_local_addr, udp_dst, e);
stream.close();
break;
}
Ok((_, packet)) => match packet {
// a keepalive response is not expected on a data stream
UdpGwResponse::KeepAlive => {
log::error!("[UdpGw] Ending stream {} {} <> {} with recv keepalive", sn, &tcp_local_addr, udp_dst);
stream.close();
break;
}
// the server-side UDP association may have timed out; keep the stream and continue receiving
UdpGwResponse::Error => {
log::info!("[UdpGw] Ending stream {} {} <> {} with recv udp error", sn, &tcp_local_addr, udp_dst);
stream.update_activity();
continue;
}
UdpGwResponse::TcpClose => {
log::error!("[UdpGw] Ending stream {} {} <> {} with tcp closed", sn, &tcp_local_addr, udp_dst);
stream.close();
break;
}
UdpGwResponse::Data(data) => {
use socks5_impl::protocol::StreamOperation;
let len = data.len();
let f = data.header.flags;
log::debug!("[UdpGw] stream {sn} {} <- {} receive {f} len {len}", &tcp_local_addr, udp_dst);
if let Err(e) = udp_stack.write_all(&data.data).await {
log::error!("[UdpGw] Ending stream {} {} <> {} with send_udp_packet {}", sn, &tcp_local_addr, udp_dst, e);
break;
}
}
}
}
stream.update_activity();
}
}
}
if !stream.is_closed() {
udpgw_client.store_server_connection_full(stream, reader, writer).await;
}
Ok(())
}
async fn handle_udp_associate_session(
    mut udp_stack: IpStackUdpStream,
    proxy_type: ProxyType,


@ -1,80 +0,0 @@
#![cfg(any(target_os = "ios", target_os = "android", target_os = "macos"))]
use crate::Args;
use std::os::raw::c_int;
static TUN_QUIT: std::sync::Mutex<Option<tokio_util::sync::CancellationToken>> = std::sync::Mutex::new(None);
/// Dummy function to make the build pass.
#[doc(hidden)]
#[cfg(not(target_os = "macos"))]
pub async fn desktop_run_async(_: Args, _: tokio_util::sync::CancellationToken) -> std::io::Result<()> {
Ok(())
}
pub fn mobile_run(args: Args, tun_mtu: u16, _packet_information: bool) -> c_int {
let shutdown_token = tokio_util::sync::CancellationToken::new();
{
if let Ok(mut lock) = TUN_QUIT.lock() {
if lock.is_some() {
log::error!("tun2proxy already started");
return -1;
}
*lock = Some(shutdown_token.clone());
} else {
log::error!("failed to lock tun2proxy quit token");
return -2;
}
}
let block = async move {
let mut config = tun2::Configuration::default();
#[cfg(unix)]
if let Some(fd) = args.tun_fd {
config.raw_fd(fd);
} else if let Some(ref tun) = args.tun {
config.tun_name(tun);
}
#[cfg(windows)]
if let Some(ref tun) = args.tun {
config.tun_name(tun);
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
config.platform_config(|config| {
config.packet_information(_packet_information);
});
let device = tun2::create_as_async(&config).map_err(std::io::Error::from)?;
let join_handle = tokio::spawn(crate::run(device, tun_mtu, args, shutdown_token));
join_handle.await.map_err(std::io::Error::from)?
};
let exit_code = match tokio::runtime::Builder::new_multi_thread().enable_all().build() {
Err(e) => {
log::error!("failed to create tokio runtime with error: {:?}", e);
-1
}
Ok(rt) => match rt.block_on(block) {
Ok(_) => 0,
Err(e) => {
log::error!("failed to run tun2proxy with error: {:?}", e);
-2
}
},
};
exit_code
}
pub fn mobile_stop() -> c_int {
if let Ok(mut lock) = TUN_QUIT.lock() {
if let Some(shutdown_token) = lock.take() {
shutdown_token.cancel();
return 0;
}
}
-1
}


@ -16,7 +16,7 @@ impl std::fmt::Display for IpProtocol {
            IpProtocol::Tcp => write!(f, "TCP"),
            IpProtocol::Udp => write!(f, "UDP"),
            IpProtocol::Icmp => write!(f, "ICMP"),
-            IpProtocol::Other(v) => write!(f, "Other({})", v),
+            IpProtocol::Other(v) => write!(f, "Other(0x{:02X})", v),
        }
    }
}


@ -1,10 +1,10 @@
#![cfg(target_os = "linux")]
-use crate::{error, SocketDomain, SocketProtocol};
+use crate::{SocketDomain, SocketProtocol, error};
use nix::{
    errno::Errno,
    fcntl::{self, FdFlag},
-    sys::socket::{cmsg_space, getsockopt, recvmsg, sendmsg, sockopt, ControlMessage, ControlMessageOwned, MsgFlags, SockType},
+    sys::socket::{ControlMessage, ControlMessageOwned, MsgFlags, SockType, cmsg_space, getsockopt, recvmsg, sendmsg, sockopt},
};
use serde::{Deserialize, Serialize};
use std::{
@ -16,31 +16,31 @@ use tokio::net::{TcpSocket, UdpSocket, UnixDatagram};
const REQUEST_BUFFER_SIZE: usize = 64;
-#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
+#[derive(bincode::Encode, bincode::Decode, Hash, Copy, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
struct Request {
    protocol: SocketProtocol,
    domain: SocketDomain,
    number: u32,
}
-#[derive(Hash, Copy, Clone, Eq, PartialEq, Debug, Serialize, Deserialize)]
+#[derive(bincode::Encode, bincode::Decode, PartialEq, Debug, Hash, Copy, Clone, Eq, Serialize, Deserialize)]
enum Response {
    Ok,
}
/// Reconstruct socket from raw `fd`
pub fn reconstruct_socket(fd: RawFd) -> Result<OwnedFd> {
-    // Check if `fd` is valid
-    let fd_flags = fcntl::fcntl(fd, fcntl::F_GETFD)?;
    // `fd` is confirmed to be valid so it should be closed
    let socket = unsafe { OwnedFd::from_raw_fd(fd) };
+    // Check if `fd` is valid
+    let fd_flags = fcntl::fcntl(socket.as_fd(), fcntl::F_GETFD)?;
    // Insert CLOEXEC flag to the `fd` to prevent further propagation across `execve(2)` calls
    let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?;
    if !fd_flags.contains(FdFlag::FD_CLOEXEC) {
        fd_flags.insert(FdFlag::FD_CLOEXEC);
-        fcntl::fcntl(fd, fcntl::F_SETFD(fd_flags))?;
+        fcntl::fcntl(socket.as_fd(), fcntl::F_SETFD(fd_flags))?;
    }
    Ok(socket)
@ -70,12 +70,12 @@ pub async fn create_transfer_socket_pair() -> std::io::Result<(UnixDatagram, Own
    let remote_fd: OwnedFd = remote.into_std().unwrap().into();
    // Get `remote_fd` flags
-    let fd_flags = fcntl::fcntl(remote_fd.as_raw_fd(), fcntl::F_GETFD)?;
+    let fd_flags = fcntl::fcntl(remote_fd.as_fd(), fcntl::F_GETFD)?;
    // Remove CLOEXEC flag from the `remote_fd` to allow propagating across `execve(2)`
    let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?;
    fd_flags.remove(FdFlag::FD_CLOEXEC);
-    fcntl::fcntl(remote_fd.as_raw_fd(), fcntl::F_SETFD(fd_flags))?;
+    fcntl::fcntl(remote_fd.as_fd(), fcntl::F_SETFD(fd_flags))?;
    Ok((local, remote_fd))
}
@ -135,14 +135,21 @@ where
    // Borrow socket as mut to prevent multiple simultaneous requests
    let socket = socket.deref_mut();
-    // Send request
-    let request = bincode::serialize(&Request {
-        protocol: T::domain(),
-        domain,
-        number,
-    })?;
-    socket.send(&request[..]).await?;
+    let mut request = [0u8; 1000];
+    // Send request
+    let size = bincode::encode_into_slice(
+        Request {
+            protocol: T::domain(),
+            domain,
+            number,
+        },
+        &mut request,
+        bincode::config::standard(),
+    )
+    .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
+    socket.send(&request[..size]).await?;
    // Receive response
    loop {
@ -161,7 +168,9 @@ where
        // Parse response
        let response = &msg.iovs().next().unwrap()[..msg.bytes];
-        let response: Response = bincode::deserialize(response)?;
+        let response: Response = bincode::decode_from_slice(response, bincode::config::standard())
+            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?
+            .0;
        if !matches!(response, Response::Ok) {
            return Err("Request for new sockets failed".into());
        }
@ -194,10 +203,14 @@ pub async fn process_socket_requests(socket: &UnixDatagram) -> error::Result<()>
    let len = socket.recv(&mut buf[..]).await?;
-    let request: Request = bincode::deserialize(&buf[..len])?;
+    let request: Request = bincode::decode_from_slice(&buf[..len], bincode::config::standard())
+        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?
+        .0;
    let response = Response::Ok;
-    let buf = bincode::serialize(&response)?;
+    let mut buf = [0u8; 1000];
+    let size = bincode::encode_into_slice(response, &mut buf, bincode::config::standard())
+        .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidInput, e))?;
    let mut owned_fd_buf: Vec<OwnedFd> = Vec::with_capacity(request.number as usize);
    for _ in 0..request.number {
@ -223,7 +236,7 @@ pub async fn process_socket_requests(socket: &UnixDatagram) -> error::Result<()>
    let raw_fd_buf: Vec<RawFd> = owned_fd_buf.iter().map(|fd| fd.as_raw_fd()).collect();
    let cmsg = ControlMessage::ScmRights(&raw_fd_buf[..]);
-    let iov = [IoSlice::new(&buf[..])];
+    let iov = [IoSlice::new(&buf[..size])];
    sendmsg::<()>(socket.as_raw_fd(), &iov, &[cmsg], MsgFlags::empty(), None)?;
}
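As an aside (illustrative only, not part of the diff): the hunks above migrate from bincode 1's `serialize`/`deserialize` to the bincode 2 API, i.e. `Encode`/`Decode` derives plus `encode_into_slice`/`decode_from_slice` with an explicit `bincode::config::standard()`. A minimal round trip under that API, using an invented struct for illustration:

```rust
// Assumed Cargo.toml entry: bincode = { version = "2", features = ["derive"] }
#[derive(bincode::Encode, bincode::Decode, Debug, PartialEq)]
struct Request {
    protocol: u8,
    number: u32,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut buf = [0u8; 64];
    // encode_into_slice reports how many bytes were written into the buffer.
    let size = bincode::encode_into_slice(Request { protocol: 1, number: 4 }, &mut buf, bincode::config::standard())?;
    // decode_from_slice returns the decoded value together with the number of bytes consumed.
    let (decoded, _read): (Request, usize) = bincode::decode_from_slice(&buf[..size], bincode::config::standard())?;
    assert_eq!(decoded, Request { protocol: 1, number: 4 });
    Ok(())
}
```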


@ -4,7 +4,7 @@ use crate::{
    proxy_handler::{ProxyHandler, ProxyHandlerManager},
    session_info::SessionInfo,
};
-use socks5_impl::protocol::{self, handshake, password_method, Address, AuthMethod, StreamOperation, UserKey, Version};
+use socks5_impl::protocol::{self, Address, AuthMethod, StreamOperation, UserKey, Version, handshake, password_method};
use std::{collections::VecDeque, net::SocketAddr, sync::Arc};
use tokio::sync::Mutex;


@ -1,10 +1,11 @@
use crate::error::{Error, Result};
use std::os::raw::c_void;
+use std::sync::{LazyLock, Mutex};
/// # Safety
///
/// set traffic status callback.
-#[no_mangle]
+#[unsafe(no_mangle)]
pub unsafe extern "C" fn tun2proxy_set_traffic_status_callback(
    send_interval_secs: u32,
    callback: Option<unsafe extern "C" fn(*const TrafficStatus, *mut c_void)>,
@ -33,7 +34,7 @@ struct TrafficStatusCallback(Option<unsafe extern "C" fn(*const TrafficStatus, *
impl TrafficStatusCallback {
    unsafe fn call(self, info: &TrafficStatus) {
        if let Some(cb) = self.0 {
-            cb(info, self.1);
+            unsafe { cb(info, self.1) };
        }
    }
}
@ -44,10 +45,8 @@ unsafe impl Sync for TrafficStatusCallback {}
static TRAFFIC_STATUS_CALLBACK: std::sync::Mutex<Option<TrafficStatusCallback>> = std::sync::Mutex::new(None);
static SEND_INTERVAL_SECS: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(1);
-lazy_static::lazy_static! {
-    static ref TRAFFIC_STATUS: std::sync::Mutex<TrafficStatus> = std::sync::Mutex::new(TrafficStatus::default());
-    static ref TIME_STAMP: std::sync::Mutex<std::time::Instant> = std::sync::Mutex::new(std::time::Instant::now());
-}
+static TRAFFIC_STATUS: LazyLock<Mutex<TrafficStatus>> = LazyLock::new(|| Mutex::new(TrafficStatus::default()));
+static TIME_STAMP: LazyLock<Mutex<std::time::Instant>> = LazyLock::new(|| Mutex::new(std::time::Instant::now()));
pub(crate) fn traffic_status_update(delta_tx: usize, delta_rx: usize) -> Result<()> {
    {

src/udpgw.rs (new file, 578 lines)

@ -0,0 +1,578 @@
use crate::error::Result;
use socks5_impl::protocol::{Address, AsyncStreamOperation, BufMut, StreamOperation};
use std::{collections::VecDeque, hash::Hash, net::SocketAddr, sync::atomic::Ordering::Relaxed};
use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::{
TcpStream,
tcp::{OwnedReadHalf, OwnedWriteHalf},
},
sync::Mutex,
time::{Duration, sleep},
};
pub(crate) const UDPGW_LENGTH_FIELD_SIZE: usize = std::mem::size_of::<u16>();
pub(crate) const UDPGW_MAX_CONNECTIONS: usize = 5;
pub(crate) const UDPGW_KEEPALIVE_TIME: tokio::time::Duration = std::time::Duration::from_secs(30);
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct UdpFlag(pub u8);
impl UdpFlag {
pub const ZERO: UdpFlag = UdpFlag(0x00);
pub const KEEPALIVE: UdpFlag = UdpFlag(0x01);
pub const ERR: UdpFlag = UdpFlag(0x20);
pub const DATA: UdpFlag = UdpFlag(0x02);
}
impl std::fmt::Display for UdpFlag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let flag = match self.0 {
0x00 => "ZERO",
0x01 => "KEEPALIVE",
0x20 => "ERR",
0x02 => "DATA",
n => return write!(f, "Unknown UdpFlag(0x{:02X})", n),
};
write!(f, "{}", flag)
}
}
impl std::ops::BitAnd for UdpFlag {
type Output = Self;
fn bitand(self, rhs: Self) -> Self::Output {
UdpFlag(self.0 & rhs.0)
}
}
impl std::ops::BitOr for UdpFlag {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
UdpFlag(self.0 | rhs.0)
}
}
/// UDP Gateway Packet Format
///
/// The format is referenced from SOCKS5 packet format, with additional flags and connection ID fields.
///
/// `LEN`: This field indicates the length of the packet, not including the length field itself.
///
/// `FLAGS`: This field is used to indicate the packet type. The flags are defined as follows:
/// - `0x01`: Keepalive packet without address and data
/// - `0x20`: Error packet without address and data
/// - `0x02`: Data packet with address and data
///
/// `CONN_ID`: This field is used to indicate the unique connection ID for the packet.
///
/// `ATYP` & `DST.ADDR` & `DST.PORT`: These fields indicate the remote address and port.
/// It can be either an IPv4 address, an IPv6 address, or a domain name, depending on the `ATYP` field.
/// The address format directly uses the address format of the [SOCKS5](https://datatracker.ietf.org/doc/html/rfc1928#section-4) protocol.
/// - `ATYP`: Address Type, 1 byte, indicating the type of address ( 0x01-IPv4, 0x04-IPv6, or 0x03-domain name )
/// - `DST.ADDR`: Destination Address. If `ATYP` is 0x01 or 0x04, it is 4 or 16 bytes of IP address;
/// If `ATYP` is 0x03, it is a domain name, `DST.ADDR` is a variable length field,
/// it begins with a 1-byte length field and then the domain name without null-termination,
/// since the length field is 1 byte, the maximum length of the domain name is 255 bytes.
/// - `DST.PORT`: Destination Port, 2 bytes, the port number of the destination address.
///
/// `DATA`: The data field, a variable length field, the length is determined by the `LEN` field.
///
/// All numeric fields are in big-endian byte order.
///
/// ```plain
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// | LEN | | FLAGS | CONN_ID | | ATYP | DST.ADDR | DST.PORT | | DATA |
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// | 2 | | 1 | 2 | | 1 | Variable | 2 | | Variable |
/// +-----+ +-------+---------+ +------+----------+----------+ +----------+
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Packet {
pub header: UdpgwHeader,
pub address: Option<Address>,
pub data: Vec<u8>,
}
impl std::fmt::Display for Packet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let addr = self.address.as_ref().map_or("None".to_string(), |addr| addr.to_string());
let len = self.data.len();
write!(f, "Packet {{ {}, address: {}, payload length: {} }}", self.header, addr, len)
}
}
impl From<Packet> for Vec<u8> {
fn from(packet: Packet) -> Vec<u8> {
(&packet).into()
}
}
impl From<&Packet> for Vec<u8> {
fn from(packet: &Packet) -> Vec<u8> {
let mut bytes: Vec<u8> = vec![];
packet.write_to_buf(&mut bytes);
bytes
}
}
impl TryFrom<&[u8]> for Packet {
type Error = std::io::Error;
fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
if value.len() < UDPGW_LENGTH_FIELD_SIZE {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut iter = std::io::Cursor::new(value);
use tokio_util::bytes::Buf;
let length = iter.get_u16();
if value.len() < length as usize + UDPGW_LENGTH_FIELD_SIZE {
return Err(std::io::ErrorKind::InvalidData.into());
}
let header = UdpgwHeader::retrieve_from_stream(&mut iter)?;
let address = if header.flags & UdpFlag::DATA != UdpFlag::ZERO {
Some(Address::retrieve_from_stream(&mut iter)?)
} else {
None
};
Ok(Packet::new(header, address, iter.chunk()))
}
}
impl Packet {
pub fn new(header: UdpgwHeader, address: Option<Address>, data: &[u8]) -> Self {
let data = data.to_vec();
Packet { header, address, data }
}
pub fn build_keepalive_packet(conn_id: u16) -> Self {
Packet::new(UdpgwHeader::new(UdpFlag::KEEPALIVE, conn_id), None, &[])
}
pub fn build_error_packet(conn_id: u16) -> Self {
Packet::new(UdpgwHeader::new(UdpFlag::ERR, conn_id), None, &[])
}
pub fn build_packet_from_address(conn_id: u16, remote_addr: &Address, data: &[u8]) -> std::io::Result<Self> {
use socks5_impl::protocol::Address::{DomainAddress, SocketAddress};
let packet = match remote_addr {
SocketAddress(addr) => Packet::build_ip_packet(conn_id, *addr, data),
DomainAddress(domain, port) => Packet::build_domain_packet(conn_id, *port, domain, data)?,
};
Ok(packet)
}
pub fn build_ip_packet(conn_id: u16, remote_addr: SocketAddr, data: &[u8]) -> Self {
let addr: Address = remote_addr.into();
Packet::new(UdpgwHeader::new(UdpFlag::DATA, conn_id), Some(addr), data)
}
pub fn build_domain_packet(conn_id: u16, port: u16, domain: &str, data: &[u8]) -> std::io::Result<Self> {
if domain.len() > 255 {
return Err(std::io::ErrorKind::InvalidInput.into());
}
let addr = Address::from((domain, port));
Ok(Packet::new(UdpgwHeader::new(UdpFlag::DATA, conn_id), Some(addr), data))
}
}
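As an aside (illustrative only, not part of the new file): the builders above, together with the `From<&Packet> for Vec<u8>` impl, produce exactly the wire layout documented earlier. A small sketch, written as if it ran inside this crate so `Packet` and `UdpFlag` are in scope:

```rust
use std::net::SocketAddr;

fn main() {
    // Data packet for 1.2.3.4:53, connection id 7, four payload bytes.
    let dst: SocketAddr = "1.2.3.4:53".parse().unwrap();
    let packet = Packet::build_ip_packet(7, dst, b"\x12\x34\x56\x78");

    // Serialize through the `From<&Packet> for Vec<u8>` impl.
    let bytes: Vec<u8> = (&packet).into();

    // LEN(2) + FLAGS(1) + CONN_ID(2) + ATYP(1) + IPv4(4) + PORT(2) + DATA(4) = 16 bytes;
    // LEN itself counts everything after the length field: 14.
    assert_eq!(bytes.len(), 16);
    assert_eq!(u16::from_be_bytes([bytes[0], bytes[1]]), 14);
    assert_eq!(bytes[2], UdpFlag::DATA.0);
    assert_eq!(u16::from_be_bytes([bytes[3], bytes[4]]), 7);
}
```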
impl StreamOperation for Packet {
fn retrieve_from_stream<R>(stream: &mut R) -> std::io::Result<Self>
where
R: std::io::Read,
Self: Sized,
{
let mut buf = [0; UDPGW_LENGTH_FIELD_SIZE];
stream.read_exact(&mut buf)?;
let length = u16::from_be_bytes(buf) as usize;
let header = UdpgwHeader::retrieve_from_stream(stream)?;
let address = if header.flags & UdpFlag::DATA == UdpFlag::DATA {
Some(Address::retrieve_from_stream(stream)?)
} else {
None
};
let read_len = header.len() + address.as_ref().map_or(0, |addr| addr.len());
if length < read_len {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut data = vec![0; length - read_len];
stream.read_exact(&mut data)?;
Ok(Packet::new(header, address, &data))
}
fn write_to_buf<B: BufMut>(&self, buf: &mut B) {
let len = self.len() - UDPGW_LENGTH_FIELD_SIZE;
buf.put_u16(len as u16);
self.header.write_to_buf(buf);
if let Some(addr) = &self.address {
addr.write_to_buf(buf);
}
buf.put_slice(&self.data);
}
fn len(&self) -> usize {
UDPGW_LENGTH_FIELD_SIZE + self.header.len() + self.address.as_ref().map_or(0, |addr| addr.len()) + self.data.len()
}
}
#[async_trait::async_trait]
impl AsyncStreamOperation for Packet {
async fn retrieve_from_async_stream<R>(r: &mut R) -> std::io::Result<Self>
where
R: tokio::io::AsyncRead + Unpin + Send + ?Sized,
Self: Sized,
{
let mut buf = [0; UDPGW_LENGTH_FIELD_SIZE];
r.read_exact(&mut buf).await?;
let length = u16::from_be_bytes(buf) as usize;
let header = UdpgwHeader::retrieve_from_async_stream(r).await?;
let address = if header.flags & UdpFlag::DATA == UdpFlag::DATA {
Some(Address::retrieve_from_async_stream(r).await?)
} else {
None
};
let read_len = header.len() + address.as_ref().map_or(0, |addr| addr.len());
if length < read_len {
return Err(std::io::ErrorKind::InvalidData.into());
}
let mut data = vec![0; length - read_len];
r.read_exact(&mut data).await?;
Ok(Packet::new(header, address, &data))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct UdpgwHeader {
pub flags: UdpFlag,
pub conn_id: u16,
}
impl std::fmt::Display for UdpgwHeader {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} conn_id: {}", self.flags, self.conn_id)
}
}
impl StreamOperation for UdpgwHeader {
fn retrieve_from_stream<R>(stream: &mut R) -> std::io::Result<Self>
where
R: std::io::Read,
Self: Sized,
{
let mut buf = [0; UdpgwHeader::static_len()];
stream.read_exact(&mut buf)?;
UdpgwHeader::try_from(&buf[..])
}
fn write_to_buf<B: BufMut>(&self, buf: &mut B) {
let bytes: Vec<u8> = self.into();
buf.put_slice(&bytes);
}
fn len(&self) -> usize {
Self::static_len()
}
}
#[async_trait::async_trait]
impl AsyncStreamOperation for UdpgwHeader {
async fn retrieve_from_async_stream<R>(r: &mut R) -> std::io::Result<Self>
where
R: tokio::io::AsyncRead + Unpin + Send + ?Sized,
Self: Sized,
{
let mut buf = [0; UdpgwHeader::static_len()];
r.read_exact(&mut buf).await?;
UdpgwHeader::try_from(&buf[..])
}
}
impl UdpgwHeader {
pub fn new(flags: UdpFlag, conn_id: u16) -> Self {
UdpgwHeader { flags, conn_id }
}
pub const fn static_len() -> usize {
std::mem::size_of::<u8>() + std::mem::size_of::<u16>()
}
}
impl TryFrom<&[u8]> for UdpgwHeader {
type Error = std::io::Error;
fn try_from(value: &[u8]) -> std::result::Result<Self, Self::Error> {
if value.len() < UdpgwHeader::static_len() {
return Err(std::io::ErrorKind::InvalidData.into());
}
let conn_id = u16::from_be_bytes([value[1], value[2]]);
Ok(UdpgwHeader::new(UdpFlag(value[0]), conn_id))
}
}
impl From<&UdpgwHeader> for Vec<u8> {
fn from(header: &UdpgwHeader) -> Vec<u8> {
let mut bytes = vec![0; header.len()];
bytes[0] = header.flags.0;
bytes[1..3].copy_from_slice(&header.conn_id.to_be_bytes());
bytes
}
}
#[allow(dead_code)]
#[derive(Debug)]
pub(crate) enum UdpGwResponse {
KeepAlive,
Error,
TcpClose,
Data(Packet),
}
impl std::fmt::Display for UdpGwResponse {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
UdpGwResponse::KeepAlive => write!(f, "KeepAlive"),
UdpGwResponse::Error => write!(f, "Error"),
UdpGwResponse::TcpClose => write!(f, "TcpClose"),
UdpGwResponse::Data(packet) => write!(f, "Data({})", packet),
}
}
}
static SERIAL_NUMBER: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(1);
#[derive(Debug)]
pub(crate) struct UdpGwClientStream {
local_addr: SocketAddr,
writer: Option<OwnedWriteHalf>,
reader: Option<OwnedReadHalf>,
closed: bool,
last_activity: std::time::Instant,
serial_number: u16,
}
impl UdpGwClientStream {
pub fn close(&mut self) {
self.closed = true;
}
pub fn get_reader(&mut self) -> Option<OwnedReadHalf> {
self.reader.take()
}
pub fn set_reader(&mut self, reader: Option<OwnedReadHalf>) {
self.reader = reader;
}
pub fn set_writer(&mut self, writer: Option<OwnedWriteHalf>) {
self.writer = writer;
}
pub fn get_writer(&mut self) -> Option<OwnedWriteHalf> {
self.writer.take()
}
pub fn local_addr(&self) -> SocketAddr {
self.local_addr
}
pub fn update_activity(&mut self) {
self.last_activity = std::time::Instant::now();
}
pub fn is_closed(&self) -> bool {
self.closed
}
pub fn serial_number(&self) -> u16 {
self.serial_number
}
pub fn new(tcp_server_stream: TcpStream) -> Self {
let default = "0.0.0.0:0".parse::<SocketAddr>().unwrap();
let local_addr = tcp_server_stream.local_addr().unwrap_or(default);
let (reader, writer) = tcp_server_stream.into_split();
let serial_number = SERIAL_NUMBER.fetch_add(1, Relaxed);
UdpGwClientStream {
local_addr,
reader: Some(reader),
writer: Some(writer),
last_activity: std::time::Instant::now(),
closed: false,
serial_number,
}
}
}
#[derive(Debug)]
pub(crate) struct UdpGwClient {
udp_mtu: u16,
max_connections: usize,
udp_timeout: u64,
keepalive_time: Duration,
udpgw_server: SocketAddr,
server_connections: Mutex<VecDeque<UdpGwClientStream>>,
}
impl UdpGwClient {
pub fn new(udp_mtu: u16, max_connections: usize, keepalive_time: Duration, udp_timeout: u64, udpgw_server: SocketAddr) -> Self {
let server_connections = Mutex::new(VecDeque::with_capacity(max_connections));
UdpGwClient {
udp_mtu,
max_connections,
udp_timeout,
udpgw_server,
keepalive_time,
server_connections,
}
}
pub(crate) fn get_udp_mtu(&self) -> u16 {
self.udp_mtu
}
pub(crate) fn get_udp_timeout(&self) -> u64 {
self.udp_timeout
}
pub(crate) async fn pop_server_connection_from_queue(&self) -> Option<UdpGwClientStream> {
self.server_connections.lock().await.pop_front()
}
pub(crate) async fn store_server_connection(&self, stream: UdpGwClientStream) {
if self.server_connections.lock().await.len() < self.max_connections {
self.server_connections.lock().await.push_back(stream);
}
}
pub(crate) async fn store_server_connection_full(&self, mut stream: UdpGwClientStream, reader: OwnedReadHalf, writer: OwnedWriteHalf) {
if self.server_connections.lock().await.len() < self.max_connections {
stream.set_reader(Some(reader));
stream.set_writer(Some(writer));
self.server_connections.lock().await.push_back(stream);
}
}
pub(crate) fn get_udpgw_server_addr(&self) -> SocketAddr {
self.udpgw_server
}
/// Heartbeat task asynchronous function to periodically check and maintain the active state of the server connection.
pub(crate) async fn heartbeat_task(&self) -> std::io::Result<()> {
loop {
sleep(self.keepalive_time).await;
let mut streams = Vec::new();
while let Some(stream) = self.pop_server_connection_from_queue().await {
if !stream.is_closed() {
streams.push(stream);
}
}
let (mut tx, mut rx) = (0, 0);
for mut stream in streams {
if stream.last_activity.elapsed() < self.keepalive_time {
self.store_server_connection(stream).await;
continue;
}
let Some(mut stream_reader) = stream.get_reader() else {
continue;
};
let Some(mut stream_writer) = stream.get_writer() else {
continue;
};
let local_addr = stream_writer.local_addr()?;
let sn = stream.serial_number();
let keepalive_packet: Vec<u8> = Packet::build_keepalive_packet(sn).into();
tx += keepalive_packet.len();
if let Err(e) = stream_writer.write_all(&keepalive_packet).await {
log::warn!("stream {} {:?} send keepalive failed: {}", sn, local_addr, e);
continue;
}
match UdpGwClient::recv_udpgw_packet(self.udp_mtu, self.udp_timeout, &mut stream_reader).await {
Ok((len, UdpGwResponse::KeepAlive)) => {
stream.update_activity();
self.store_server_connection_full(stream, stream_reader, stream_writer).await;
log::trace!("stream {sn} {:?} send keepalive and recieve it successfully", local_addr);
rx += len;
}
Ok((len, v)) => {
log::debug!("stream {sn} {:?} keepalive unexpected response: {v}", local_addr);
rx += len;
}
Err(e) => log::debug!("stream {sn} {:?} keepalive no response, error \"{e}\"", local_addr),
}
}
crate::traffic_status::traffic_status_update(tx, rx)?;
}
}
/// Parses the UDP response data.
pub(crate) fn parse_udp_response(udp_mtu: u16, packet: Packet) -> Result<UdpGwResponse> {
let flags = packet.header.flags;
if flags & UdpFlag::ERR == UdpFlag::ERR {
return Ok(UdpGwResponse::Error);
}
if flags & UdpFlag::KEEPALIVE == UdpFlag::KEEPALIVE {
return Ok(UdpGwResponse::KeepAlive);
}
if packet.data.len() > udp_mtu as usize {
return Err("too much data".into());
}
Ok(UdpGwResponse::Data(packet))
}
/// Receives a UDP gateway packet.
///
/// This function is responsible for receiving packets from the UDP gateway
///
/// # Arguments
/// - `udp_mtu`: The maximum transmission unit size for UDP packets.
/// - `udp_timeout`: The timeout in seconds for receiving UDP packets.
/// - `stream`: A mutable reference to the UDP gateway client stream reader.
///
/// # Returns
/// - `Result<UdpGwResponse>`: Returns a result type containing the parsed UDP gateway response, or an error if one occurs.
pub(crate) async fn recv_udpgw_packet(udp_mtu: u16, udp_timeout: u64, stream: &mut OwnedReadHalf) -> Result<(usize, UdpGwResponse)> {
let packet = tokio::time::timeout(
tokio::time::Duration::from_secs(udp_timeout + 2),
Packet::retrieve_from_async_stream(stream),
)
.await
.map_err(std::io::Error::from)??;
Ok((packet.len(), UdpGwClient::parse_udp_response(udp_mtu, packet)?))
}
/// Sends a UDP gateway packet.
///
/// This function constructs and sends a UDP gateway packet based on the IPv6 enabled status, data length,
/// remote address, domain (if any), connection ID, and the UDP gateway client writer stream.
///
/// # Arguments
///
/// * `ipv6_enabled` - Whether IPv6 is enabled
/// * `data` - The data packet
/// * `remote_addr` - Remote address
/// * `conn_id` - Connection ID
/// * `stream` - UDP gateway client writer stream
///
/// # Returns
///
/// Returns `Ok(())` if the packet is sent successfully, otherwise returns an error.
pub(crate) async fn send_udpgw_packet(
ipv6_enabled: bool,
data: &[u8],
remote_addr: &socks5_impl::protocol::Address,
conn_id: u16,
stream: &mut OwnedWriteHalf,
) -> Result<()> {
if !ipv6_enabled && remote_addr.get_type() == socks5_impl::protocol::AddressType::IPv6 {
return Err("ipv6 not support".into());
}
let out_data: Vec<u8> = Packet::build_packet_from_address(conn_id, remote_addr, data)?.into();
stream.write_all(&out_data).await?;
Ok(())
}
}


@ -1,12 +1,12 @@
use crate::error::Result;
-use hashlink::{linked_hash_map::RawEntryMut, LruCache};
+use hashlink::{LruCache, linked_hash_map::RawEntryMut};
use std::{
    collections::HashMap,
    convert::TryInto,
    net::{IpAddr, Ipv4Addr, Ipv6Addr},
-    str::FromStr,
    time::{Duration, Instant},
};
+use tproxy_config::IpCidr;
const MAPPING_TIMEOUT: u64 = 60; // Mapping timeout in seconds
@ -27,29 +27,17 @@ pub struct VirtualDns {
    next_addr: IpAddr,
}
-impl Default for VirtualDns {
-    fn default() -> Self {
-        let start_addr = Ipv4Addr::from_str("198.18.0.0").unwrap();
-        let prefix_len = 15;
-        let network_addr = calculate_network_addr(start_addr, prefix_len);
-        let broadcast_addr = calculate_broadcast_addr(start_addr, prefix_len);
+impl VirtualDns {
+    pub fn new(ip_pool: IpCidr) -> Self {
        Self {
            trailing_dot: false,
-            next_addr: start_addr.into(),
+            next_addr: ip_pool.first_address(),
            name_to_ip: HashMap::default(),
-            network_addr: IpAddr::from(network_addr),
-            broadcast_addr: IpAddr::from(broadcast_addr),
+            network_addr: ip_pool.first_address(),
+            broadcast_addr: ip_pool.last_address(),
            lru_cache: LruCache::new_unbounded(),
        }
    }
-}
-impl VirtualDns {
-    pub fn new() -> Self {
-        VirtualDns::default()
-    }
    /// Returns the DNS response to send back to the client.
    pub fn generate_query(&mut self, data: &[u8]) -> Result<(Vec<u8>, String, IpAddr)> {
@ -160,30 +148,3 @@ impl VirtualDns {
        }
    }
}
-fn calculate_network_addr(ip: std::net::Ipv4Addr, prefix_len: u8) -> std::net::Ipv4Addr {
-    let mask = (!0u32) << (32 - prefix_len);
-    let ip_u32 = u32::from_be_bytes(ip.octets());
-    std::net::Ipv4Addr::from((ip_u32 & mask).to_be_bytes())
-}
-fn calculate_broadcast_addr(ip: std::net::Ipv4Addr, prefix_len: u8) -> std::net::Ipv4Addr {
-    let mask = (!0u32) >> prefix_len;
-    let ip_u32 = u32::from_be_bytes(ip.octets());
-    std::net::Ipv4Addr::from((ip_u32 | mask).to_be_bytes())
-}
-#[cfg(test)]
-mod tests {
-    use super::*;
-    #[test]
-    fn test_cidr_addr() {
-        let start_addr = Ipv4Addr::from_str("198.18.0.0").unwrap();
-        let prefix_len = 15;
-        let network_addr = calculate_network_addr(start_addr, prefix_len);
-        let broadcast_addr = calculate_broadcast_addr(start_addr, prefix_len);
-        assert_eq!(network_addr, Ipv4Addr::from_str("198.18.0.0").unwrap());
-        assert_eq!(broadcast_addr, Ipv4Addr::from_str("198.19.255.255").unwrap());
-    }
-}

src/win_svc.rs (new file, 101 lines)

@ -0,0 +1,101 @@
#![cfg(windows)]
const SERVICE_NAME: &str = "tun2proxy";
windows_service::define_windows_service!(ffi_service_main, my_service_main);
pub fn start_service() -> Result<(), windows_service::Error> {
// Register generated `ffi_service_main` with the system and start the service,
// blocking this thread until the service is stopped.
windows_service::service_dispatcher::start(SERVICE_NAME, ffi_service_main)?;
Ok(())
}
fn my_service_main(arguments: Vec<std::ffi::OsString>) {
// The entry point where execution will start on a background thread after a call to
// `service_dispatcher::start` from `main`.
if let Err(_e) = run_service(arguments) {
log::error!("Error: {:?}", _e);
}
}
fn run_service(_arguments: Vec<std::ffi::OsString>) -> Result<(), crate::BoxError> {
use windows_service::service::ServiceControl;
use windows_service::service_control_handler::{self, ServiceControlHandlerResult};
let shutdown_token = crate::CancellationToken::new();
let shutdown_token_clone = shutdown_token.clone();
let event_handler = move |control_event| -> ServiceControlHandlerResult {
match control_event {
ServiceControl::Stop => {
// Handle stop event and return control back to the system.
shutdown_token_clone.cancel();
ServiceControlHandlerResult::NoError
}
// All services must accept Interrogate even if it's a no-op.
ServiceControl::Interrogate => ServiceControlHandlerResult::NoError,
_ => ServiceControlHandlerResult::NotImplemented,
}
};
// Register system service event handler
let status_handle = service_control_handler::register(SERVICE_NAME, event_handler)?;
let mut next_status = windows_service::service::ServiceStatus {
// Should match the one from system service registry
service_type: windows_service::service::ServiceType::OWN_PROCESS,
// The new state
current_state: windows_service::service::ServiceState::Running,
// Accept stop events when running
controls_accepted: windows_service::service::ServiceControlAccept::STOP,
// Used to report an error when starting or stopping only, otherwise must be zero
exit_code: windows_service::service::ServiceExitCode::Win32(0),
// Only used for pending states, otherwise must be zero
checkpoint: 0,
// Only used for pending states, otherwise must be zero
wait_hint: std::time::Duration::default(),
// Unused for setting status
process_id: None,
};
// Tell the system that the service is running now
status_handle.set_service_status(next_status.clone())?;
// main logic here
{
let args = crate::Args::parse_args();
let default = format!("{:?},trust_dns_proto=warn", args.verbosity);
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init();
let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?;
rt.block_on(async {
unsafe extern "C" fn traffic_cb(status: *const crate::TrafficStatus, _: *mut std::ffi::c_void) {
let status = unsafe { &*status };
log::debug!("Traffic: ▲ {} : ▼ {}", status.tx, status.rx);
}
unsafe { crate::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) };
let ret = crate::general_run_async(args.clone(), tun::DEFAULT_MTU, false, shutdown_token).await;
match &ret {
Ok(sessions) => {
if args.exit_on_fatal_error && *sessions >= args.max_sessions {
log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions);
std::process::exit(-1);
}
log::debug!("tun2proxy exited normally, current sessions: {sessions}");
}
Err(err) => log::error!("main loop error: {err}"),
}
Ok::<(), crate::Error>(())
})?;
}
// Tell the system that the service is stopped now
next_status.current_state = windows_service::service::ServiceState::Stopped;
status_handle.set_service_status(next_status)?;
Ok(())
}


@ -8,7 +8,7 @@ echo $SCRIPT_DIR
netns="test"
dante="danted"
-tun2proxy="${SCRIPT_DIR}/../../target/release/tun2proxy"
+tun2proxy="${SCRIPT_DIR}/../../target/release/tun2proxy-bin"
ip netns add "$netns"
@ -47,4 +47,4 @@ iperf3 -c 10.0.0.4
iperf3 -c 10.0.0.4 -R -P 10
# Clean up
-# sudo sh -c "pkill tun2proxy; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"
+# sudo sh -c "pkill tun2proxy-bin; pkill iperf3; pkill danted; ip link del tun0; ip netns del test"


@ -1,2 +1,3 @@
requests
python-dotenv
+psutil


@ -4,6 +4,7 @@ import os
import subprocess
import time
import unittest
+import psutil
import dotenv
import requests
@ -29,10 +30,18 @@ def get_ip(version=None):
def get_tool_path():
-    default = glob.glob(os.path.join(os.path.dirname(__file__), '..', 'target', '*', 'tun2proxy'))
-    default = default[0] if len(default) > 0 else 'tun2proxy'
+    default = glob.glob(os.path.join(os.path.dirname(__file__), '..', 'target', '*', 'tun2proxy-bin'))
+    default = default[0] if len(default) > 0 else 'tun2proxy-bin'
    return os.environ.get('TOOL_PATH', default)
+def sudo_kill_process_and_children(proc):
+    try:
+        for child in psutil.Process(proc.pid).children(recursive=True):
+            if child.name() == 'tun2proxy-bin':
+                subprocess.run(['sudo', 'kill', str(child.pid)])
+        subprocess.run(['sudo', 'kill', str(proc.pid)])
+    except psutil.NoSuchProcess:
+        pass
class Tun2ProxyTest(unittest.TestCase):
    @staticmethod
@ -40,7 +49,7 @@ class Tun2ProxyTest(unittest.TestCase):
        ip_noproxy = get_ip(ip_version)
        additional = ['-6'] if ip_version == 6 else []
        p = subprocess.Popen(
-            [get_tool_path(), "--proxy", os.getenv(proxy_var), '--setup', '-v', 'trace', '--dns', dns, *additional])
+            ['sudo', get_tool_path(), "--proxy", os.getenv(proxy_var), '--setup', '-v', 'trace', '--dns', dns, *additional])
        try:
            time.sleep(1)
            ip_withproxy = get_ip(ip_version)
@ -49,6 +58,7 @@ class Tun2ProxyTest(unittest.TestCase):
        except Exception as e:
            raise e
        finally:
+            sudo_kill_process_and_children(p)
            p.terminate()
            p.wait()