diff --git a/.dockerignore b/.dockerignore
new file mode 120000
index 0000000..3e4e48b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1 @@
+.gitignore
\ No newline at end of file
diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml
new file mode 100644
index 0000000..d06ff8a
--- /dev/null
+++ b/.github/workflows/close-stale-issues.yml
@@ -0,0 +1,26 @@
+name: Close stale issues and PRs
+
+on:
+  schedule:
+    - cron: "0 0 * * *" # run a cron job every day at midnight
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Close stale issues and PRs
+        uses: actions/stale@v9
+        with:
+          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
+          stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
+          close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
+          close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
+          days-before-issue-stale: 30
+          days-before-pr-stale: 45
+          days-before-issue-close: 5
+          days-before-pr-close: 10
+          stale-issue-label: 'no-issue-activity'
+          exempt-issue-labels: 'keep-open,awaiting-approval,work-in-progress'
+          stale-pr-label: 'no-pr-activity'
+          exempt-pr-labels: 'awaiting-approval,work-in-progress'
+          # only-labels: 'awaiting-feedback,awaiting-answers'
diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml
index 10db660..541e53e 100644
--- a/.github/workflows/publish-docker.yml
+++ b/.github/workflows/publish-docker.yml
@@ -1,7 +1,5 @@
-#
-name: Create and publish a Docker image
+name: Publish Docker Images
 
-# Configures this workflow to run every time a change is pushed to the branch called `release`.
 on:
   push:
     tags: [ 'v*.*.*' ]
@@ -9,12 +7,20 @@ on:
 # Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
 env:
   REGISTRY: ghcr.io
-  IMAGE_NAME: ${{ github.repository }}
+  # This also contains the owner, i.e. tun2proxy/tun2proxy.
+  IMAGE_PATH: ${{ github.repository }}
+  IMAGE_NAME: ${{ github.event.repository.name }}
+  DEFAULT_OS: scratch
 
 # There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
 jobs:
   build-and-push-image:
+    name: Build and push Docker image
    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ 'scratch', 'ubuntu', 'alpine' ]
     # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
     permissions:
       contents: read
@@ -31,30 +37,36 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
-
+
      # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
       - name: Log in to the Container registry
-        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
       # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
-      - name: Extract metadata (tags, labels) for Docker
+      - name: Extract metadata (tags, labels) for Docker Image
         id: meta
-        uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
+        uses: docker/metadata-action@v5
         with:
-          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-
+          # We publish the images with an OS-suffix.
+          # The image based on a default OS is also published without a suffix.
+          images: |
+            ${{ env.REGISTRY }}/${{ env.IMAGE_PATH }}-${{ matrix.os }}
+            ${{ env.DEFAULT_OS == matrix.os && format('{0}/{1}', env.REGISTRY, env.IMAGE_PATH) || '' }}
+
       # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
       # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
       # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
       - name: Build and push Docker image
-        uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
+        uses: docker/build-push-action@v6
         with:
           platforms: linux/amd64,linux/arm64
           context: .
+          file: Dockerfile
+          target: ${{ env.IMAGE_NAME }}-${{ matrix.os }}
           push: true
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/publish-exe.yml b/.github/workflows/publish-exe.yml
index dc2f007..697f352 100644
--- a/.github/workflows/publish-exe.yml
+++ b/.github/workflows/publish-exe.yml
@@ -1,4 +1,5 @@
 on:
+  workflow_dispatch:
   push:
     tags:
       - "v*.*.*"
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 75d5c93..2e57100 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -1,12 +1,15 @@
 name: Push or PR
 
 on:
+  workflow_dispatch:
   push:
     branches:
       - '**'
   pull_request:
     branches:
       - '**'
+  schedule:
+    - cron: '0 0 * * 0' # Every Sunday at midnight UTC
 
 env:
   CARGO_TERM_COLOR: always
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index c274674..32d02f5 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -12,18 +12,16 @@ jobs:
   proxy_tests:
     name: Proxy Tests
     runs-on: ubuntu-latest
-    if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'safe to test')
+    if: (github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'safe to test')) && github.actor != 'dependabot[bot]' && github.actor != 'github-actions[bot]'
     steps:
-      - uses: actions/checkout@v2
-      - uses: actions-rs/toolchain@v1
-        with:
-          profile: minimal
-          toolchain: stable
-          override: true
+      - uses: actions/checkout@v4
+      - uses: dtolnay/rust-toolchain@stable
 
       - name: Populate .env
         env:
           DOTENV: ${{ secrets.DOTENV }}
-        run: echo "$DOTENV" > .env
+        run: |
+          echo "$DOTENV" > tests/.env
+          ln -s tests/.env
 
       - name: Set up Python
         uses: actions/setup-python@v2
@@ -40,7 +38,7 @@
       - name: Build project
         run: cargo build --release
-
+
       - name: Run tests
         run: |
           source venv/bin/activate
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..4a7b842
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,11 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.3.0
+    hooks:
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/rhysd/actionlint
+    rev: v1.7.7
+    hooks:
+      - id: actionlint
diff --git a/Cargo.toml b/Cargo.toml
index 2d48a4d..04c93b1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "tun2proxy"
-version = "0.7.6"
+version = "0.7.11"
 edition = "2024"
 license = "MIT"
 repository = "https://github.com/tun2proxy/tun2proxy"
@@ -31,51 +31,52 @@
 async-trait = "0.1"
 base64easy = "0.1"
 chrono = "0.4"
 clap = { version = "4", features = ["derive", "wrap_help", "color"] }
-ctrlc2 = { version = "3", features = ["tokio", "termination"] }
+ctrlc2 = { version = "3.6.5", features = ["async", "termination"] }
 digest_auth = "0.3"
 dotenvy = "0.15"
 env_logger = "0.11"
 hashlink = "0.10"
-hickory-proto = "0.24"
+hickory-proto = "0.25"
 httparse = "1"
-ipstack = { version = "0.2" }
+ipstack = { version = "0.4" }
 log = { version = "0.4", features = ["std"] }
 mimalloc = { version = "0.1", default-features = false, optional = true }
 percent-encoding = "2"
 shlex = "1.3.0"
-socks5-impl = { version = "0.6", default-features = false, features = [
+socks5-impl = { version = "0.7", default-features = false, features = [
     "tokio",
 ] }
 thiserror = "2"
 tokio = { version = "1", features = ["full"] }
 tokio-util = "0.7"
-tproxy-config = { version = "6", default-features = false }
-tun = { version = "0.7", features = ["async"] }
+tproxy-config = { version = "7", default-features = false }
+tun = { version = "0.8", features = ["async"] }
 udp-stream = { version = "0.0.12", default-features = false }
 unicase = "2"
 url = "2"
 
-[build-dependencies]
-serde_json = "1"
-
-[target.'cfg(target_os="linux")'.dependencies]
-serde = { version = "1", features = ["derive"] }
-bincode = "2"
-
 [target.'cfg(target_os="android")'.dependencies]
 android_logger = "0.15"
 jni = { version = "0.21", default-features = false }
 
+[target.'cfg(target_os="linux")'.dependencies]
+bincode = "2"
+serde = { version = "1", features = ["derive"] }
+
+[target.'cfg(target_os="windows")'.dependencies]
+windows-service = "0.8"
+
 [target.'cfg(unix)'.dependencies]
 daemonize = "0.5"
-nix = { version = "0.29", default-features = false, features = [
+nix = { version = "0.30", default-features = false, features = [
     "fs",
     "socket",
     "uio",
 ] }
 
-[target.'cfg(target_os = "windows")'.dependencies]
-windows-service = "0.8"
+[build-dependencies]
+chrono = "0.4"
+serde_json = "1"
 
-[profile.release]
-strip = "symbols"
+# [profile.release]
+# strip = "symbols"
diff --git a/Dockerfile b/Dockerfile
index e6ad592..f7aafdc 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,20 +1,61 @@
 ####################################################################################################
-## Builder
+# This is a multi-stage Dockerfile.
+# Build with `docker buildx build -t <image-name> --target <target> .`
+# For example, to build the Alpine-based image while naming it tun2proxy, run:
+# `docker buildx build -t tun2proxy --target tun2proxy-alpine .`
 ####################################################################################################
-FROM rust:latest AS builder
-
-WORKDIR /worker
-COPY ./ .
-RUN cargo build --release
-
 ####################################################################################################
-## Final image
+####################################################################################################
+## glibc builder
 ####################################################################################################
-FROM ubuntu:latest
+FROM rust:latest AS glibc-builder
 
-RUN apt update && apt install -y iproute2 && apt clean all
+    WORKDIR /worker
+    COPY ./ .
+    RUN cargo build --release
 
-COPY --from=builder /worker/target/release/tun2proxy-bin /usr/bin/tun2proxy-bin
+####################################################################################################
+## musl builder
+####################################################################################################
+FROM rust:latest AS musl-builder
 
-ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
+    WORKDIR /worker
+    COPY ./ .
+    RUN ARCH=$(rustc -vV | sed -nE 's/host:\s*([^-]+).*/\1/p') \
+        && rustup target add "$ARCH-unknown-linux-musl" \
+        && cargo build --release --target "$ARCH-unknown-linux-musl"
+
+    RUN mkdir /.etc \
+        && touch /.etc/resolv.conf \
+        && mkdir /.tmp \
+        && chmod 777 /.tmp \
+        && chmod +t /.tmp
+
+####################################################################################################
+## Alpine image
+####################################################################################################
+FROM alpine:latest AS tun2proxy-alpine
+
+    COPY --from=musl-builder /worker/target/*/release/tun2proxy-bin /usr/bin/tun2proxy-bin
+
+    ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
+
+####################################################################################################
+## Ubuntu image
+####################################################################################################
+FROM ubuntu:latest AS tun2proxy-ubuntu
+
+    COPY --from=glibc-builder /worker/target/release/tun2proxy-bin /usr/bin/tun2proxy-bin
+
+    ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
+
+####################################################################################################
+## OS-less image (default)
+####################################################################################################
+FROM scratch AS tun2proxy-scratch
+
+    COPY --from=musl-builder ./tmp /tmp
+    COPY --from=musl-builder ./etc /etc
+    COPY --from=musl-builder /worker/target/*/release/tun2proxy-bin /usr/bin/tun2proxy-bin
+
+    ENTRYPOINT ["/usr/bin/tun2proxy-bin", "--setup"]
diff --git a/README.md b/README.md
index 5c9b591..e813cb0 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ cargo build --release
 ```
 
 ### Building Framework for Apple Devices
-To build an XCFramework for macOS and iOS, run the following: 
+To build an XCFramework for macOS and iOS, run the following:
 ```
 ./build-apple.sh
 ```
@@ -149,8 +149,8 @@ Options:
       --unshare-pidfile <UNSHARE_PIDFILE>    Create a pidfile of `unshare` process when using `--unshare`
   -6, --ipv6-enabled                         IPv6 enabled
   -s, --setup                                Routing and system setup, which decides whether to setup the routing and system
-                                             configuration. This option is only available on Linux and requires root-like privileges.
-                                             See `capabilities(7)`
+                                             configuration. This option requires root-like privileges on every platform.
+                                             It is very important on Linux, see `capabilities(7)`
   -d, --dns <DNS>                            DNS handling strategy [default: direct] [possible values: virtual, over-tcp, direct]
       --dns-addr <DNS_ADDR>                  DNS resolver address [default: 8.8.8.8]
      --virtual-dns-pool <VIRTUAL_DNS_POOL>  IP address pool to be used by virtual DNS in CIDR notation [default: 198.18.0.0/15]
@@ -177,7 +177,16 @@ supplied as `--proxy http://john.doe:secret@1.2.3.4:3128`. This works analogousl
 Tun2proxy can serve as a proxy for other Docker containers. To make use of that feature, first build the image:
 
 ```bash
-docker build -t tun2proxy .
+docker buildx build -t tun2proxy .
+```
+
+This will build an image containing a statically linked `tun2proxy` binary (based on `musl`) without an OS.
+ +Alternatively, you can build images based on Ubuntu or Alpine as follows: + +```bash +docker buildx build -t tun2proxy --target tun2proxy-ubuntu . +docker buildx build -t tun2proxy --target tun2proxy-alpine . ``` Next, start a container from the tun2proxy image: @@ -188,7 +197,7 @@ docker run -d \ --sysctl net.ipv6.conf.default.disable_ipv6=0 \ --cap-add NET_ADMIN \ --name tun2proxy \ - tun2proxy-bin --proxy proto://[username[:password]@]host:port + tun2proxy --proxy proto://[username[:password]@]host:port ``` You can then provide the running container's network to another worker container by sharing the network namespace (like kubernetes sidecar): @@ -200,7 +209,7 @@ docker run -it \ ``` ### Docker Compose -Write a `docker-compose.yaml` file with the following content: +Create a `docker-compose.yaml` file with the following content: ```yaml services: @@ -212,7 +221,7 @@ services: cap_add: - NET_ADMIN container_name: tun2proxy - image: ghcr.io/tun2proxy/tun2proxy:latest + image: ghcr.io/tun2proxy/tun2proxy-ubuntu:latest command: --proxy proto://[username[:password]@]host:port alpine: stdin_open: true diff --git a/build.rs b/build.rs index a7dc782..a628cef 100644 --- a/build.rs +++ b/build.rs @@ -1,4 +1,13 @@ fn main() -> Result<(), Box> { + if let Ok(git_hash) = get_git_hash() { + // Set the environment variables + println!("cargo:rustc-env=GIT_HASH={}", git_hash.trim()); + } + + // Get the build time + let build_time = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S").to_string(); + println!("cargo:rustc-env=BUILD_TIME={build_time}"); + #[cfg(target_os = "windows")] if let Ok(cargo_target_dir) = get_cargo_target_dir() { let mut f = std::fs::File::create(cargo_target_dir.join("build.log"))?; @@ -19,7 +28,7 @@ fn main() -> Result<(), Box> { // Copy to the target directory if let Err(e) = std::fs::copy(src_path, &dst_path) { - f.write_all(format!("Failed to copy 'wintun.dll': {}\r\n", e).as_bytes())?; + f.write_all(format!("Failed to copy 'wintun.dll': {e}\r\n").as_bytes())?; } else { f.write_all(format!("Copied 'wintun.dll' to '{}'\r\n", dst_path.display()).as_bytes())?; @@ -85,3 +94,10 @@ fn get_crate_dir(crate_name: &str) -> Result std::io::Result { + use std::process::Command; + let git_hash = Command::new("git").args(["rev-parse", "--short", "HEAD"]).output()?.stdout; + let git_hash = String::from_utf8(git_hash).map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + Ok(git_hash) +} diff --git a/src/args.rs b/src/args.rs index 71a85f2..333e758 100644 --- a/src/args.rs +++ b/src/args.rs @@ -8,8 +8,19 @@ use std::ffi::OsString; use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::str::FromStr; +#[macro_export] +macro_rules! version_info { + () => { + concat!(env!("CARGO_PKG_VERSION"), " (", env!("GIT_HASH"), " ", env!("BUILD_TIME"), ")") + }; +} + +fn about_info() -> &'static str { + concat!("Tunnel interface to proxy.\nVersion: ", version_info!()) +} + #[derive(Debug, Clone, clap::Parser)] -#[command(author, version, about = "Tunnel interface to proxy.", long_about = None)] +#[command(author, version = version_info!(), about = about_info(), long_about = None)] pub struct Args { /// Proxy URL in the form proto://[username[:password]@]host:port, /// where proto is one of socks4, socks5, http. @@ -65,8 +76,9 @@ pub struct Args { pub ipv6_enabled: bool, /// Routing and system setup, which decides whether to setup the routing and system configuration. - /// This option is only available on Linux and requires root-like privileges. See `capabilities(7)`. 
- #[arg(short, long, default_value = if cfg!(target_os = "linux") { "false" } else { "true" })] + /// This option requires root-like privileges on every platform. + /// It is very important on Linux, see `capabilities(7)`. + #[arg(short, long)] pub setup: bool, /// DNS handling strategy @@ -367,7 +379,7 @@ impl Default for ArgProxy { impl std::fmt::Display for ArgProxy { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let auth = match &self.credentials { - Some(creds) => format!("{}", creds), + Some(creds) => format!("{creds}"), None => "".to_owned(), }; if auth.is_empty() { diff --git a/src/bin/main.rs b/src/bin/main.rs index b8ad59e..1f5142f 100644 --- a/src/bin/main.rs +++ b/src/bin/main.rs @@ -1,4 +1,4 @@ -use tun2proxy::{Args, BoxError}; +use tun2proxy::{ArgVerbosity, Args, BoxError}; fn main() -> Result<(), BoxError> { dotenvy::dotenv().ok(); @@ -27,20 +27,32 @@ fn main() -> Result<(), BoxError> { rt.block_on(main_async(args)) } -async fn main_async(args: Args) -> Result<(), BoxError> { - let default = format!("{:?},hickory_proto=warn", args.verbosity); +fn setup_logging(args: &Args) { + let avoid_trace = match args.verbosity { + ArgVerbosity::Trace => ArgVerbosity::Debug, + _ => args.verbosity, + }; + let default = format!( + "{:?},hickory_proto=warn,ipstack={:?},netlink_proto={:?},netlink_sys={:?}", + args.verbosity, avoid_trace, avoid_trace, avoid_trace + ); env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(default)).init(); +} + +async fn main_async(args: Args) -> Result<(), BoxError> { + setup_logging(&args); let shutdown_token = tokio_util::sync::CancellationToken::new(); let main_loop_handle = tokio::spawn({ + let args = args.clone(); let shutdown_token = shutdown_token.clone(); async move { #[cfg(target_os = "linux")] if args.unshare && args.socket_transfer_fd.is_none() { if let Err(err) = namespace_proxy_main(args, shutdown_token).await { - log::error!("namespace proxy error: {}", err); + log::error!("namespace proxy error: {err}"); } - return; + return Ok(0); } unsafe extern "C" fn traffic_cb(status: *const tun2proxy::TrafficStatus, _: *mut std::ffi::c_void) { @@ -49,26 +61,34 @@ async fn main_async(args: Args) -> Result<(), BoxError> { } unsafe { tun2proxy::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) }; - if let Err(err) = tun2proxy::general_run_async(args, tun::DEFAULT_MTU, cfg!(target_os = "macos"), shutdown_token).await { - log::error!("main loop error: {}", err); + let ret = tun2proxy::general_run_async(args, tun::DEFAULT_MTU, cfg!(target_os = "macos"), shutdown_token).await; + if let Err(err) = &ret { + log::error!("main loop error: {err}"); } + ret } }); let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); let ctrlc_fired_clone = ctrlc_fired.clone(); - let ctrlc_handel = ctrlc2::set_async_handler(async move { + let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || { log::info!("Ctrl-C received, exiting..."); ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst); shutdown_token.cancel(); - }) - .await; + true + })?; - main_loop_handle.await?; + let tasks = main_loop_handle.await??; if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) { log::info!("Ctrl-C fired, waiting the handler to finish..."); - ctrlc_handel.await.map_err(|err| err.to_string())?; + ctrlc_handel.await?; + } + + if args.exit_on_fatal_error && tasks >= args.max_sessions { + // Because `main_async` function perhaps stuck in `await` state, so we need to exit the process 
forcefully + log::info!("Internal fatal error, max sessions reached ({tasks}/{})", args.max_sessions); + std::process::exit(-1); } Ok(()) @@ -89,7 +109,7 @@ async fn namespace_proxy_main( let child = tokio::process::Command::new("unshare") .args("--user --map-current-user --net --mount --keep-caps --kill-child --fork".split(' ')) - .arg(format!("/proc/self/fd/{}", fd)) + .arg(format!("/proc/self/fd/{}", fd.as_raw_fd())) .arg("--socket-transfer-fd") .arg(remote_fd.as_raw_fd().to_string()) .args(std::env::args().skip(1)) @@ -113,13 +133,10 @@ async fn namespace_proxy_main( log::info!("Use `tun2proxy-bin --unshare --setup [...] -- openvpn --config [...]`"); log::info!(""); log::info!("To run a new process in the created namespace (e.g. a flatpak app)"); - log::info!( - "Use `nsenter --preserve-credentials --user --net --mount --target {} /bin/sh`", - unshare_pid - ); + log::info!("Use `nsenter --preserve-credentials --user --net --mount --target {unshare_pid} /bin/sh`"); log::info!(""); if let Some(pidfile) = _args.unshare_pidfile.as_ref() { - log::info!("Writing unshare pid to {}", pidfile); + log::info!("Writing unshare pid to {pidfile}"); std::fs::write(pidfile, unshare_pid.to_string()).ok(); } tokio::spawn(async move { tun2proxy::socket_transfer::process_socket_requests(&socket).await }); diff --git a/src/bin/udpgw_server.rs b/src/bin/udpgw_server.rs index 370897a..b6c2dc6 100644 --- a/src/bin/udpgw_server.rs +++ b/src/bin/udpgw_server.rs @@ -28,8 +28,12 @@ impl Client { } } +fn about_info() -> &'static str { + concat!("UDP Gateway Server for tun2proxy\nVersion: ", tun2proxy::version_info!()) +} + #[derive(Debug, Clone, clap::Parser)] -#[command(author, version, about = "UDP Gateway Server for tun2proxy", long_about = None)] +#[command(author, version = tun2proxy::version_info!(), about = about_info(), long_about = None)] pub struct UdpGwArgs { /// UDP gateway listen address #[arg(short, long, value_name = "IP:PORT", default_value = "127.0.0.1:7300")] @@ -62,14 +66,14 @@ impl UdpGwArgs { async fn send_error_response(tx: Sender, conn_id: u16) { let error_packet = Packet::build_error_packet(conn_id); if let Err(e) = tx.send(error_packet).await { - log::error!("send error response error {:?}", e); + log::error!("send error response error {e:?}"); } } async fn send_keepalive_response(tx: Sender, conn_id: u16) { let keepalive_packet = Packet::build_keepalive_packet(conn_id); if let Err(e) = tx.send(keepalive_packet).await { - log::error!("send keepalive response error {:?}", e); + log::error!("send keepalive response error {e:?}"); } } @@ -146,12 +150,12 @@ async fn process_client_udp_req(args: &UdpGwArgs, tx: Sender, mut client let packet = match res { Ok(Ok(packet)) => packet, Ok(Err(e)) => { - log::debug!("client {} retrieve_from_async_stream \"{}\"", masked_addr, e); + log::debug!("client {masked_addr} retrieve_from_async_stream \"{e}\""); break; } Err(e) => { if client.last_activity.elapsed() >= CLIENT_DISCONNECT_TIMEOUT { - log::debug!("client {} last_activity elapsed \"{e}\"", masked_addr); + log::debug!("client {masked_addr} last_activity elapsed \"{e}\""); break; } continue; @@ -162,19 +166,19 @@ async fn process_client_udp_req(args: &UdpGwArgs, tx: Sender, mut client let flags = packet.header.flags; let conn_id = packet.header.conn_id; if flags & UdpFlag::KEEPALIVE == UdpFlag::KEEPALIVE { - log::trace!("client {} send keepalive", masked_addr); + log::trace!("client {masked_addr} send keepalive"); // 2. 
if keepalive packet, do nothing, send keepalive response to client send_keepalive_response(tx.clone(), conn_id).await; continue; } - log::trace!("client {} received udp data {}", masked_addr, packet); + log::trace!("client {masked_addr} received udp data {packet}"); // 3. process client udpgw packet in a new task let tx = tx.clone(); tokio::spawn(async move { if let Err(e) = process_udp(udp_mtu, udp_timeout, tx.clone(), packet).await { send_error_response(tx, conn_id).await; - log::debug!("client {} process udp function \"{e}\"", masked_addr); + log::debug!("client {masked_addr} process udp function \"{e}\""); } }); } @@ -186,14 +190,14 @@ async fn write_to_client(addr: SocketAddr, mut writer: WriteHalf<'_>, mut rx: Re loop { use std::io::{Error, ErrorKind::BrokenPipe}; let packet = rx.recv().await.ok_or(Error::new(BrokenPipe, "recv error"))?; - log::trace!("send response to client {} with {}", masked_addr, packet); + log::trace!("send response to client {masked_addr} with {packet}"); let data: Vec = packet.into(); let _r = writer.write(&data).await?; } } async fn main_async(args: UdpGwArgs) -> Result<(), BoxError> { - log::info!("{} {} starting...", module_path!(), env!("CARGO_PKG_VERSION")); + log::info!("{} {} starting...", module_path!(), tun2proxy::version_info!()); log::info!("UDP Gateway Server running at {}", args.listen_addr); let shutdown_token = tokio_util::sync::CancellationToken::new(); @@ -201,18 +205,18 @@ async fn main_async(args: UdpGwArgs) -> Result<(), BoxError> { let ctrlc_fired = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)); let ctrlc_fired_clone = ctrlc_fired.clone(); - let ctrlc_handel = ctrlc2::set_async_handler(async move { + let ctrlc_handel = ctrlc2::AsyncCtrlC::new(move || { log::info!("Ctrl-C received, exiting..."); ctrlc_fired_clone.store(true, std::sync::atomic::Ordering::SeqCst); shutdown_token.cancel(); - }) - .await; + true + })?; let _ = main_loop_handle.await?; if ctrlc_fired.load(std::sync::atomic::Ordering::SeqCst) { log::info!("Ctrl-C fired, waiting the handler to finish..."); - ctrlc_handel.await.map_err(|err| err.to_string())?; + ctrlc_handel.await?; } Ok(()) @@ -227,7 +231,7 @@ pub async fn run(args: UdpGwArgs, shutdown_token: tokio_util::sync::Cancellation }; let client = Client::new(addr); let masked_addr = mask_socket_addr(addr); - log::info!("client {} connected", masked_addr); + log::info!("client {masked_addr} connected"); let params = args.clone(); tokio::spawn(async move { let (tx, rx) = tokio::sync::mpsc::channel::(100); @@ -236,7 +240,7 @@ pub async fn run(args: UdpGwArgs, shutdown_token: tokio_util::sync::Cancellation v = process_client_udp_req(¶ms, tx, client, tcp_read_stream) => v, v = write_to_client(addr, tcp_write_stream, rx) => v, }; - log::info!("client {} disconnected with {:?}", masked_addr, res); + log::info!("client {masked_addr} disconnected with {res:?}"); }); } Ok::<(), Error>(()) @@ -259,9 +263,7 @@ fn main() -> Result<(), BoxError> { .stdout(stdout) .stderr(stderr) .privileged_action(|| "Executed before drop privileges"); - let _ = daemonize - .start() - .map_err(|e| format!("Failed to daemonize process, error:{:?}", e))?; + let _ = daemonize.start().map_err(|e| format!("Failed to daemonize process, error:{e:?}"))?; } let rt = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; diff --git a/src/dns.rs b/src/dns.rs index 6f9470d..ed36f5f 100644 --- a/src/dns.rs +++ b/src/dns.rs @@ -1,21 +1,16 @@ use hickory_proto::{ op::{Message, MessageType, ResponseCode}, - rr::{Name, RData, Record, 
record_type::RecordType}, + rr::{ + Name, RData, Record, + rdata::{A, AAAA}, + }, }; use std::{net::IpAddr, str::FromStr}; pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u32) -> Result { let record = match ip { - IpAddr::V4(ip) => { - let mut record = Record::with(Name::from_str(domain)?, RecordType::A, ttl); - record.set_data(Some(RData::A(ip.into()))); - record - } - IpAddr::V6(ip) => { - let mut record = Record::with(Name::from_str(domain)?, RecordType::AAAA, ttl); - record.set_data(Some(RData::AAAA(ip.into()))); - record - } + IpAddr::V4(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::A(A(ip))), + IpAddr::V6(ip) => Record::from_rdata(Name::from_str(domain)?, ttl, RData::AAAA(AAAA(ip))), }; // We must indicate that this message is a response. Otherwise, implementations may not @@ -27,9 +22,7 @@ pub fn build_dns_response(mut request: Message, domain: &str, ip: IpAddr, ttl: u } pub fn remove_ipv6_entries(message: &mut Message) { - message - .answers_mut() - .retain(|answer| !matches!(answer.data(), Some(RData::AAAA(_)))); + message.answers_mut().retain(|answer| !matches!(answer.data(), RData::AAAA(_))); } pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result { @@ -38,7 +31,7 @@ pub fn extract_ipaddr_from_dns_message(message: &Message) -> Result { return Ok(IpAddr::V4((*addr).into())); } diff --git a/src/error.rs b/src/error.rs index c93ecb5..b6a6b92 100644 --- a/src/error.rs +++ b/src/error.rs @@ -23,10 +23,10 @@ pub enum Error { TryFromSlice(#[from] std::array::TryFromSliceError), #[error("IpStackError {0:?}")] - IpStack(#[from] ipstack::IpStackError), + IpStack(#[from] Box), #[error("DnsProtoError {0:?}")] - DnsProto(#[from] hickory_proto::error::ProtoError), + DnsProto(#[from] hickory_proto::ProtoError), #[error("httparse::Error {0:?}")] Httparse(#[from] httparse::Error), @@ -45,6 +45,12 @@ pub enum Error { IntParseError(#[from] std::num::ParseIntError), } +impl From for Error { + fn from(err: ipstack::IpStackError) -> Self { + Self::IpStack(Box::new(err)) + } +} + impl From<&str> for Error { fn from(err: &str) -> Self { Self::String(err.to_string()) @@ -67,7 +73,7 @@ impl From for std::io::Error { fn from(err: Error) -> Self { match err { Error::Io(err) => err, - _ => std::io::Error::new(std::io::ErrorKind::Other, err), + _ => std::io::Error::other(err), } } } diff --git a/src/general_api.rs b/src/general_api.rs index 2838271..e713409 100644 --- a/src/general_api.rs +++ b/src/general_api.rs @@ -100,7 +100,7 @@ pub unsafe extern "C" fn tun2proxy_run_with_cli_args(cli_args: *const c_char, tu pub fn general_run_for_api(args: Args, tun_mtu: u16, packet_information: bool) -> c_int { log::set_max_level(args.verbosity.into()); if let Err(err) = log::set_boxed_logger(Box::::default()) { - log::debug!("set logger error: {}", err); + log::debug!("set logger error: {err}"); } let shutdown_token = tokio_util::sync::CancellationToken::new(); @@ -120,15 +120,22 @@ pub fn general_run_for_api(args: Args, tun_mtu: u16, packet_information: bool) - return -3; }; match rt.block_on(async move { - if let Err(err) = general_run_async(args, tun_mtu, packet_information, shutdown_token).await { - log::error!("main loop error: {}", err); - return Err(err); + let ret = general_run_async(args.clone(), tun_mtu, packet_information, shutdown_token).await; + match &ret { + Ok(sessions) => { + if args.exit_on_fatal_error && *sessions >= args.max_sessions { + log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions); + 
std::process::exit(-1); + } + log::debug!("tun2proxy exited normally, current sessions: {sessions}"); + } + Err(err) => log::error!("main loop error: {err}"), } - Ok(()) + ret }) { Ok(_) => 0, Err(e) => { - log::error!("failed to run tun2proxy with error: {:?}", e); + log::error!("failed to run tun2proxy with error: {e:?}"); -4 } } @@ -140,7 +147,7 @@ pub async fn general_run_async( tun_mtu: u16, _packet_information: bool, shutdown_token: tokio_util::sync::CancellationToken, -) -> std::io::Result<()> { +) -> std::io::Result { let mut tun_config = tun::Configuration::default(); #[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))] @@ -189,9 +196,6 @@ pub async fn general_run_async( .bypass_ips(&args.bypass) .ipv6_default_route(args.ipv6_enabled); - #[allow(unused_mut, unused_assignments, unused_variables)] - let mut setup = true; - let device = tun::create_as_async(&tun_config)?; #[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))] @@ -203,16 +207,11 @@ pub async fn general_run_async( // TproxyState implements the Drop trait to restore network configuration, // so we need to assign it to a variable, even if it is not used. #[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))] - let mut _restore: Option = None; - - #[cfg(target_os = "linux")] - { - setup = args.setup; - } + let mut restore: Option = None; #[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))] - if setup { - _restore = Some(tproxy_config::tproxy_setup(&tproxy_args)?); + if args.setup { + restore = Some(tproxy_config::tproxy_setup(&tproxy_args).await?); } #[cfg(target_os = "linux")] @@ -239,8 +238,16 @@ pub async fn general_run_async( } } - let join_handle = tokio::spawn(crate::run(device, tun_mtu, args, shutdown_token)); - Ok(join_handle.await.map_err(std::io::Error::from)??) + let join_handle = tokio::spawn(crate::run(device, tun_mtu, args, shutdown_token.clone())); + + match join_handle.await? 
{ + Ok(sessions) => { + #[cfg(any(target_os = "linux", target_os = "windows", target_os = "macos"))] + tproxy_config::tproxy_remove(restore).await?; + Ok(sessions) + } + Err(err) => Err(std::io::Error::from(err)), + } } /// # Safety diff --git a/src/http.rs b/src/http.rs index caf79e9..2b08ed1 100644 --- a/src/http.rs +++ b/src/http.rs @@ -142,7 +142,7 @@ impl HttpConnection { AuthenticationScheme::Basic => { let auth_b64 = base64easy::encode(credentials.to_string(), base64easy::EngineKind::Standard); self.server_outbuf - .extend(format!("{}: Basic {}\r\n", PROXY_AUTHORIZATION, auth_b64).as_bytes()); + .extend(format!("{PROXY_AUTHORIZATION}: Basic {auth_b64}\r\n").as_bytes()); } AuthenticationScheme::None => {} } diff --git a/src/lib.rs b/src/lib.rs index c62e08b..737a82d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,7 +7,7 @@ use crate::{ session_info::{IpProtocol, SessionInfo}, virtual_dns::VirtualDns, }; -use ipstack::stream::{IpStackStream, IpStackTcpStream, IpStackUdpStream}; +use ipstack::{IpStackStream, IpStackTcpStream, IpStackUdpStream}; use proxy_handler::{ProxyHandler, ProxyHandlerManager}; use socks::SocksProxyManager; pub use socks5_impl::protocol::UserKey; @@ -64,9 +64,6 @@ pub mod win_svc; const DNS_PORT: u16 = 53; -static TASK_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0); -use std::sync::atomic::Ordering::Relaxed; - #[allow(unused)] #[derive(Hash, Copy, Clone, Eq, PartialEq, Debug)] #[cfg_attr( @@ -154,11 +151,13 @@ async fn create_udp_stream(socket_queue: &Option>, peer: Socket /// * `mtu` - The MTU of the network device /// * `args` - The arguments to use /// * `shutdown_token` - The token to exit the server -pub async fn run(device: D, mtu: u16, args: Args, shutdown_token: CancellationToken) -> crate::Result<()> +/// # Returns +/// * The number of sessions while exiting +pub async fn run(device: D, mtu: u16, args: Args, shutdown_token: CancellationToken) -> crate::Result where D: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - log::info!("{} {} starting...", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")); + log::info!("{} {} starting...", env!("CARGO_PKG_NAME"), version_info!()); log::info!("Proxy {} server: {}", args.proxy.proxy_type, args.proxy.addr); let server_addr = args.proxy.addr; @@ -222,11 +221,11 @@ where let socket_queue = None; use socks5_impl::protocol::Version::{V4, V5}; - let mgr = match args.proxy.proxy_type { - ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)) as Arc, - ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)) as Arc, - ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)) as Arc, - ProxyType::None => Arc::new(NoProxyManager::new()) as Arc, + let mgr: Arc = match args.proxy.proxy_type { + ProxyType::Socks5 => Arc::new(SocksProxyManager::new(server_addr, V5, key)), + ProxyType::Socks4 => Arc::new(SocksProxyManager::new(server_addr, V4, key)), + ProxyType::Http => Arc::new(HttpManager::new(server_addr, key)), + ProxyType::None => Arc::new(NoProxyManager::new()), }; let mut ipstack_config = ipstack::IpStackConfig::default(); @@ -238,7 +237,7 @@ where #[cfg(feature = "udpgw")] let udpgw_client = args.udpgw_server.map(|addr| { - log::info!("UDP Gateway enabled, server: {}", addr); + log::info!("UDP Gateway enabled, server: {addr}"); use std::time::Duration; let client = Arc::new(UdpGwClient::new( mtu, @@ -254,7 +253,11 @@ where client }); + let task_count = std::sync::Arc::new(std::sync::atomic::AtomicUsize::new(0)); + use 
std::sync::atomic::Ordering::Relaxed; + loop { + let task_count = task_count.clone(); let virtual_dns = virtual_dns.clone(); let ip_stack_stream = tokio::select! { _ = shutdown_token.cancelled() => { @@ -265,10 +268,10 @@ where ip_stack_stream? } }; - let max_sessions = args.max_sessions as u64; + let max_sessions = args.max_sessions; match ip_stack_stream { IpStackStream::Tcp(tcp) => { - if TASK_COUNT.load(Relaxed) > max_sessions { + if task_count.load(Relaxed) >= max_sessions { if args.exit_on_fatal_error { log::info!("Too many sessions that over {max_sessions}, exiting..."); break; @@ -276,7 +279,7 @@ where log::warn!("Too many sessions that over {max_sessions}, dropping new session"); continue; } - log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1); + log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1)); let info = SessionInfo::new(tcp.local_addr(), tcp.peer_addr(), IpProtocol::Tcp); let domain_name = if let Some(virtual_dns) = &virtual_dns { let mut virtual_dns = virtual_dns.lock().await; @@ -289,13 +292,13 @@ where let socket_queue = socket_queue.clone(); tokio::spawn(async move { if let Err(err) = handle_tcp_session(tcp, proxy_handler, socket_queue).await { - log::error!("{} error \"{}\"", info, err); + log::error!("{info} error \"{err}\""); } - log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1); + log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1)); }); } IpStackStream::Udp(udp) => { - if TASK_COUNT.load(Relaxed) > max_sessions { + if task_count.load(Relaxed) >= max_sessions { if args.exit_on_fatal_error { log::info!("Too many sessions that over {max_sessions}, exiting..."); break; @@ -303,11 +306,11 @@ where log::warn!("Too many sessions that over {max_sessions}, dropping new session"); continue; } - log::trace!("Session count {}", TASK_COUNT.fetch_add(1, Relaxed) + 1); + log::trace!("Session count {}", task_count.fetch_add(1, Relaxed).saturating_add(1)); let mut info = SessionInfo::new(udp.local_addr(), udp.peer_addr(), IpProtocol::Udp); if info.dst.port() == DNS_PORT { if is_private_ip(info.dst.ip()) { - info.dst.set_ip(dns_addr); + info.dst.set_ip(dns_addr); // !!! Here we change the destination address to remote DNS server!!! 
} if args.dns == ArgDns::OverTcp { info.protocol = IpProtocol::Tcp; @@ -315,9 +318,9 @@ where let socket_queue = socket_queue.clone(); tokio::spawn(async move { if let Err(err) = handle_dns_over_tcp_session(udp, proxy_handler, socket_queue, ipv6_enabled).await { - log::error!("{} error \"{}\"", info, err); + log::error!("{info} error \"{err}\""); } - log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1); + log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1)); }); continue; } @@ -325,10 +328,10 @@ where tokio::spawn(async move { if let Some(virtual_dns) = virtual_dns { if let Err(err) = handle_virtual_dns_session(udp, virtual_dns).await { - log::error!("{} error \"{}\"", info, err); + log::error!("{info} error \"{err}\""); } } - log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1); + log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1)); }); continue; } @@ -357,9 +360,9 @@ where None => dst.into(), }; if let Err(e) = handle_udp_gateway_session(udp, udpgw, &dst_addr, proxy_handler, queue, ipv6_enabled).await { - log::info!("Ending {} with \"{}\"", info, e); + log::info!("Ending {info} with \"{e}\""); } - log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1); + log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1)); }); continue; } @@ -369,13 +372,13 @@ where tokio::spawn(async move { let ty = args.proxy.proxy_type; if let Err(err) = handle_udp_associate_session(udp, ty, proxy_handler, socket_queue, ipv6_enabled).await { - log::info!("Ending {} with \"{}\"", info, err); + log::info!("Ending {info} with \"{err}\""); } - log::trace!("Session count {}", TASK_COUNT.fetch_sub(1, Relaxed) - 1); + log::trace!("Session count {}", task_count.fetch_sub(1, Relaxed).saturating_sub(1)); }); } Err(e) => { - log::error!("Failed to create UDP connection: {}", e); + log::error!("Failed to create UDP connection: {e}"); } } } @@ -390,7 +393,7 @@ where } } } - Ok(()) + Ok(task_count.load(Relaxed)) } async fn handle_virtual_dns_session(mut udp: IpStackUdpStream, dns: Arc>) -> crate::Result<()> { @@ -399,7 +402,7 @@ async fn handle_virtual_dns_session(mut udp: IpStackUdpStream, dns: Arc { // indicate UDP read fails not an error. 
- log::debug!("Virtual DNS session error: {}", e); + log::debug!("Virtual DNS session error: {e}"); break; } Ok(len) => len, @@ -409,7 +412,7 @@ async fn handle_virtual_dns_session(mut udp: IpStackUdpStream, dns: Arc {}", qname, ip); + log::debug!("Virtual DNS query: {qname} -> {ip}"); } Ok(()) } @@ -428,7 +431,7 @@ where total += n as u64; let (tx, rx) = if is_tx { (n, 0) } else { (0, n) }; if let Err(e) = crate::traffic_status::traffic_status_update(tx, rx) { - log::debug!("Record traffic status error: {}", e); + log::debug!("Record traffic status error: {e}"); } writer.write_all(&buf[..n]).await?; } @@ -450,7 +453,7 @@ async fn handle_tcp_session( let mut server = create_tcp_stream(&socket_queue, server_addr).await?; - log::info!("Beginning {}", session_info); + log::info!("Beginning {session_info}"); if let Err(e) = handle_proxy_session(&mut server, proxy_handler).await { tcp_stack.shutdown().await?; @@ -464,19 +467,19 @@ async fn handle_tcp_session( async move { let r = copy_and_record_traffic(&mut t_rx, &mut s_tx, true).await; if let Err(err) = s_tx.shutdown().await { - log::trace!("{} s_tx shutdown error {}", session_info, err); + log::trace!("{session_info} s_tx shutdown error {err}"); } r }, async move { let r = copy_and_record_traffic(&mut s_rx, &mut t_tx, false).await; if let Err(err) = t_tx.shutdown().await { - log::trace!("{} t_tx shutdown error {}", session_info, err); + log::trace!("{session_info} t_tx shutdown error {err}"); } r }, ); - log::info!("Ending {} with {:?}", session_info, res); + log::info!("Ending {session_info} with {res:?}"); Ok(()) } @@ -506,7 +509,7 @@ async fn handle_udp_gateway_session( None => { let mut tcp_server_stream = create_tcp_stream(&socket_queue, proxy_server_addr).await?; if let Err(e) = handle_proxy_session(&mut tcp_server_stream, proxy_handler).await { - return Err(format!("udpgw connection error: {}", e).into()); + return Err(format!("udpgw connection error: {e}").into()); } break UdpGwClientStream::new(tcp_server_stream); } @@ -622,7 +625,7 @@ async fn handle_udp_associate_session( ) }; - log::info!("Beginning {}", session_info); + log::info!("Beginning {session_info}"); // `_server` is meaningful here, it must be alive all the time // to ensure that UDP transmission will not be interrupted accidentally. 
@@ -699,7 +702,7 @@ async fn handle_udp_associate_session( } } - log::info!("Ending {}", session_info); + log::info!("Ending {session_info}"); Ok(()) } @@ -718,7 +721,7 @@ async fn handle_dns_over_tcp_session( let mut server = create_tcp_stream(&socket_queue, server_addr).await?; - log::info!("Beginning {}", session_info); + log::info!("Beginning {session_info}"); let _ = handle_proxy_session(&mut server, proxy_handler).await?; @@ -771,7 +774,7 @@ async fn handle_dns_over_tcp_session( let name = dns::extract_domain_from_dns_message(&message)?; let ip = dns::extract_ipaddr_from_dns_message(&message); - log::trace!("DNS over TCP query result: {} -> {:?}", name, ip); + log::trace!("DNS over TCP query result: {name} -> {ip:?}"); if !ipv6_enabled { dns::remove_ipv6_entries(&mut message); @@ -791,7 +794,7 @@ async fn handle_dns_over_tcp_session( } } - log::info!("Ending {}", session_info); + log::info!("Ending {session_info}"); Ok(()) } diff --git a/src/session_info.rs b/src/session_info.rs index fc4e938..a0784a9 100644 --- a/src/session_info.rs +++ b/src/session_info.rs @@ -16,7 +16,7 @@ impl std::fmt::Display for IpProtocol { IpProtocol::Tcp => write!(f, "TCP"), IpProtocol::Udp => write!(f, "UDP"), IpProtocol::Icmp => write!(f, "ICMP"), - IpProtocol::Other(v) => write!(f, "Other(0x{:02X})", v), + IpProtocol::Other(v) => write!(f, "Other(0x{v:02X})"), } } } diff --git a/src/socket_transfer.rs b/src/socket_transfer.rs index e069b1d..4c81da7 100644 --- a/src/socket_transfer.rs +++ b/src/socket_transfer.rs @@ -30,17 +30,17 @@ enum Response { /// Reconstruct socket from raw `fd` pub fn reconstruct_socket(fd: RawFd) -> Result { - // Check if `fd` is valid - let fd_flags = fcntl::fcntl(fd, fcntl::F_GETFD)?; - // `fd` is confirmed to be valid so it should be closed let socket = unsafe { OwnedFd::from_raw_fd(fd) }; + // Check if `fd` is valid + let fd_flags = fcntl::fcntl(socket.as_fd(), fcntl::F_GETFD)?; + // Insert CLOEXEC flag to the `fd` to prevent further propagation across `execve(2)` calls let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?; if !fd_flags.contains(FdFlag::FD_CLOEXEC) { fd_flags.insert(FdFlag::FD_CLOEXEC); - fcntl::fcntl(fd, fcntl::F_SETFD(fd_flags))?; + fcntl::fcntl(socket.as_fd(), fcntl::F_SETFD(fd_flags))?; } Ok(socket) @@ -70,12 +70,12 @@ pub async fn create_transfer_socket_pair() -> std::io::Result<(UnixDatagram, Own let remote_fd: OwnedFd = remote.into_std().unwrap().into(); // Get `remote_fd` flags - let fd_flags = fcntl::fcntl(remote_fd.as_raw_fd(), fcntl::F_GETFD)?; + let fd_flags = fcntl::fcntl(remote_fd.as_fd(), fcntl::F_GETFD)?; // Remove CLOEXEC flag from the `remote_fd` to allow propagating across `execve(2)` let mut fd_flags = FdFlag::from_bits(fd_flags).ok_or(ErrorKind::Unsupported)?; fd_flags.remove(FdFlag::FD_CLOEXEC); - fcntl::fcntl(remote_fd.as_raw_fd(), fcntl::F_SETFD(fd_flags))?; + fcntl::fcntl(remote_fd.as_fd(), fcntl::F_SETFD(fd_flags))?; Ok((local, remote_fd)) } @@ -157,8 +157,7 @@ where let mut buf = [0_u8; REQUEST_BUFFER_SIZE]; let mut iov = [IoSliceMut::new(&mut buf[..])]; - let mut cmsg = Vec::with_capacity(cmsg_space::() * number as usize); - + let mut cmsg = vec![0; cmsg_space::() * number as usize]; let msg = recvmsg::<()>(socket.as_fd().as_raw_fd(), &mut iov, Some(&mut cmsg), MsgFlags::empty()); let msg = match msg { diff --git a/src/socks.rs b/src/socks.rs index 9c8ad2d..e2265ba 100644 --- a/src/socks.rs +++ b/src/socks.rs @@ -78,7 +78,7 @@ impl SocksProxyImpl { } } SocketAddr::V6(addr) => { - return Err(format!("SOCKS4 
does not support IPv6: {}", addr).into()); + return Err(format!("SOCKS4 does not support IPv6: {addr}").into()); } } self.server_outbuf.extend(ip_vec); @@ -136,7 +136,7 @@ impl SocksProxyImpl { let response = handshake::Response::retrieve_from_stream(&mut self.server_inbuf.clone()); if let Err(e) = response { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!("receive_server_hello_socks5 needs more data \"{}\"...", e); + log::trace!("receive_server_hello_socks5 needs more data \"{e}\"..."); return Ok(()); } else { return Err(e); @@ -181,7 +181,7 @@ impl SocksProxyImpl { let response = Response::retrieve_from_stream(&mut self.server_inbuf.clone()); if let Err(e) = response { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!("receive_auth_data needs more data \"{}\"...", e); + log::trace!("receive_auth_data needs more data \"{e}\"..."); return Ok(()); } else { return Err(e); @@ -213,7 +213,7 @@ impl SocksProxyImpl { let response = protocol::Response::retrieve_from_stream(&mut self.server_inbuf.clone()); if let Err(e) = response { if e.kind() == std::io::ErrorKind::UnexpectedEof { - log::trace!("receive_connection_status needs more data \"{}\"...", e); + log::trace!("receive_connection_status needs more data \"{e}\"..."); return Ok(()); } else { return Err(e); diff --git a/src/traffic_status.rs b/src/traffic_status.rs index 1922401..9f32b7c 100644 --- a/src/traffic_status.rs +++ b/src/traffic_status.rs @@ -51,7 +51,7 @@ static TIME_STAMP: LazyLock> = LazyLock::new(|| Mutex: pub(crate) fn traffic_status_update(delta_tx: usize, delta_rx: usize) -> Result<()> { { let is_none_or_error = TRAFFIC_STATUS_CALLBACK.lock().map(|guard| guard.is_none()).unwrap_or_else(|e| { - log::error!("Failed to acquire lock: {}", e); + log::error!("Failed to acquire lock: {e}"); true }); if is_none_or_error { diff --git a/src/udpgw.rs b/src/udpgw.rs index 3670d64..24edaad 100644 --- a/src/udpgw.rs +++ b/src/udpgw.rs @@ -32,9 +32,9 @@ impl std::fmt::Display for UdpFlag { 0x01 => "KEEPALIVE", 0x20 => "ERR", 0x02 => "DATA", - n => return write!(f, "Unknown UdpFlag(0x{:02X})", n), + n => return write!(f, "Unknown UdpFlag(0x{n:02X})"), }; - write!(f, "{}", flag) + write!(f, "{flag}") } } @@ -332,7 +332,7 @@ impl std::fmt::Display for UdpGwResponse { UdpGwResponse::KeepAlive => write!(f, "KeepAlive"), UdpGwResponse::Error => write!(f, "Error"), UdpGwResponse::TcpClose => write!(f, "TcpClose"), - UdpGwResponse::Data(packet) => write!(f, "Data({})", packet), + UdpGwResponse::Data(packet) => write!(f, "Data({packet})"), } } } @@ -487,21 +487,21 @@ impl UdpGwClient { let keepalive_packet: Vec = Packet::build_keepalive_packet(sn).into(); tx += keepalive_packet.len(); if let Err(e) = stream_writer.write_all(&keepalive_packet).await { - log::warn!("stream {} {:?} send keepalive failed: {}", sn, local_addr, e); + log::warn!("stream {sn} {local_addr:?} send keepalive failed: {e}"); continue; } match UdpGwClient::recv_udpgw_packet(self.udp_mtu, self.udp_timeout, &mut stream_reader).await { Ok((len, UdpGwResponse::KeepAlive)) => { stream.update_activity(); self.store_server_connection_full(stream, stream_reader, stream_writer).await; - log::trace!("stream {sn} {:?} send keepalive and recieve it successfully", local_addr); + log::trace!("stream {sn} {local_addr:?} send keepalive and recieve it successfully"); rx += len; } Ok((len, v)) => { - log::debug!("stream {sn} {:?} keepalive unexpected response: {v}", local_addr); + log::debug!("stream {sn} {local_addr:?} keepalive unexpected response: {v}"); rx += 
len; } - Err(e) => log::debug!("stream {sn} {:?} keepalive no response, error \"{e}\"", local_addr), + Err(e) => log::debug!("stream {sn} {local_addr:?} keepalive no response, error \"{e}\""), } } crate::traffic_status::traffic_status_update(tx, rx)?; diff --git a/src/win_svc.rs b/src/win_svc.rs index a4090e3..2fed698 100644 --- a/src/win_svc.rs +++ b/src/win_svc.rs @@ -16,7 +16,7 @@ fn my_service_main(arguments: Vec) { // `service_dispatcher::start` from `main`. if let Err(_e) = run_service(arguments) { - log::error!("Error: {:?}", _e); + log::error!("Error: {_e:?}"); } } @@ -78,8 +78,16 @@ fn run_service(_arguments: Vec) -> Result<(), crate::BoxErro } unsafe { crate::tun2proxy_set_traffic_status_callback(1, Some(traffic_cb), std::ptr::null_mut()) }; - if let Err(err) = crate::general_run_async(args, tun::DEFAULT_MTU, false, shutdown_token).await { - log::error!("main loop error: {}", err); + let ret = crate::general_run_async(args.clone(), tun::DEFAULT_MTU, false, shutdown_token).await; + match &ret { + Ok(sessions) => { + if args.exit_on_fatal_error && *sessions >= args.max_sessions { + log::error!("Forced exit due to max sessions reached ({sessions}/{})", args.max_sessions); + std::process::exit(-1); + } + log::debug!("tun2proxy exited normally, current sessions: {sessions}"); + } + Err(err) => log::error!("main loop error: {err}"), } Ok::<(), crate::Error>(()) })?;