# syntax=docker/dockerfile:1
# gitzone dockerfile_service

## STAGE 1 // BUILD
FROM code.foss.global/host.today/ht-docker-node:lts AS build

# System build tools that the Rust dep tree needs beyond the base image:
# - cmake      : used by the `cmake` crate (transitive via ort_sys / a webrtc
#                sub-crate) to build a C/C++ library from source when a
#                prebuilt-binary download path doesn't apply.
# - pkg-config : used by audiopus_sys and other *-sys crates to locate libs
#                on the native target (safe no-op if they vendor their own).
# These are normally pre-installed on dev machines but not in ht-docker-node:lts.
RUN apt-get update && apt-get install -y --no-install-recommends \
    cmake \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

# buildx sets TARGETARCH automatically for each platform it's building:
#   linux/amd64 -> TARGETARCH=amd64
#   linux/arm64 -> TARGETARCH=arm64
# We use it to tell tsrust to build ONLY the current container's arch. This
# overrides the `@git.zone/tsrust.targets` list in .smartconfig.json, which is
# right for local dev / CI (where you want both binaries) but wrong for per-
# platform Docker stages (each stage would then also try to cross-compile to
# the OTHER arch — which fails in the arm64 stage because no reverse cross-
# toolchain is installed).
#
# With --target set, tsrust builds a single target natively within whichever
# platform this stage is running under (native on amd64, QEMU-emulated on arm64).
ARG TARGETARCH

# NOTE(review): the whole build context is copied before installing deps, so
# any source change invalidates the install layer. Copying package.json +
# pnpm-lock.yaml first would improve caching — but the `rm -rf node_modules`
# below suggests there is no .dockerignore (a host node_modules may be in the
# context), so confirm/add a .dockerignore before reordering these steps.
COPY ./ /app
WORKDIR /app
RUN pnpm config set store-dir .pnpm-store
# --frozen-lockfile: fail fast if pnpm-lock.yaml is out of sync with
# package.json instead of silently regenerating it — keeps the image build
# reproducible (this is also pnpm's default in CI environments).
RUN rm -rf node_modules && pnpm install --frozen-lockfile

# tsrust --target takes precedence over .smartconfig.json's targets array.
# Writes dist_rust/proxy-engine_linux_amd64 or dist_rust/proxy-engine_linux_arm64.
# The TS layer (ts/proxybridge.ts buildLocalPaths) picks the right one at runtime
# via process.arch.
RUN pnpm exec tsrust --target linux_${TARGETARCH}

# Web bundle (esbuild — pure JS, uses the platform's native esbuild binary
# installed by pnpm above, so no cross-bundling concerns).
RUN pnpm run bundle

# Drop the pnpm store to keep the final image smaller: the production stage
# below copies /app wholesale, so anything left here lands in the runtime
# image. node_modules stays because the runtime entrypoint is tsx and
# siprouter has no separate dist_ts/ to run from.
RUN rm -rf .pnpm-store

## STAGE 2 // PRODUCTION
FROM code.foss.global/host.today/ht-docker-node:alpine-node AS production

# gcompat + libstdc++ let the glibc-linked proxy-engine binary run on Alpine.
RUN apk add --no-cache gcompat libstdc++

WORKDIR /app
COPY --from=build /app /app

# SIPROUTER_MODE flags container operation to the app; NODE_ENV enables
# production code paths in Node tooling.
ENV SIPROUTER_MODE=OCI_CONTAINER \
    NODE_ENV=production

LABEL org.opencontainers.image.title="siprouter" \
    org.opencontainers.image.description="SIP proxy with Rust data plane and WebRTC bridge" \
    org.opencontainers.image.source="https://code.foss.global/serve.zone/siprouter"

# NOTE(review): the container runs as root — every exposed port is >1024, so a
# non-root USER (plus --chown on the COPY above) should work; confirm the
# proxy-engine binary and the .nogit/cert.pem paths need no root access before
# adding one. A HEALTHCHECK against the 3060 UI would also help orchestrators,
# but the endpoint may be HTTP or HTTPS (auto-detected), so it needs a
# scheme-aware probe — left for a follow-up.

# 5070        SIP signaling (UDP+TCP)
# 5061        SIP-TLS (optional, UDP+TCP)
# 3060        Web UI / WebSocket (HTTP or HTTPS, auto-detected from .nogit/cert.pem)
# 20000-20200/udp  RTP media range (must match config.proxy.rtpPortRange)
EXPOSE 5070/udp 5070/tcp 5061/udp 5061/tcp 3060/tcp 20000-20200/udp

# Exec-form CMD with no shell wrapper: tsx is PID 1 directly, so SIGINT/SIGTERM
# reach Node and ts/sipproxy.ts' shutdown handler (which calls
# shutdownProxyEngine) runs cleanly. The previous `sh -c "exec …"` achieved the
# same thing via an extra shell hop; no shell features are used in the command,
# so the indirection is dropped.
CMD ["./node_modules/.bin/tsx", "ts/sipproxy.ts"]