From 7645830c7772a037205825a88147b3e1a1063c1c Mon Sep 17 00:00:00 2001
From: Jun Kurihara
Date: Thu, 15 Feb 2024 21:29:49 +0900
Subject: [PATCH] deps

---
 Cargo.toml | 11 +-
 legacy-lib/Cargo.toml | 89 ----
 legacy-lib/src/backend/load_balance.rs | 135 ------
 legacy-lib/src/backend/load_balance_sticky.rs | 132 ------
 legacy-lib/src/backend/mod.rs | 77 ----
 legacy-lib/src/backend/sticky_cookie.rs | 208 ---------
 legacy-lib/src/backend/upstream.rs | 201 ---------
 legacy-lib/src/backend/upstream_opts.rs | 22 -
 legacy-lib/src/certs.rs | 91 ----
 legacy-lib/src/constants.rs | 45 --
 legacy-lib/src/error.rs | 86 ----
 legacy-lib/src/globals.rs | 325 ---------
 legacy-lib/src/handler/cache.rs | 393 ------------
 legacy-lib/src/handler/error.rs | 16 -
 legacy-lib/src/handler/forwarder.rs | 147 -------
 legacy-lib/src/handler/handler_main.rs | 384 -----------
 legacy-lib/src/handler/mod.rs | 22 -
 legacy-lib/src/handler/utils_headers.rs | 276 ------------
 legacy-lib/src/handler/utils_request.rs | 64 ---
 .../src/handler/utils_synth_response.rs | 35 --
 legacy-lib/src/hyper_executor.rs | 45 --
 legacy-lib/src/lib.rs | 112 -----
 legacy-lib/src/log.rs | 98 -----
 legacy-lib/src/proxy/crypto_service.rs | 276 ------------
 legacy-lib/src/proxy/mod.rs | 42 --
 legacy-lib/src/proxy/proxy_client_cert.rs | 47 ---
 legacy-lib/src/proxy/proxy_h3.rs | 186 ---------
 legacy-lib/src/proxy/proxy_main.rs | 150 -------
 legacy-lib/src/proxy/proxy_quic_quinn.rs | 124 ------
 legacy-lib/src/proxy/proxy_quic_s2n.rs | 135 ------
 legacy-lib/src/proxy/proxy_tls.rs | 155 -------
 legacy-lib/src/proxy/socket.rs | 46 --
 legacy-lib/src/utils/bytes_name.rs | 123 ------
 legacy-lib/src/utils/mod.rs | 5 -
 legacy-lib/src/utils/socket_addr.rs | 60 ---
 rpxy-bin/Cargo.toml | 19 +-
 rpxy-lib/Cargo.toml | 25 +-
 rpxy-lib/src/backend/upstream.rs | 13 +-
 submodules/h3 | 2 +-
 39 files changed, 38 insertions(+), 4384 deletions(-)
 delete mode 100644 legacy-lib/Cargo.toml
 delete mode 100644 legacy-lib/src/backend/load_balance.rs
 delete mode 100644 legacy-lib/src/backend/load_balance_sticky.rs
 delete mode 100644 legacy-lib/src/backend/mod.rs
 delete mode 100644 legacy-lib/src/backend/sticky_cookie.rs
 delete mode 100644 legacy-lib/src/backend/upstream.rs
 delete mode 100644 legacy-lib/src/backend/upstream_opts.rs
 delete mode 100644 legacy-lib/src/certs.rs
 delete mode 100644 legacy-lib/src/constants.rs
 delete mode 100644 legacy-lib/src/error.rs
 delete mode 100644 legacy-lib/src/globals.rs
 delete mode 100644 legacy-lib/src/handler/cache.rs
 delete mode 100644 legacy-lib/src/handler/error.rs
 delete mode 100644 legacy-lib/src/handler/forwarder.rs
 delete mode 100644 legacy-lib/src/handler/handler_main.rs
 delete mode 100644 legacy-lib/src/handler/mod.rs
 delete mode 100644 legacy-lib/src/handler/utils_headers.rs
 delete mode 100644 legacy-lib/src/handler/utils_request.rs
 delete mode 100644 legacy-lib/src/handler/utils_synth_response.rs
 delete mode 100644 legacy-lib/src/hyper_executor.rs
 delete mode 100644 legacy-lib/src/lib.rs
 delete mode 100644 legacy-lib/src/log.rs
 delete mode 100644 legacy-lib/src/proxy/crypto_service.rs
 delete mode 100644 legacy-lib/src/proxy/mod.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_client_cert.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_h3.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_main.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_quic_quinn.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_quic_s2n.rs
 delete mode 100644 legacy-lib/src/proxy/proxy_tls.rs
 delete mode 100644 legacy-lib/src/proxy/socket.rs
 delete mode 100644 legacy-lib/src/utils/bytes_name.rs
 delete mode 100644 legacy-lib/src/utils/mod.rs
 delete mode 100644 legacy-lib/src/utils/socket_addr.rs

diff --git a/Cargo.toml b/Cargo.toml
index c512b187..50e32067 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,5 +1,14 @@
-[workspace]
+[workspace.package]
+version = "0.7.0-alpha.5"
+authors = ["Jun Kurihara"]
+homepage = "https://github.com/junkurihara/rust-rpxy"
+repository = "https://github.com/junkurihara/rust-rpxy"
+license = "MIT"
+readme = "./README.md"
+edition = "2021"
+publish = false
+[workspace]
 members = ["rpxy-bin", "rpxy-lib"]
 exclude = ["submodules"]
 resolver = "2"

diff --git a/legacy-lib/Cargo.toml b/legacy-lib/Cargo.toml
deleted file mode 100644
index 00f1edb3..00000000
--- a/legacy-lib/Cargo.toml
+++ /dev/null
@@ -1,89 +0,0 @@
-[package]
-name = "rpxy-lib-legacy"
-version = "0.6.2"
-authors = ["Jun Kurihara"]
-homepage = "https://github.com/junkurihara/rust-rpxy"
-repository = "https://github.com/junkurihara/rust-rpxy"
-license = "MIT"
-readme = "../README.md"
-edition = "2021"
-publish = false
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[features]
-default = ["http3-quinn", "sticky-cookie", "cache"]
-http3-quinn = ["quinn", "h3", "h3-quinn", "socket2"]
-http3-s2n = ["h3", "s2n-quic", "s2n-quic-rustls", "s2n-quic-h3"]
-sticky-cookie = ["base64", "sha2", "chrono"]
-cache = ["http-cache-semantics", "lru"]
-native-roots = ["hyper-rustls/native-tokio"]
-
-[dependencies]
-rand = "0.8.5"
-rustc-hash = "1.1.0"
-bytes = "1.5.0"
-derive_builder = "0.12.0"
-futures = { version = "0.3.29", features = ["alloc", "async-await"] }
-tokio = { version = "1.34.0", default-features = false, features = [
-  "net",
-  "rt-multi-thread",
-  "time",
-  "sync",
-  "macros",
-  "fs",
-] }
-async-trait = "0.1.74"
-hot_reload = "0.1.4" # reloading certs
-
-# Error handling
-anyhow = "1.0.75"
-thiserror = "1.0.50"
-
-# http and tls
-http = "1.0.0"
-http-body-util = "0.1.0"
-hyper = { version = "1.0.1", default-features = false }
-hyper-util = { version = "0.1.1", features = ["full"] }
-hyper-rustls = { version = "0.24.2", default-features = false, features = [
-  "tokio-runtime",
-  "webpki-tokio",
-  "http1",
-  "http2",
-] }
-tokio-rustls = { version = "0.24.1", features = ["early-data"] }
-rustls = { version = "0.21.9", default-features = false }
-webpki = "0.22.4"
-x509-parser = "0.15.1"
-
-# logging
-tracing = { version = "0.1.40" }
-
-# http/3
-quinn = { version = "0.10.2", optional = true }
-h3 = { path = "../submodules/h3/h3/", optional = true }
-h3-quinn = { path = "../submodules/h3/h3-quinn/", optional = true }
-s2n-quic = { version = "1.31.0", default-features = false, features = [
-  "provider-tls-rustls",
-], optional = true }
-s2n-quic-h3 = { path = "../submodules/s2n-quic-h3/", optional = true }
-s2n-quic-rustls = { version = "0.31.0", optional = true }
-# for UDP socket with SO_REUSEADDR when h3 with quinn
-socket2 = { version = "0.5.5", features = ["all"], optional = true }
-
-# cache
-http-cache-semantics = { path = "../submodules/rusty-http-cache-semantics/", optional = true }
-lru = { version = "0.12.0", optional = true }
-
-# cookie handling for sticky cookie
-chrono = { version = "0.4.31", default-features = false, features = [
-  "unstable-locales",
-  "alloc",
-  "clock",
-], optional = true }
-base64 = { version = "0.21.5", optional = true }
-sha2 = { version = "0.10.8", default-features = false, optional = true }
-
-
-[dev-dependencies]
-# http and tls

diff --git a/legacy-lib/src/backend/load_balance.rs b/legacy-lib/src/backend/load_balance.rs
deleted file mode 100644
index 5d93f0a8..00000000
--- a/legacy-lib/src/backend/load_balance.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-#[cfg(feature = "sticky-cookie")]
-pub use super::{
-  load_balance_sticky::{LbStickyRoundRobin, LbStickyRoundRobinBuilder},
-  sticky_cookie::StickyCookie,
-};
-use derive_builder::Builder;
-use rand::Rng;
-use std::sync::{
-  atomic::{AtomicUsize, Ordering},
-  Arc,
-};
-
-/// Constants to specify a load balance option
-pub(super) mod load_balance_options {
-  pub const FIX_TO_FIRST: &str = "none";
-  pub const ROUND_ROBIN: &str = "round_robin";
-  pub const RANDOM: &str = "random";
-  #[cfg(feature = "sticky-cookie")]
-  pub const STICKY_ROUND_ROBIN: &str = "sticky";
-}
-
-#[derive(Debug, Clone)]
-/// Pointer to the upstream serving the incoming request.
-/// If 'sticky cookie'-based LB is enabled and the cookie must be updated/created, the new cookie is also given.
-pub(super) struct PointerToUpstream {
-  pub ptr: usize,
-  pub context_lb: Option<LbContext>,
-}
-/// Trait for LB
-pub(super) trait LbWithPointer {
-  fn get_ptr(&self, req_info: Option<&LbContext>) -> PointerToUpstream;
-}
-
-#[derive(Debug, Clone, Builder)]
-/// Round Robin LB object as a pointer to the currently serving upstream destination
-pub struct LbRoundRobin {
-  #[builder(default)]
-  /// Pointer to the index of the last served upstream destination
-  ptr: Arc<AtomicUsize>,
-  #[builder(setter(custom), default)]
-  /// Number of upstream destinations
-  num_upstreams: usize,
-}
-impl LbRoundRobinBuilder {
-  pub fn num_upstreams(&mut self, v: &usize) -> &mut Self {
-    self.num_upstreams = Some(*v);
-    self
-  }
-}
-impl LbWithPointer for LbRoundRobin {
-  /// Increment the count of upstreams served, up to the max value
-  fn get_ptr(&self, _info: Option<&LbContext>) -> PointerToUpstream {
-    // Get the current count of upstreams served
-    let current_ptr = self.ptr.load(Ordering::Relaxed);
-
-    let ptr = if current_ptr < self.num_upstreams - 1 {
-      self.ptr.fetch_add(1, Ordering::Relaxed)
-    } else {
-      // Clear the counter
-      self.ptr.fetch_and(0, Ordering::Relaxed)
-    };
-    PointerToUpstream { ptr, context_lb: None }
-  }
-}
-
-#[derive(Debug, Clone, Builder)]
-/// Random LB object to keep the object of random pools
-pub struct LbRandom {
-  #[builder(setter(custom), default)]
-  /// Number of upstream destinations
-  num_upstreams: usize,
-}
-impl LbRandomBuilder {
-  pub fn num_upstreams(&mut self, v: &usize) -> &mut Self {
-    self.num_upstreams = Some(*v);
-    self
-  }
-}
-impl LbWithPointer for LbRandom {
-  /// Returns a random index within the range
-  fn get_ptr(&self, _info: Option<&LbContext>) -> PointerToUpstream {
-    let mut rng = rand::thread_rng();
-    let ptr = rng.gen_range(0..self.num_upstreams);
-    PointerToUpstream { ptr, context_lb: None }
-  }
-}
-
-#[derive(Debug, Clone)]
-/// Load Balancing Option
-pub enum LoadBalance {
-  /// Fix to the first upstream. Use this if only one upstream destination is specified.
-  FixToFirst,
-  /// Randomly choose one upstream server
-  Random(LbRandom),
-  /// Simple round robin without session persistence
-  RoundRobin(LbRoundRobin),
-  #[cfg(feature = "sticky-cookie")]
-  /// Round robin with session persistence using a cookie
-  StickyRoundRobin(LbStickyRoundRobin),
-}
-impl Default for LoadBalance {
-  fn default() -> Self {
-    Self::FixToFirst
-  }
-}
-
-impl LoadBalance {
-  /// Get the index of the upstream serving the incoming request
-  pub(super) fn get_context(&self, _context_to_lb: &Option<LbContext>) -> PointerToUpstream {
-    match self {
-      LoadBalance::FixToFirst => PointerToUpstream {
-        ptr: 0usize,
-        context_lb: None,
-      },
-      LoadBalance::RoundRobin(ptr) => ptr.get_ptr(None),
-      LoadBalance::Random(ptr) => ptr.get_ptr(None),
-      #[cfg(feature = "sticky-cookie")]
-      LoadBalance::StickyRoundRobin(ptr) => {
-        // Generate a new context if sticky round robin is enabled.
-        ptr.get_ptr(_context_to_lb.as_ref())
-      }
-    }
-  }
-}
-
-#[derive(Debug, Clone)]
-/// Struct to handle the sticky cookie string:
-/// - passed from the Rp module (http handler) to the LB module, extracted from req; only StickyCookieValue exists.
-/// - passed from the LB module to the Rp module (http handler), to be inserted into res; StickyCookieValue and Info exist.
-pub struct LbContext {
-  #[cfg(feature = "sticky-cookie")]
-  pub sticky_cookie: StickyCookie,
-  #[cfg(not(feature = "sticky-cookie"))]
-  pub sticky_cookie: (),
-}

diff --git a/legacy-lib/src/backend/load_balance_sticky.rs b/legacy-lib/src/backend/load_balance_sticky.rs
deleted file mode 100644
index 32f4fe58..00000000
--- a/legacy-lib/src/backend/load_balance_sticky.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-use super::{
-  load_balance::{LbContext, LbWithPointer, PointerToUpstream},
-  sticky_cookie::StickyCookieConfig,
-  Upstream,
-};
-use crate::{constants::STICKY_COOKIE_NAME, log::*};
-use derive_builder::Builder;
-use rustc_hash::FxHashMap as HashMap;
-use std::{
-  borrow::Cow,
-  sync::{
-    atomic::{AtomicUsize, Ordering},
-    Arc,
-  },
-};
-
-#[derive(Debug, Clone, Builder)]
-/// Round Robin LB object in the sticky cookie manner
-pub struct LbStickyRoundRobin {
-  #[builder(default)]
-  /// Pointer to the index of the last served upstream destination
-  ptr: Arc<AtomicUsize>,
-  #[builder(setter(custom), default)]
-  /// Number of upstream destinations
-  num_upstreams: usize,
-  #[builder(setter(custom))]
-  /// Information to build the cookie to stick clients to specific backends
-  pub sticky_config: StickyCookieConfig,
-  #[builder(setter(custom))]
-  /// Hashmaps:
-  /// - Hashmap that maps server indices to server ids (string)
-  /// - Hashmap that maps server ids (string) to server indices, for fast reverse lookup
-  upstream_maps: UpstreamMap,
-}
-#[derive(Debug, Clone)]
-pub struct UpstreamMap {
-  /// Hashmap that maps server indices to server ids (string)
-  upstream_index_map: Vec<String>,
-  /// Hashmap that maps server ids (string) to server indices, for fast reverse lookup
-  upstream_id_map: HashMap<String, usize>,
-}
-impl LbStickyRoundRobinBuilder {
-  pub fn num_upstreams(&mut self, v: &usize) -> &mut Self {
-    self.num_upstreams = Some(*v);
-    self
-  }
-  pub fn sticky_config(&mut self, server_name: &str, path_opt: &Option<String>) -> &mut Self {
-    self.sticky_config = Some(StickyCookieConfig {
-      name: STICKY_COOKIE_NAME.to_string(), // TODO: make this changeable, e.g., via the config file
-      domain: server_name.to_ascii_lowercase(),
-      path: if let Some(v) = path_opt {
-        v.to_ascii_lowercase()
-      } else {
-        "/".to_string()
-      },
-      duration: 300, // TODO: make this changeable, e.g., via the config file
-    });
-    self
-  }
-  pub fn upstream_maps(&mut self, upstream_vec: &[Upstream]) -> &mut Self {
-    let upstream_index_map: Vec<String> = upstream_vec
-      .iter()
-      .enumerate()
-      .map(|(i, v)| v.calculate_id_with_index(i))
-      .collect();
-    let mut upstream_id_map = HashMap::default();
-    for (i, v) in upstream_index_map.iter().enumerate() {
-      upstream_id_map.insert(v.to_string(), i);
-    }
-    self.upstream_maps = Some(UpstreamMap {
-      upstream_index_map,
-      upstream_id_map,
-    });
-    self
-  }
-}
-impl<'a> LbStickyRoundRobin {
-  fn simple_increment_ptr(&self) -> usize {
-    // Get the current count of upstreams served
-    let current_ptr = self.ptr.load(Ordering::Relaxed);
-
-    if current_ptr < self.num_upstreams - 1 {
-      self.ptr.fetch_add(1, Ordering::Relaxed)
-    } else {
-      // Clear the counter
-      self.ptr.fetch_and(0, Ordering::Relaxed)
-    }
-  }
-  /// This is always called only internally. So 'unwrap()' is executed.
-  fn get_server_id_from_index(&self, index: usize) -> String {
-    self.upstream_maps.upstream_index_map.get(index).unwrap().to_owned()
-  }
-  /// This function takes a value passed from outside. So the lookup result is checked.
-  fn get_server_index_from_id(&self, id: impl Into<Cow<'a, str>>) -> Option<usize> {
-    let id_str = id.into().to_string();
-    self.upstream_maps.upstream_id_map.get(&id_str).map(|v| v.to_owned())
-  }
-}
-impl LbWithPointer for LbStickyRoundRobin {
-  fn get_ptr(&self, req_info: Option<&LbContext>) -> PointerToUpstream {
-    // If the given context is None or invalid (not contained), simple_increment_ptr() is invoked to increment the pointer.
-    // Otherwise, get the server index indicated by the server_id inside the cookie.
-    let ptr = match req_info {
-      None => {
-        debug!("No sticky cookie");
-        self.simple_increment_ptr()
-      }
-      Some(context) => {
-        let server_id = &context.sticky_cookie.value.value;
-        if let Some(server_index) = self.get_server_index_from_id(server_id) {
-          debug!("Valid sticky cookie: id={}, index={}", server_id, server_index);
-          server_index
-        } else {
-          debug!("Invalid sticky cookie: id={}", server_id);
-          self.simple_increment_ptr()
-        }
-      }
-    };
-
-    // Get the server id from the ptr.
-    // TODO: This should be simplified and optimized if ptr is not changed (id value exists in cookie).
-    let upstream_id = self.get_server_id_from_index(ptr);
-    let new_cookie = self.sticky_config.build_sticky_cookie(upstream_id).unwrap();
-    let new_context = Some(LbContext {
-      sticky_cookie: new_cookie,
-    });
-    PointerToUpstream {
-      ptr,
-      context_lb: new_context,
-    }
-  }
-}

diff --git a/legacy-lib/src/backend/mod.rs b/legacy-lib/src/backend/mod.rs
deleted file mode 100644
index 73c44666..00000000
--- a/legacy-lib/src/backend/mod.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-mod load_balance;
-#[cfg(feature = "sticky-cookie")]
-mod load_balance_sticky;
-#[cfg(feature = "sticky-cookie")]
-mod sticky_cookie;
-mod upstream;
-mod upstream_opts;
-
-#[cfg(feature = "sticky-cookie")]
-pub use self::sticky_cookie::{StickyCookie, StickyCookieValue};
-pub use self::{
-  load_balance::{LbContext, LoadBalance},
-  upstream::{ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder},
-  upstream_opts::UpstreamOption,
-};
-use crate::{
-  certs::CryptoSource,
-  utils::{BytesName, PathNameBytesExp, ServerNameBytesExp},
-};
-use derive_builder::Builder;
-use rustc_hash::FxHashMap as HashMap;
-use std::borrow::Cow;
-
-/// Struct serving information to route incoming connections, like the server name to be handled and TLS certs/keys settings.
-#[derive(Builder)]
-pub struct Backend<T>
-where
-  T: CryptoSource,
-{
-  #[builder(setter(into))]
-  /// backend application name, e.g., app1
-  pub app_name: String,
-  #[builder(setter(custom))]
-  /// server name, e.g., example.com, as an ASCII lowercase String
-  pub server_name: String,
-  /// struct of the reverse proxy serving incoming requests
-  pub reverse_proxy: ReverseProxy,
-
-  /// tls settings: https redirection with 30x
-  #[builder(default)]
-  pub https_redirection: Option<bool>,
-
-  /// TLS settings: source meta for server cert, key, client ca cert
-  #[builder(default)]
-  pub crypto_source: Option<T>,
-}
-impl<'a, T> BackendBuilder<T>
-where
-  T: CryptoSource,
-{
-  pub fn server_name(&mut self, server_name: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.server_name = Some(server_name.into().to_ascii_lowercase());
-    self
-  }
-}
-
-/// HashMap and some meta information for multiple Backend structs.
-pub struct Backends<T>
-where
-  T: CryptoSource,
-{
-  pub apps: HashMap<ServerNameBytesExp, Backend<T>>, // looked up by the host extracted via hyper::Uri
-  pub default_server_name_bytes: Option<ServerNameBytesExp>, // for plaintext http
-}
-
-impl<T> Backends<T>
-where
-  T: CryptoSource,
-{
-  #[allow(clippy::new_without_default)]
-  pub fn new() -> Self {
-    Backends {
-      apps: HashMap::<ServerNameBytesExp, Backend<T>>::default(),
-      default_server_name_bytes: None,
-    }
-  }
-}

diff --git a/legacy-lib/src/backend/sticky_cookie.rs b/legacy-lib/src/backend/sticky_cookie.rs
deleted file mode 100644
index 998426bf..00000000
--- a/legacy-lib/src/backend/sticky_cookie.rs
+++ /dev/null
@@ -1,208 +0,0 @@
-use std::borrow::Cow;
-
-use crate::error::*;
-use chrono::{TimeZone, Utc};
-use derive_builder::Builder;
-
-#[derive(Debug, Clone, Builder)]
-/// Cookie value only, used for COOKIE in req
-pub struct StickyCookieValue {
-  #[builder(setter(custom))]
-  /// Field name indicating the sticky cookie
-  pub name: String,
-  #[builder(setter(custom))]
-  /// Upstream server_id
-  pub value: String,
-}
-impl<'a> StickyCookieValueBuilder {
-  pub fn name(&mut self, v: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.name = Some(v.into().to_ascii_lowercase());
-    self
-  }
-  pub fn value(&mut self, v: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.value = Some(v.into().to_string());
-    self
-  }
-}
-impl StickyCookieValue {
-  pub fn try_from(value: &str, expected_name: &str) -> Result<Self> {
-    if !value.starts_with(expected_name) {
-      return Err(RpxyError::LoadBalance(
-        "Failed to convert the cookie from a string".to_string(),
-      ));
-    };
-    let kv = value.split('=').map(|v| v.trim()).collect::<Vec<&str>>();
-    if kv.len() != 2 {
-      return Err(RpxyError::LoadBalance("Invalid cookie structure".to_string()));
-    };
-    if kv[1].is_empty() {
-      return Err(RpxyError::LoadBalance("No sticky cookie value".to_string()));
-    }
-    Ok(StickyCookieValue {
-      name: expected_name.to_string(),
-      value: kv[1].to_string(),
-    })
-  }
-}
-
-#[derive(Debug, Clone, Builder)]
-/// Struct describing sticky cookie meta information used for SET-COOKIE in res
-pub struct StickyCookieInfo {
-  #[builder(setter(custom))]
-  /// Unix time
-  pub expires: i64,
-
-  #[builder(setter(custom))]
-  /// Domain
-  pub domain: String,
-
-  #[builder(setter(custom))]
-  /// Path
-  pub path: String,
-}
-impl<'a> StickyCookieInfoBuilder {
-  pub fn domain(&mut self, v: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.domain = Some(v.into().to_ascii_lowercase());
-    self
-  }
-  pub fn path(&mut self, v: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.path = Some(v.into().to_ascii_lowercase());
-    self
-  }
-  pub fn expires(&mut self, duration_secs: i64) -> &mut Self {
-    let current = Utc::now().timestamp();
-    self.expires = Some(current + duration_secs);
-    self
-  }
-}
-
-#[derive(Debug, Clone, Builder)]
-/// Struct describing the sticky cookie
-pub struct StickyCookie {
-  #[builder(setter(custom))]
-  /// Upstream server_id
-  pub value: StickyCookieValue,
-  #[builder(setter(custom), default)]
-  /// Sticky cookie meta information (domain, path, expiration)
-  pub info: Option<StickyCookieInfo>,
-}
-
-impl<'a> StickyCookieBuilder {
-  pub fn value(&mut self, n: impl Into<Cow<'a, str>>, v: impl Into<Cow<'a, str>>) -> &mut Self {
-    self.value = Some(StickyCookieValueBuilder::default().name(n).value(v).build().unwrap());
-    self
-  }
-  pub fn info(
-    &mut self,
-    domain: impl Into<Cow<'a, str>>,
-    path: impl Into<Cow<'a, str>>,
-    duration_secs: i64,
-  ) -> &mut Self {
-    let info = StickyCookieInfoBuilder::default()
-      .domain(domain)
-      .path(path)
-      .expires(duration_secs)
-      .build()
-      .unwrap();
-    self.info = Some(Some(info));
-    self
-  }
-}
-
-impl TryInto<String> for StickyCookie {
-  type Error = RpxyError;
-
-  fn try_into(self) -> Result<String> {
-    if self.info.is_none() {
-      return Err(RpxyError::LoadBalance(
-        "Failed to convert the cookie into a string: no meta information".to_string(),
-      ));
-    }
-    let info = self.info.unwrap();
-    let chrono::LocalResult::Single(expires_timestamp) = Utc.timestamp_opt(info.expires, 0) else {
-      return Err(RpxyError::LoadBalance("Failed to convert the cookie into a string".to_string()));
-    };
-    let exp_str = expires_timestamp.format("%a, %d-%b-%Y %T GMT").to_string();
-    let max_age = info.expires - Utc::now().timestamp();
-
-    Ok(format!(
-      "{}={}; expires={}; Max-Age={}; path={}; domain={}",
-      self.value.name, self.value.value, exp_str, max_age, info.path, info.domain
-    ))
-  }
-}
-
-#[derive(Debug, Clone)]
-/// Configuration to serve incoming requests in the manner of "sticky cookie".
-/// Includes a dictionary to map ids contained in the cookie to upstream destinations,
-/// and the expiration of the cookie.
-/// "domain" and "path" in the cookie will be the same as in the reverse proxy options.
-pub struct StickyCookieConfig {
-  pub name: String,
-  pub domain: String,
-  pub path: String,
-  pub duration: i64,
-}
-impl<'a> StickyCookieConfig {
-  pub fn build_sticky_cookie(&self, v: impl Into<Cow<'a, str>>) -> Result<StickyCookie> {
-    StickyCookieBuilder::default()
-      .value(self.name.clone(), v)
-      .info(&self.domain, &self.path, self.duration)
-      .build()
-      .map_err(|_| RpxyError::LoadBalance("Failed to build sticky cookie from config".to_string()))
-  }
-}
-
-#[cfg(test)]
-mod tests {
-  use super::*;
-  use crate::constants::STICKY_COOKIE_NAME;
-
-  #[test]
-  fn config_works() {
-    let config = StickyCookieConfig {
-      name: STICKY_COOKIE_NAME.to_string(),
-      domain: "example.com".to_string(),
-      path: "/path".to_string(),
-      duration: 100,
-    };
-    let expires_unix = Utc::now().timestamp() + 100;
-    let sc_string: Result<String> = config.build_sticky_cookie("test_value").unwrap().try_into();
-    let expires_date_string = Utc
-      .timestamp_opt(expires_unix, 0)
-      .unwrap()
-      .format("%a, %d-%b-%Y %T GMT")
-      .to_string();
-    assert_eq!(
-      sc_string.unwrap(),
-      format!(
-        "{}=test_value; expires={}; Max-Age={}; path=/path; domain=example.com",
-        STICKY_COOKIE_NAME, expires_date_string, 100
-      )
-    );
-  }
-  #[test]
-  fn to_string_works() {
-    let sc = StickyCookie {
-      value: StickyCookieValue {
-        name: STICKY_COOKIE_NAME.to_string(),
-        value: "test_value".to_string(),
-      },
-      info: Some(StickyCookieInfo {
-        expires: 1686221173i64,
-        domain: "example.com".to_string(),
-        path: "/path".to_string(),
-      }),
-    };
-    let sc_string: Result<String> = sc.try_into();
-    let max_age = 1686221173i64 - Utc::now().timestamp();
-    assert!(sc_string.is_ok());
-    assert_eq!(
-      sc_string.unwrap(),
-      format!(
-        "{}=test_value; expires=Thu, 08-Jun-2023 10:46:13 GMT; Max-Age={}; path=/path; domain=example.com",
-        STICKY_COOKIE_NAME, max_age
-      )
-    );
-  }
-}

diff --git a/legacy-lib/src/backend/upstream.rs b/legacy-lib/src/backend/upstream.rs
deleted file mode 100644
index 2bfd2d68..00000000
--- a/legacy-lib/src/backend/upstream.rs
+++ /dev/null
@@ -1,201 +0,0 @@
-#[cfg(feature = "sticky-cookie")]
-use super::load_balance::LbStickyRoundRobinBuilder;
-use super::load_balance::{load_balance_options as lb_opts, LbRandomBuilder, LbRoundRobinBuilder, LoadBalance};
-use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption};
-use crate::log::*;
-#[cfg(feature = "sticky-cookie")]
-use base64::{engine::general_purpose, Engine as _};
-use derive_builder::Builder;
-use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet};
-#[cfg(feature = "sticky-cookie")]
-use sha2::{Digest, Sha256};
-use std::borrow::Cow;
-#[derive(Debug, Clone)]
-pub struct ReverseProxy {
-  pub upstream: HashMap<PathNameBytesExp, UpstreamGroup>, // TODO: Not sure a HashMap is the right structure; the longest prefix match via max_by_key also looks wasteful...
-}
-
-impl ReverseProxy {
-  /// Get an appropriate upstream destination for the given path string.
-  pub fn get<'a>(&self, path_str: impl Into<Cow<'a, str>>) -> Option<&UpstreamGroup> {
-    // A trie could be used for longest prefix matching, but the number of route entries is expected
-    // to be small, so this is sufficient cost-wise.
-    let path_bytes = &path_str.to_path_name_vec();
-
-    let matched_upstream = self
-      .upstream
-      .iter()
-      .filter(|(route_bytes, _)| {
-        match path_bytes.starts_with(route_bytes) {
-          true => {
-            route_bytes.len() == 1 // route = '/', i.e., default
-              || match path_bytes.get(route_bytes.len()) {
-                None => true, // exact case
-                Some(p) => p == &b'/', // sub-path case
-              }
-          }
-          _ => false,
-        }
-      })
-      .max_by_key(|(route_bytes, _)| route_bytes.len());
-    if let Some((_path, u)) = matched_upstream {
-      debug!(
-        "Found upstream: {:?}",
-        String::from_utf8(_path.0.clone()).unwrap_or_else(|_| "".to_string())
-      );
-      Some(u)
-    } else {
-      None
-    }
-  }
-}
-
-#[derive(Debug, Clone)]
-/// Upstream struct just containing a uri without a path
-pub struct Upstream {
-  /// Base uri without a specific path
-  pub uri: hyper::Uri,
-}
-impl Upstream {
-  #[cfg(feature = "sticky-cookie")]
-  /// Hashing the uri with the index to avoid collisions
-  pub fn calculate_id_with_index(&self, index: usize) -> String {
-    let mut hasher = Sha256::new();
-    let uri_string = format!("{}&index={}", self.uri.clone(), index);
-    hasher.update(uri_string.as_bytes());
-    let digest = hasher.finalize();
-    general_purpose::URL_SAFE_NO_PAD.encode(digest)
-  }
-}
-#[derive(Debug, Clone, Builder)]
-/// Struct serving multiple upstream servers for, e.g., load balancing.
-pub struct UpstreamGroup {
-  #[builder(setter(custom))]
-  /// Upstream server(s)
-  pub upstream: Vec<Upstream>,
-  #[builder(setter(custom), default)]
-  /// Path like "/path" in [[PathNameBytesExp]] associated with the upstream server(s)
-  pub path: PathNameBytesExp,
-  #[builder(setter(custom), default)]
-  /// Path in [[PathNameBytesExp]] that will be used to replace the "path" part of the incoming url
-  pub replace_path: Option<PathNameBytesExp>,
-
-  #[builder(setter(custom), default)]
-  /// Load balancing option
-  pub lb: LoadBalance,
-  #[builder(setter(custom), default)]
-  /// Activated upstream options defined in [[UpstreamOption]]
-  pub opts: HashSet<UpstreamOption>,
-}
-
-impl UpstreamGroupBuilder {
-  pub fn upstream(&mut self, upstream_vec: &[Upstream]) -> &mut Self {
-    self.upstream = Some(upstream_vec.to_vec());
-    self
-  }
-  pub fn path(&mut self, v: &Option<String>) -> &mut Self {
-    let path = match v {
-      Some(p) => p.to_path_name_vec(),
-      None => "/".to_path_name_vec(),
-    };
-    self.path = Some(path);
-    self
-  }
-  pub fn replace_path(&mut self, v: &Option<String>) -> &mut Self {
-    self.replace_path = Some(
-      v.to_owned()
-        .as_ref()
-        .map_or_else(|| None, |v| Some(v.to_path_name_vec())),
-    );
-    self
-  }
-  pub fn lb(
-    &mut self,
-    v: &Option<String>,
-    // upstream_num: &usize,
-    upstream_vec: &Vec<Upstream>,
-    _server_name: &str,
-    _path_opt: &Option<String>,
-  ) -> &mut Self {
-    let upstream_num = &upstream_vec.len();
-    let lb = if let Some(x) = v {
-      match x.as_str() {
-        lb_opts::FIX_TO_FIRST => LoadBalance::FixToFirst,
-        lb_opts::RANDOM => LoadBalance::Random(LbRandomBuilder::default().num_upstreams(upstream_num).build().unwrap()),
-        lb_opts::ROUND_ROBIN => LoadBalance::RoundRobin(
-          LbRoundRobinBuilder::default()
-            .num_upstreams(upstream_num)
-            .build()
-            .unwrap(),
-        ),
-        #[cfg(feature = "sticky-cookie")]
-        lb_opts::STICKY_ROUND_ROBIN => LoadBalance::StickyRoundRobin(
-          LbStickyRoundRobinBuilder::default()
-            .num_upstreams(upstream_num)
-            .sticky_config(_server_name, _path_opt)
-            .upstream_maps(upstream_vec) // TODO:
-            .build()
-            .unwrap(),
-        ),
-        _ => {
-          error!("Specified load balancing option is invalid.");
-          LoadBalance::default()
-        }
-      }
-    } else {
-      LoadBalance::default()
-    };
-    self.lb = Some(lb);
-    self
-  }
-  pub fn opts(&mut self, v: &Option<Vec<String>>) -> &mut Self {
-    let opts = if let Some(opts) = v {
-      opts
-        .iter()
-        .filter_map(|str| UpstreamOption::try_from(str.as_str()).ok())
-        .collect::<HashSet<_>>()
-    } else {
-      Default::default()
-    };
-    self.opts = Some(opts);
-    self
-  }
-}
-
-impl UpstreamGroup {
-  /// Get an enabled option of load balancing [[LoadBalance]]
-  pub fn get(&self, context_to_lb: &Option<LbContext>) -> (Option<&Upstream>, Option<LbContext>) {
-    let pointer_to_upstream = self.lb.get_context(context_to_lb);
-    debug!("Upstream of index {} is chosen.", pointer_to_upstream.ptr);
-    debug!("Context to LB (Cookie in Req): {:?}", context_to_lb);
-    debug!(
-      "Context from LB (Set-Cookie in Res): {:?}",
-      pointer_to_upstream.context_lb
-    );
-    (
-      self.upstream.get(pointer_to_upstream.ptr),
-      pointer_to_upstream.context_lb,
-    )
-  }
-}
-
-#[cfg(test)]
-mod test {
-  #[allow(unused)]
-  use super::*;
-
-  #[cfg(feature = "sticky-cookie")]
-  #[test]
-  fn calc_id_works() {
-    let uri = "https://www.rust-lang.org".parse::<hyper::Uri>().unwrap();
-    let upstream = Upstream { uri };
-    assert_eq!(
-      "eGsjoPbactQ1eUJjafYjPT3ekYZQkaqJnHdA_FMSkgM",
-      upstream.calculate_id_with_index(0)
-    );
-    assert_eq!(
-      "tNVXFJ9eNCT2mFgKbYq35XgH5q93QZtfU8piUiiDxVA",
-      upstream.calculate_id_with_index(1)
-    );
-  }
-}

diff --git a/legacy-lib/src/backend/upstream_opts.rs b/legacy-lib/src/backend/upstream_opts.rs
deleted file mode 100644
index a96bb58c..00000000
--- a/legacy-lib/src/backend/upstream_opts.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-use crate::error::*;
-
-#[derive(Debug, Clone, Hash, Eq, PartialEq)]
-pub enum UpstreamOption {
-  OverrideHost,
-  UpgradeInsecureRequests,
-  ForceHttp11Upstream,
-  ForceHttp2Upstream,
-  // TODO: Add more options for header override
-}
-impl TryFrom<&str> for UpstreamOption {
-  type Error = RpxyError;
-  fn try_from(val: &str) -> Result<Self> {
-    match val {
-      "override_host" => Ok(Self::OverrideHost),
-      "upgrade_insecure_requests" => Ok(Self::UpgradeInsecureRequests),
-      "force_http11_upstream" => Ok(Self::ForceHttp11Upstream),
-      "force_http2_upstream" => Ok(Self::ForceHttp2Upstream),
-      _ => Err(RpxyError::Other(anyhow!("Unsupported header option"))),
-    }
-  }
-}

diff --git a/legacy-lib/src/certs.rs b/legacy-lib/src/certs.rs
deleted file mode 100644
index c9cfafd5..00000000
--- a/legacy-lib/src/certs.rs
+++ /dev/null
@@ -1,91 +0,0 @@
-use async_trait::async_trait;
-use rustc_hash::FxHashSet as HashSet;
-use rustls::{
-  sign::{any_supported_type, CertifiedKey},
-  Certificate, OwnedTrustAnchor, PrivateKey,
-};
-use std::io;
-use x509_parser::prelude::*;
-
-#[async_trait]
-// Trait to read certs and keys anywhere from KVS, file, sqlite, etc.
-pub trait CryptoSource {
-  type Error;
-
-  /// read crypto materials from source
-  async fn read(&self) -> Result<CertsAndKeys, Self::Error>;
-
-  /// Returns true when mutual tls is enabled
-  fn is_mutual_tls(&self) -> bool;
-}
-
-/// Certificates and private keys in rustls loaded from files
-#[derive(Debug, PartialEq, Eq, Clone)]
-pub struct CertsAndKeys {
-  pub certs: Vec<Certificate>,
-  pub cert_keys: Vec<PrivateKey>,
-  pub client_ca_certs: Option<Vec<Certificate>>,
-}
-
-impl CertsAndKeys {
-  pub fn parse_server_certs_and_keys(&self) -> Result<CertifiedKey, anyhow::Error> {
-    // for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() {
-    let signing_key = self
-      .cert_keys
-      .iter()
-      .find_map(|k| {
-        if let Ok(sk) = any_supported_type(k) {
-          Some(sk)
-        } else {
-          None
-        }
-      })
-      .ok_or_else(|| {
-        io::Error::new(
-          io::ErrorKind::InvalidInput,
-          "Unable to find a valid certificate and key",
-        )
-      })?;
-    Ok(CertifiedKey::new(self.certs.clone(), signing_key))
-  }
-
-  pub fn parse_client_ca_certs(&self) -> Result<(Vec<OwnedTrustAnchor>, HashSet<Vec<u8>>), anyhow::Error> {
-    let certs = self.client_ca_certs.as_ref().ok_or(anyhow::anyhow!("No client cert"))?;
-
-    let owned_trust_anchors: Vec<_> = certs
-      .iter()
-      .map(|v| {
-        // let trust_anchor = tokio_rustls::webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
-        let trust_anchor = webpki::TrustAnchor::try_from_cert_der(&v.0).unwrap();
-        rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
-          trust_anchor.subject,
-          trust_anchor.spki,
-          trust_anchor.name_constraints,
-        )
-      })
-      .collect();
-
-    // TODO: SKID is not used currently
-    let subject_key_identifiers: HashSet<_> = certs
-      .iter()
-      .filter_map(|v| {
-        // retrieve ca key id (subject key id)
-        let cert = parse_x509_certificate(&v.0).unwrap().1;
-        let subject_key_ids = cert
-          .iter_extensions()
-          .filter_map(|ext| match ext.parsed_extension() {
-            ParsedExtension::SubjectKeyIdentifier(skid) => Some(skid),
-            _ => None,
-          })
-          .collect::<Vec<_>>();
-        if !subject_key_ids.is_empty() {
-          Some(subject_key_ids[0].0.to_owned())
-        } else {
-          None
-        }
-      })
-      .collect();
-
-    Ok((owned_trust_anchors, subject_key_identifiers))
-  }
-}

diff --git a/legacy-lib/src/constants.rs b/legacy-lib/src/constants.rs
deleted file mode 100644
index ebec1fc0..00000000
--- a/legacy-lib/src/constants.rs
+++ /dev/null
@@ -1,45 +0,0 @@
-pub const RESPONSE_HEADER_SERVER: &str = "rpxy";
-// pub const LISTEN_ADDRESSES_V4: &[&str] = &["0.0.0.0"];
-// pub const LISTEN_ADDRESSES_V6: &[&str] = &["[::]"];
-pub const TCP_LISTEN_BACKLOG: u32 = 1024;
-// pub const HTTP_LISTEN_PORT: u16 = 8080;
-// pub const HTTPS_LISTEN_PORT: u16 = 8443;
-pub const PROXY_TIMEOUT_SEC: u64 = 60;
-pub const UPSTREAM_TIMEOUT_SEC: u64 = 60;
-pub const TLS_HANDSHAKE_TIMEOUT_SEC: u64 = 15; // default as with the firefox browser
-pub const MAX_CLIENTS: usize = 512;
-pub const MAX_CONCURRENT_STREAMS: u32 = 64;
-pub const CERTS_WATCH_DELAY_SECS: u32 = 60;
-pub const LOAD_CERTS_ONLY_WHEN_UPDATED: bool = true;
-
-// #[cfg(feature = "http3")]
-// pub const H3_RESPONSE_BUF_SIZE: usize = 65_536; // 64KB
-// #[cfg(feature = "http3")]
-// pub const H3_REQUEST_BUF_SIZE: usize = 65_536; // 64KB // handled by quinn
-
-#[allow(non_snake_case)]
-#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-pub mod H3 {
-  pub const ALT_SVC_MAX_AGE: u32 = 3600;
-  pub const REQUEST_MAX_BODY_SIZE: usize = 268_435_456; // 256MB
-  pub const MAX_CONCURRENT_CONNECTIONS: u32 = 4096;
-  pub const MAX_CONCURRENT_BIDISTREAM: u32 = 64;
-  pub const MAX_CONCURRENT_UNISTREAM: u32 = 64;
-  pub const MAX_IDLE_TIMEOUT: u64 = 10; // secs
-}
-
-#[cfg(feature = "sticky-cookie")]
-/// For load-balancing with sticky cookie
-pub const STICKY_COOKIE_NAME: &str = "rpxy_srv_id";
-
-#[cfg(feature = "cache")]
-// Number of entries in the cache
-pub const MAX_CACHE_ENTRY: usize = 1_000;
-#[cfg(feature = "cache")]
-// Max size for each file in bytes
-pub const MAX_CACHE_EACH_SIZE: usize = 65_535;
-#[cfg(feature = "cache")]
-// Cached on memory if the size is less than or equal to this value
-pub const MAX_CACHE_EACH_SIZE_ON_MEMORY: usize = 4_096;
-
-// TODO: max cache size in total

diff --git a/legacy-lib/src/error.rs b/legacy-lib/src/error.rs
deleted file mode 100644
index c672682d..00000000
--- a/legacy-lib/src/error.rs
+++ /dev/null
@@ -1,86 +0,0 @@
-pub use anyhow::{anyhow, bail, ensure, Context};
-use std::io;
-use thiserror::Error;
-
-pub type Result<T> = std::result::Result<T, RpxyError>;
-
-/// Describes things that can go wrong in the Rpxy
-#[derive(Debug, Error)]
-pub enum RpxyError {
-  #[error("Proxy build error: {0}")]
-  ProxyBuild(#[from] crate::proxy::ProxyBuilderError),
-
-  #[error("Backend build error: {0}")]
-  BackendBuild(#[from] crate::backend::BackendBuilderError),
-
-  #[error("MessageHandler build error: {0}")]
-  HandlerBuild(#[from] crate::handler::HttpMessageHandlerBuilderError),
-
-  #[error("Config builder error: {0}")]
-  ConfigBuild(&'static str),
-
-  #[error("Http Message Handler Error: {0}")]
-  Handler(&'static str),
-
-  #[error("Cache Error: {0}")]
-  Cache(&'static str),
-
-  #[error("Http Request Message Error: {0}")]
-  Request(&'static str),
-
-  #[error("TCP/UDP Proxy Layer Error: {0}")]
-  Proxy(String),
-
-  #[allow(unused)]
-  #[error("LoadBalance Layer Error: {0}")]
-  LoadBalance(String),
-
-  #[error("I/O Error: {0}")]
-  Io(#[from] io::Error),
-
-  // #[error("Toml Deserialization Error")]
-  // TomlDe(#[from] toml::de::Error),
-  #[cfg(feature = "http3-quinn")]
-  #[error("Quic Connection Error [quinn]: {0}")]
-  QuicConn(#[from] quinn::ConnectionError),
-
-  #[cfg(feature = "http3-s2n")]
-  #[error("Quic Connection Error [s2n-quic]: {0}")]
-  QUicConn(#[from] s2n_quic::connection::Error),
-
-  #[cfg(feature = "http3-quinn")]
-  #[error("H3 Error [quinn]: {0}")]
-  H3(#[from] h3::Error),
-
-  #[cfg(feature = "http3-s2n")]
-  #[error("H3 Error [s2n-quic]: {0}")]
-  H3(#[from] s2n_quic_h3::h3::Error),
-
-  #[error("rustls Connection Error: {0}")]
-  Rustls(#[from] rustls::Error),
-
-  #[error("Hyper Error: {0}")]
-  Hyper(#[from] hyper::Error),
-
-  #[error("Hyper Http Error: {0}")]
-  HyperHttp(#[from] hyper::http::Error),
-
-  #[error("Hyper Http HeaderValue Error: {0}")]
-  HyperHeaderValue(#[from] hyper::header::InvalidHeaderValue),
-
-  #[error("Hyper Http HeaderName Error: {0}")]
-  HyperHeaderName(#[from] hyper::header::InvalidHeaderName),
-
-  #[error(transparent)]
-  Other(#[from] anyhow::Error),
-}
-
-#[allow(dead_code)]
-#[derive(Debug, Error, Clone)]
-pub enum ClientCertsError {
-  #[error("TLS Client Certificate is Required for Given SNI: {0}")]
-  ClientCertRequired(String),
-
-  #[error("Inconsistent TLS Client Certificate for Given SNI: {0}")]
-  InconsistentClientCert(String),
-}

diff --git a/legacy-lib/src/globals.rs b/legacy-lib/src/globals.rs
deleted file mode 100644
index 02605a60..00000000
--- a/legacy-lib/src/globals.rs
+++ /dev/null
@@ -1,325 +0,0 @@
-use crate::{
-  backend::{
-    Backend, BackendBuilder, Backends, ReverseProxy, Upstream, UpstreamGroup, UpstreamGroupBuilder, UpstreamOption,
-  },
-  certs::CryptoSource,
-  constants::*,
-  error::RpxyError,
-  log::*,
-  utils::{BytesName, PathNameBytesExp},
-};
-use rustc_hash::FxHashMap as HashMap;
-use std::net::SocketAddr;
-use std::sync::{
-  atomic::{AtomicUsize, Ordering},
-  Arc,
-};
-use tokio::time::Duration;
-
-/// Global object containing proxy configurations and shared objects like counters.
-/// Note that Globals contains no Mutex or RwLock; it is simply the context shared among async tasks.
-pub struct Globals<T>
-where
-  T: CryptoSource,
-{
-  /// Configuration parameters for proxy transport and request handlers
-  pub proxy_config: ProxyConfig, // TODO: wrap proxy_config in an Arc and pass only that around. Maybe backends too?
-
-  /// Backend application objects to which the http request handler forwards incoming requests
-  pub backends: Backends<T>,
-
-  /// Shared context - Counter for serving requests
-  pub request_count: RequestCount,
-
-  /// Shared context - Async task runtime handler
-  pub runtime_handle: tokio::runtime::Handle,
-
-  /// Shared context - Notify object to stop async tasks
-  pub term_notify: Option<Arc<tokio::sync::Notify>>,
-}
-
-/// Configuration parameters for proxy transport and request handlers
-#[derive(PartialEq, Eq, Clone)]
-pub struct ProxyConfig {
-  pub listen_sockets: Vec<SocketAddr>, // when instantiating the server
-  pub http_port: Option<u16>, // when instantiating the server
-  pub https_port: Option<u16>, // when instantiating the server
-  pub tcp_listen_backlog: u32, // when instantiating the server
-
-  pub proxy_timeout: Duration, // when serving requests at Proxy
-  pub upstream_timeout: Duration, // when serving requests at Handler
-
-  pub max_clients: usize, // when serving requests
-  pub max_concurrent_streams: u32, // when instantiating the server
-  pub keepalive: bool, // when instantiating the server
-
-  // experimentals
-  pub sni_consistency: bool, // Handler
-
-  #[cfg(feature = "cache")]
-  pub cache_enabled: bool,
-  #[cfg(feature = "cache")]
-  pub cache_dir: Option<std::path::PathBuf>,
-  #[cfg(feature = "cache")]
-  pub cache_max_entry: usize,
-  #[cfg(feature = "cache")]
-  pub cache_max_each_size: usize,
-  #[cfg(feature = "cache")]
-  pub cache_max_each_size_on_memory: usize,
-
-  // All needed to make the packet acceptor
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub http3: bool,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_alt_svc_max_age: u32,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_request_max_body_size: usize,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_max_concurrent_bidistream: u32,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_max_concurrent_unistream: u32,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_max_concurrent_connections: u32,
-  #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-  pub h3_max_idle_timeout: Option<Duration>,
-}
-
-impl Default for ProxyConfig {
-  fn default() -> Self {
-    Self {
-      listen_sockets: Vec::new(),
-      http_port: None,
-      https_port: None,
-      tcp_listen_backlog: TCP_LISTEN_BACKLOG,
-
-      // TODO: Reconsider each timeout value
-      proxy_timeout: Duration::from_secs(PROXY_TIMEOUT_SEC),
-      upstream_timeout: Duration::from_secs(UPSTREAM_TIMEOUT_SEC),
-
-      max_clients: MAX_CLIENTS,
-      max_concurrent_streams: MAX_CONCURRENT_STREAMS,
-      keepalive: true,
-
-      sni_consistency: true,
-
-      #[cfg(feature = "cache")]
-      cache_enabled: false,
-      #[cfg(feature = "cache")]
-      cache_dir: None,
-      #[cfg(feature = "cache")]
-      cache_max_entry: MAX_CACHE_ENTRY,
-      #[cfg(feature = "cache")]
-      cache_max_each_size: MAX_CACHE_EACH_SIZE,
-      #[cfg(feature = "cache")]
-      cache_max_each_size_on_memory: MAX_CACHE_EACH_SIZE_ON_MEMORY,
-
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      http3: false,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_alt_svc_max_age: H3::ALT_SVC_MAX_AGE,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_request_max_body_size: H3::REQUEST_MAX_BODY_SIZE,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_max_concurrent_connections: H3::MAX_CONCURRENT_CONNECTIONS,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_max_concurrent_bidistream: H3::MAX_CONCURRENT_BIDISTREAM,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_max_concurrent_unistream: H3::MAX_CONCURRENT_UNISTREAM,
-      #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-      h3_max_idle_timeout: Some(Duration::from_secs(H3::MAX_IDLE_TIMEOUT)),
-    }
-  }
-}
-
-/// Configuration parameters for backend applications
-#[derive(PartialEq, Eq, Clone)]
-pub struct AppConfigList<T>
-where
-  T: CryptoSource,
-{
-  pub inner: Vec<AppConfig<T>>,
-  pub default_app: Option<String>,
-}
-impl<T> TryInto<Backends<T>> for AppConfigList<T>
-where
-  T: CryptoSource + Clone,
-{
-  type Error = RpxyError;
-
-  fn try_into(self) -> Result<Backends<T>, Self::Error> {
-    let mut backends = Backends::new();
-    for app_config in self.inner.iter() {
-      let backend = app_config.try_into()?;
-      backends
-        .apps
-        .insert(app_config.server_name.clone().to_server_name_vec(), backend);
-      info!(
-        "Registering application {} ({})",
-        &app_config.server_name, &app_config.app_name
-      );
-    }
-
-    // default backend application for plaintext http requests
-    if let Some(d) = self.default_app {
-      let d_sn: Vec<&str> = backends
-        .apps
-        .iter()
-        .filter(|(_k, v)| v.app_name == d)
-        .map(|(_, v)| v.server_name.as_ref())
-        .collect();
-      if !d_sn.is_empty() {
-        info!(
-          "Serving plaintext http for requests to unconfigured server_name by app {} (server_name: {}).",
-          d, d_sn[0]
-        );
-        backends.default_server_name_bytes = Some(d_sn[0].to_server_name_vec());
-      }
-    }
-    Ok(backends)
-  }
-}
-
-/// Configuration parameters for a single backend application
-#[derive(PartialEq, Eq, Clone)]
-pub struct AppConfig<T>
-where
-  T: CryptoSource,
-{
-  pub app_name: String,
-  pub server_name: String,
-  pub reverse_proxy: Vec<ReverseProxyConfig>,
-  pub tls: Option<TlsConfig<T>>,
-}
-impl<T> TryInto<Backend<T>> for &AppConfig<T>
-where
-  T: CryptoSource + Clone,
-{
-  type Error = RpxyError;
-
-  fn try_into(self) -> Result<Backend<T>, Self::Error> {
-    // backend builder
-    let mut backend_builder = BackendBuilder::default();
-    // reverse proxy settings
-    let reverse_proxy = self.try_into()?;
-
-    backend_builder
-      .app_name(self.app_name.clone())
-      .server_name(self.server_name.clone())
-      .reverse_proxy(reverse_proxy);
-
-    // TLS settings and build the backend instance
-    let backend = if self.tls.is_none() {
-      backend_builder.build().map_err(RpxyError::BackendBuild)?
-    } else {
-      let tls = self.tls.as_ref().unwrap();
-
-      backend_builder
-        .https_redirection(Some(tls.https_redirection))
-        .crypto_source(Some(tls.inner.clone()))
-        .build()?
- }; - Ok(backend) - } -} -impl TryInto for &AppConfig -where - T: CryptoSource + Clone, -{ - type Error = RpxyError; - - fn try_into(self) -> Result { - let mut upstream: HashMap = HashMap::default(); - - self.reverse_proxy.iter().for_each(|rpo| { - let upstream_vec: Vec = rpo.upstream.iter().map(|x| x.try_into().unwrap()).collect(); - // let upstream_iter = rpo.upstream.iter().map(|x| x.to_upstream().unwrap()); - // let lb_upstream_num = vec_upstream.len(); - let elem = UpstreamGroupBuilder::default() - .upstream(&upstream_vec) - .path(&rpo.path) - .replace_path(&rpo.replace_path) - .lb(&rpo.load_balance, &upstream_vec, &self.server_name, &rpo.path) - .opts(&rpo.upstream_options) - .build() - .unwrap(); - - upstream.insert(elem.path.clone(), elem); - }); - if self.reverse_proxy.iter().filter(|rpo| rpo.path.is_none()).count() >= 2 { - error!("Multiple default reverse proxy setting"); - return Err(RpxyError::ConfigBuild("Invalid reverse proxy setting")); - } - - if !(upstream.iter().all(|(_, elem)| { - !(elem.opts.contains(&UpstreamOption::ForceHttp11Upstream) - && elem.opts.contains(&UpstreamOption::ForceHttp2Upstream)) - })) { - error!("Either one of force_http11 or force_http2 can be enabled"); - return Err(RpxyError::ConfigBuild("Invalid upstream option setting")); - } - - Ok(ReverseProxy { upstream }) - } -} - -/// Configuration parameters for single reverse proxy corresponding to the path -#[derive(PartialEq, Eq, Clone)] -pub struct ReverseProxyConfig { - pub path: Option, - pub replace_path: Option, - pub upstream: Vec, - pub upstream_options: Option>, - pub load_balance: Option, -} - -/// Configuration parameters for single upstream destination from a reverse proxy -#[derive(PartialEq, Eq, Clone)] -pub struct UpstreamUri { - pub inner: hyper::Uri, -} -impl TryInto for &UpstreamUri { - type Error = anyhow::Error; - - fn try_into(self) -> std::result::Result { - Ok(Upstream { - uri: self.inner.clone(), - }) - } -} - -/// Configuration parameters on TLS for a single backend application -#[derive(PartialEq, Eq, Clone)] -pub struct TlsConfig -where - T: CryptoSource, -{ - pub inner: T, - pub https_redirection: bool, -} - -#[derive(Debug, Clone, Default)] -/// Counter for serving requests -pub struct RequestCount(Arc); - -impl RequestCount { - pub fn current(&self) -> usize { - self.0.load(Ordering::Relaxed) - } - - pub fn increment(&self) -> usize { - self.0.fetch_add(1, Ordering::Relaxed) - } - - pub fn decrement(&self) -> usize { - let mut count; - while { - count = self.0.load(Ordering::Relaxed); - count > 0 - && self - .0 - .compare_exchange(count, count - 1, Ordering::Relaxed, Ordering::Relaxed) - != Ok(count) - } {} - count - } -} diff --git a/legacy-lib/src/handler/cache.rs b/legacy-lib/src/handler/cache.rs deleted file mode 100644 index 44cdc113..00000000 --- a/legacy-lib/src/handler/cache.rs +++ /dev/null @@ -1,393 +0,0 @@ -use crate::{error::*, globals::Globals, log::*, CryptoSource}; -use base64::{engine::general_purpose, Engine as _}; -use bytes::{Buf, Bytes, BytesMut}; -use http_cache_semantics::CachePolicy; -use hyper::{ - http::{Request, Response}, - Body, -}; -use lru::LruCache; -use sha2::{Digest, Sha256}; -use std::{ - fmt::Debug, - path::{Path, PathBuf}, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, Mutex, - }, - time::SystemTime, -}; -use tokio::{ - fs::{self, File}, - io::{AsyncReadExt, AsyncWriteExt}, - sync::RwLock, -}; - -#[derive(Clone, Debug)] -/// Cache target in hybrid manner of on-memory and file system -pub enum CacheFileOrOnMemory { - /// Pointer to 
the temporary cache file - File(PathBuf), - /// Cached body itself - OnMemory(Vec), -} - -#[derive(Clone, Debug)] -/// Cache object definition -struct CacheObject { - /// Cache policy to determine if the stored cache can be used as a response to a new incoming request - pub policy: CachePolicy, - /// Cache target: on-memory object or temporary file - pub target: CacheFileOrOnMemory, -} - -#[derive(Debug)] -/// Manager inner for cache on file system -struct CacheFileManagerInner { - /// Directory of temporary files - cache_dir: PathBuf, - /// Counter of current cached files - cnt: usize, - /// Async runtime - runtime_handle: tokio::runtime::Handle, -} - -impl CacheFileManagerInner { - /// Build new cache file manager. - /// This first creates cache file dir if not exists, and cleans up the file inside the directory. - /// TODO: Persistent cache is really difficult. `sqlite` or something like that is needed. - async fn new(path: impl AsRef, runtime_handle: &tokio::runtime::Handle) -> Self { - let path_buf = path.as_ref().to_path_buf(); - if let Err(e) = fs::remove_dir_all(path).await { - warn!("Failed to clean up the cache dir: {e}"); - }; - fs::create_dir_all(&path_buf).await.unwrap(); - Self { - cache_dir: path_buf.clone(), - cnt: 0, - runtime_handle: runtime_handle.clone(), - } - } - - /// Create a new temporary file cache - async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> Result { - let cache_filepath = self.cache_dir.join(cache_filename); - let Ok(mut file) = File::create(&cache_filepath).await else { - return Err(RpxyError::Cache("Failed to create file")); - }; - let mut bytes_clone = body_bytes.clone(); - while bytes_clone.has_remaining() { - if let Err(e) = file.write_buf(&mut bytes_clone).await { - error!("Failed to write file cache: {e}"); - return Err(RpxyError::Cache("Failed to write file cache: {e}")); - }; - } - self.cnt += 1; - Ok(CacheFileOrOnMemory::File(cache_filepath)) - } - - /// Retrieve a stored temporary file cache - async fn read(&self, path: impl AsRef) -> Result { - let Ok(mut file) = File::open(&path).await else { - warn!("Cache file object cannot be opened"); - return Err(RpxyError::Cache("Cache file object cannot be opened")); - }; - let (body_sender, res_body) = Body::channel(); - self.runtime_handle.spawn(async move { - let mut sender = body_sender; - let mut buf = BytesMut::new(); - loop { - match file.read_buf(&mut buf).await { - Ok(0) => break, - Ok(_) => sender.send_data(buf.copy_to_bytes(buf.remaining())).await?, - Err(_) => break, - }; - } - Ok(()) as Result<()> - }); - - Ok(res_body) - } - - /// Remove file - async fn remove(&mut self, path: impl AsRef) -> Result<()> { - fs::remove_file(path.as_ref()).await?; - self.cnt -= 1; - debug!("Removed a cache file at {:?} (file count: {})", path.as_ref(), self.cnt); - - Ok(()) - } -} - -#[derive(Debug, Clone)] -/// Cache file manager outer that is responsible to handle `RwLock` -struct CacheFileManager { - inner: Arc>, -} - -impl CacheFileManager { - /// Build manager - async fn new(path: impl AsRef, runtime_handle: &tokio::runtime::Handle) -> Self { - Self { - inner: Arc::new(RwLock::new(CacheFileManagerInner::new(path, runtime_handle).await)), - } - } - /// Evict a temporary file cache - async fn evict(&self, path: impl AsRef) { - // Acquire the write lock - let mut inner = self.inner.write().await; - if let Err(e) = inner.remove(path).await { - warn!("Eviction failed during file object removal: {:?}", e); - }; - } - /// Read a temporary file cache - async fn read(&self, path: impl 
AsRef) -> Result { - let mgr = self.inner.read().await; - mgr.read(&path).await - } - /// Create a temporary file cache - async fn create(&mut self, cache_filename: &str, body_bytes: &Bytes) -> Result { - let mut mgr = self.inner.write().await; - mgr.create(cache_filename, body_bytes).await - } - async fn count(&self) -> usize { - let mgr = self.inner.read().await; - mgr.cnt - } -} - -#[derive(Debug, Clone)] -/// Lru cache manager that is responsible to handle `Mutex` as an outer of `LruCache` -struct LruCacheManager { - inner: Arc>>, // TODO: keyはstring urlでいいのか疑問。全requestに対してcheckすることになりそう - cnt: Arc, -} - -impl LruCacheManager { - /// Build LruCache - fn new(cache_max_entry: usize) -> Self { - Self { - inner: Arc::new(Mutex::new(LruCache::new( - std::num::NonZeroUsize::new(cache_max_entry).unwrap(), - ))), - cnt: Arc::new(AtomicUsize::default()), - } - } - /// Count entries - fn count(&self) -> usize { - self.cnt.load(Ordering::Relaxed) - } - /// Evict an entry - fn evict(&self, cache_key: &str) -> Option<(String, CacheObject)> { - let Ok(mut lock) = self.inner.lock() else { - error!("Mutex can't be locked to evict a cache entry"); - return None; - }; - let res = lock.pop_entry(cache_key); - self.cnt.store(lock.len(), Ordering::Relaxed); - res - } - /// Get an entry - fn get(&self, cache_key: &str) -> Result> { - let Ok(mut lock) = self.inner.lock() else { - error!("Mutex can't be locked for checking cache entry"); - return Err(RpxyError::Cache("Mutex can't be locked for checking cache entry")); - }; - let Some(cached_object) = lock.get(cache_key) else { - return Ok(None); - }; - Ok(Some(cached_object.clone())) - } - /// Push an entry - fn push(&self, cache_key: &str, cache_object: CacheObject) -> Result> { - let Ok(mut lock) = self.inner.lock() else { - error!("Failed to acquire mutex lock for writing cache entry"); - return Err(RpxyError::Cache("Failed to acquire mutex lock for writing cache entry")); - }; - let res = Ok(lock.push(cache_key.to_string(), cache_object)); - self.cnt.store(lock.len(), Ordering::Relaxed); - res - } -} - -#[derive(Clone, Debug)] -pub struct RpxyCache { - /// Managing cache file objects through RwLock's lock mechanism for file lock - cache_file_manager: CacheFileManager, - /// Lru cache storing http message caching policy - inner: LruCacheManager, - /// Async runtime - runtime_handle: tokio::runtime::Handle, - /// Maximum size of each cache file object - max_each_size: usize, - /// Maximum size of cache object on memory - max_each_size_on_memory: usize, -} - -impl RpxyCache { - /// Generate cache storage - pub async fn new(globals: &Globals) -> Option { - if !globals.proxy_config.cache_enabled { - return None; - } - - let path = globals.proxy_config.cache_dir.as_ref().unwrap(); - let cache_file_manager = CacheFileManager::new(path, &globals.runtime_handle).await; - let inner = LruCacheManager::new(globals.proxy_config.cache_max_entry); - - let max_each_size = globals.proxy_config.cache_max_each_size; - let mut max_each_size_on_memory = globals.proxy_config.cache_max_each_size_on_memory; - if max_each_size < max_each_size_on_memory { - warn!( - "Maximum size of on memory cache per entry must be smaller than or equal to the maximum of each file cache" - ); - max_each_size_on_memory = max_each_size; - } - - Some(Self { - cache_file_manager, - inner, - runtime_handle: globals.runtime_handle.clone(), - max_each_size, - max_each_size_on_memory, - }) - } - - /// Count cache entries - pub async fn count(&self) -> (usize, usize, usize) { - let total = 
self.inner.count(); - let file = self.cache_file_manager.count().await; - let on_memory = total - file; - (total, on_memory, file) - } - - /// Get cached response - pub async fn get(&self, req: &Request) -> Option> { - debug!( - "Current cache status: (total, on-memory, file) = {:?}", - self.count().await - ); - let cache_key = req.uri().to_string(); - - // First check cache chance - let Ok(Some(cached_object)) = self.inner.get(&cache_key) else { - return None; - }; - - // Secondly check the cache freshness as an HTTP message - let now = SystemTime::now(); - let http_cache_semantics::BeforeRequest::Fresh(res_parts) = cached_object.policy.before_request(req, now) else { - // Evict stale cache entry. - // This might be okay to keep as is since it would be updated later. - // However, there is no guarantee that newly got objects will be still cacheable. - // So, we have to evict stale cache entries and cache file objects if found. - debug!("Stale cache entry: {cache_key}"); - let _evicted_entry = self.inner.evict(&cache_key); - // For cache file - if let CacheFileOrOnMemory::File(path) = &cached_object.target { - self.cache_file_manager.evict(&path).await; - } - return None; - }; - - // Finally retrieve the file/on-memory object - match cached_object.target { - CacheFileOrOnMemory::File(path) => { - let res_body = match self.cache_file_manager.read(&path).await { - Ok(res_body) => res_body, - Err(e) => { - warn!("Failed to read from file cache: {e}"); - let _evicted_entry = self.inner.evict(&cache_key); - self.cache_file_manager.evict(&path).await; - return None; - } - }; - - debug!("Cache hit from file: {cache_key}"); - Some(Response::from_parts(res_parts, res_body)) - } - CacheFileOrOnMemory::OnMemory(object) => { - debug!("Cache hit from on memory: {cache_key}"); - Some(Response::from_parts(res_parts, Body::from(object))) - } - } - } - - /// Put response into the cache - pub async fn put(&self, uri: &hyper::Uri, body_bytes: &Bytes, policy: &CachePolicy) -> Result<()> { - let my_cache = self.inner.clone(); - let mut mgr = self.cache_file_manager.clone(); - let uri = uri.clone(); - let bytes_clone = body_bytes.clone(); - let policy_clone = policy.clone(); - let max_each_size = self.max_each_size; - let max_each_size_on_memory = self.max_each_size_on_memory; - - self.runtime_handle.spawn(async move { - if bytes_clone.len() > max_each_size { - warn!("Too large to cache"); - return Err(RpxyError::Cache("Too large to cache")); - } - let cache_key = derive_cache_key_from_uri(&uri); - - debug!("Object of size {:?} bytes to be cached", bytes_clone.len()); - - let cache_object = if bytes_clone.len() > max_each_size_on_memory { - let cache_filename = derive_filename_from_uri(&uri); - let target = mgr.create(&cache_filename, &bytes_clone).await?; - debug!("Cached a new cache file: {} - {}", cache_key, cache_filename); - CacheObject { - policy: policy_clone, - target, - } - } else { - debug!("Cached a new object on memory: {}", cache_key); - CacheObject { - policy: policy_clone, - target: CacheFileOrOnMemory::OnMemory(bytes_clone.to_vec()), - } - }; - - if let Some((k, v)) = my_cache.push(&cache_key, cache_object)? { - if k != cache_key { - info!("Over the cache capacity. 
Evict the least recently used entry"); - if let CacheFileOrOnMemory::File(path) = v.target { - mgr.evict(&path).await; - } - } - } - Ok(()) - }); - - Ok(()) - } -} - -fn derive_filename_from_uri(uri: &hyper::Uri) -> String { - let mut hasher = Sha256::new(); - hasher.update(uri.to_string()); - let digest = hasher.finalize(); - general_purpose::URL_SAFE_NO_PAD.encode(digest) -} - -fn derive_cache_key_from_uri(uri: &hyper::Uri) -> String { - uri.to_string() -} - -pub fn get_policy_if_cacheable(req: Option<&Request>, res: Option<&Response>) -> Result> -where - R: Debug, -{ - // deduce the cache policy from req and res - let (Some(req), Some(res)) = (req, res) else { - return Err(RpxyError::Cache("Invalid null request and/or response")); - }; - - let new_policy = CachePolicy::new(req, res); - if new_policy.is_storable() { - // debug!("Response is cacheable: {:?}\n{:?}", req, res.headers()); - Ok(Some(new_policy)) - } else { - Ok(None) - } -} diff --git a/legacy-lib/src/handler/error.rs b/legacy-lib/src/handler/error.rs deleted file mode 100644 index 8fb9d79d..00000000 --- a/legacy-lib/src/handler/error.rs +++ /dev/null @@ -1,16 +0,0 @@ -use http::StatusCode; -use thiserror::Error; - -pub type HttpResult = std::result::Result; - -/// Describes things that can go wrong in the handler -#[derive(Debug, Error)] -pub enum HttpError {} - -impl From for StatusCode { - fn from(e: HttpError) -> StatusCode { - match e { - _ => StatusCode::INTERNAL_SERVER_ERROR, - } - } -} diff --git a/legacy-lib/src/handler/forwarder.rs b/legacy-lib/src/handler/forwarder.rs deleted file mode 100644 index 4764d369..00000000 --- a/legacy-lib/src/handler/forwarder.rs +++ /dev/null @@ -1,147 +0,0 @@ -#[cfg(feature = "cache")] -use super::cache::{get_policy_if_cacheable, RpxyCache}; -use crate::{error::RpxyError, globals::Globals, log::*, CryptoSource}; -use async_trait::async_trait; -#[cfg(feature = "cache")] -use bytes::Buf; -use hyper::{ - body::{Body, HttpBody}, - client::{connect::Connect, HttpConnector}, - http::Version, - Client, Request, Response, -}; -use hyper_rustls::HttpsConnector; - -#[cfg(feature = "cache")] -/// Build a synthetic request to cache -fn build_synth_req_for_cache(req: &Request) -> Request<()> { - let mut builder = Request::builder() - .method(req.method()) - .uri(req.uri()) - .version(req.version()); - // TODO: omits extensions. Is this approach correct? - for (header_key, header_value) in req.headers() { - builder = builder.header(header_key, header_value); - } - builder.body(()).unwrap() -} - -#[async_trait] -/// Definition of the forwarder that simply forwards requests from downstream clients to upstream app servers. -pub trait ForwardRequest { - type Error; - async fn request(&self, req: Request) -> Result, Self::Error>; -} - -/// Forwarder struct responsible for cache handling -pub struct Forwarder -where - C: Connect + Clone + Sync + Send + 'static, -{ - #[cfg(feature = "cache")] - cache: Option, - inner: Client, - inner_h2: Client, // `h2c` or http/2-only client is defined separately -} - -#[async_trait] -impl ForwardRequest for Forwarder -where - B: HttpBody + Send + Sync + 'static, - B::Data: Send, - B::Error: Into>, - C: Connect + Clone + Sync + Send + 'static, -{ - type Error = RpxyError; - - #[cfg(feature = "cache")] - async fn request(&self, req: Request) -> Result, Self::Error> { - let mut synth_req = None; - if self.cache.is_some() { - if let Some(cached_response) = self.cache.as_ref().unwrap().get(&req).await { - // if found, return it as the response.
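// Illustrative sketch, not part of the patch: the two cache decisions used
// above. A response is stored only if `CachePolicy::is_storable()`, and it is
// served from the cache only while `before_request` still reports it fresh.
// The request/response below are hypothetical stand-ins.
use http::{Request, Response};
use http_cache_semantics::{BeforeRequest, CachePolicy};
use std::time::SystemTime;

fn cache_policy_demo() {
    let req = Request::builder().uri("https://example.com/app").body(()).unwrap();
    let res = Response::builder().header("cache-control", "max-age=60").body(()).unwrap();
    let policy = CachePolicy::new(&req, &res);
    // Storable: max-age=60 makes this response cacheable.
    assert!(policy.is_storable());
    match policy.before_request(&req, SystemTime::now()) {
        // Fresh: reuse the cached body together with the returned parts.
        BeforeRequest::Fresh(_parts) => {}
        // Stale: evict the entry (and any file object) and refetch upstream.
        BeforeRequest::Stale { .. } => {}
    }
}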
- info!("Cache hit - Return from cache"); - return Ok(cached_response); - }; - - // Synthetic request copy used just for caching (cannot clone request object...) - synth_req = Some(build_synth_req_for_cache(&req)); - } - - // TODO: This 'match' condition is always evaluated at every 'request' invocation. So, it is inefficient. - // Needs to be reconsidered. Currently, this is a kind of work around. - // This possibly relates to https://github.com/hyperium/hyper/issues/2417. - let res = match req.version() { - Version::HTTP_2 => self.inner_h2.request(req).await.map_err(RpxyError::Hyper), // handles `h2c` requests - _ => self.inner.request(req).await.map_err(RpxyError::Hyper), - }; - - if self.cache.is_none() { - return res; - } - - // check cacheability and store it if cacheable - let Ok(Some(cache_policy)) = get_policy_if_cacheable(synth_req.as_ref(), res.as_ref().ok()) else { - return res; - }; - let (parts, body) = res.unwrap().into_parts(); - let Ok(mut bytes) = hyper::body::aggregate(body).await else { - return Err(RpxyError::Cache("Failed to write byte buffer")); - }; - let aggregated = bytes.copy_to_bytes(bytes.remaining()); - - if let Err(cache_err) = self - .cache - .as_ref() - .unwrap() - .put(synth_req.unwrap().uri(), &aggregated, &cache_policy) - .await - { - error!("{:?}", cache_err); - }; - - // res - Ok(Response::from_parts(parts, Body::from(aggregated))) - } - - #[cfg(not(feature = "cache"))] - async fn request(&self, req: Request) -> Result, Self::Error> { - match req.version() { - Version::HTTP_2 => self.inner_h2.request(req).await.map_err(RpxyError::Hyper), // handles `h2c` requests - _ => self.inner.request(req).await.map_err(RpxyError::Hyper), - } - } -} - -impl Forwarder, Body> { - /// Build forwarder - pub async fn new(_globals: &std::sync::Arc>) -> Self { - #[cfg(feature = "native-roots")] - let builder = hyper_rustls::HttpsConnectorBuilder::new().with_native_roots(); - #[cfg(feature = "native-roots")] - let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_native_roots(); - #[cfg(feature = "native-roots")] - info!("Native cert store is used for the connection to backend applications"); - - #[cfg(not(feature = "native-roots"))] - let builder = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots(); - #[cfg(not(feature = "native-roots"))] - let builder_h2 = hyper_rustls::HttpsConnectorBuilder::new().with_webpki_roots(); - #[cfg(not(feature = "native-roots"))] - info!("Mozilla WebPKI root certs is used for the connection to backend applications"); - - let connector = builder.https_or_http().enable_http1().enable_http2().build(); - let connector_h2 = builder_h2.https_or_http().enable_http2().build(); - - let inner = Client::builder().build::<_, Body>(connector); - let inner_h2 = Client::builder().http2_only(true).build::<_, Body>(connector_h2); - - #[cfg(feature = "cache")] - { - let cache = RpxyCache::new(_globals).await; - Self { inner, inner_h2, cache } - } - #[cfg(not(feature = "cache"))] - Self { inner, inner_h2 } - } -} diff --git a/legacy-lib/src/handler/handler_main.rs b/legacy-lib/src/handler/handler_main.rs deleted file mode 100644 index 2720c2fe..00000000 --- a/legacy-lib/src/handler/handler_main.rs +++ /dev/null @@ -1,384 +0,0 @@ -// Highly motivated by https://github.com/felipenoris/hyper-reverse-proxy -use super::{ - error::*, - // forwarder::{ForwardRequest, Forwarder}, - utils_headers::*, - utils_request::*, - // utils_synth_response::*, - HandlerContext, -}; -use crate::{ - backend::{Backend, UpstreamGroup}, - certs::CryptoSource, - 
constants::RESPONSE_HEADER_SERVER, - error::*, - globals::Globals, - log::*, - utils::ServerNameBytesExp, -}; -use derive_builder::Builder; -use http::{ - header::{self, HeaderValue}, - uri::Scheme, - Request, Response, StatusCode, Uri, Version, -}; -use hyper::body::Incoming; -use hyper_util::client::legacy::connect::Connect; -use std::{net::SocketAddr, sync::Arc}; -use tokio::{io::copy_bidirectional, time::timeout}; - -#[derive(Clone, Builder)] -/// HTTP message handler for requests from clients and responses from backend applications, -/// responsible for manipulating and forwarding messages to upstream backends and downstream clients. -// pub struct HttpMessageHandler -pub struct HttpMessageHandler -where - // T: Connect + Clone + Sync + Send + 'static, - U: CryptoSource + Clone, -{ - // forwarder: Arc>, - globals: Arc>, -} - -impl HttpMessageHandler -where - // T: Connect + Clone + Sync + Send + 'static, - U: CryptoSource + Clone, -{ - // /// Return with an arbitrary error status code and log the message - // fn return_with_error_log(&self, status_code: StatusCode, log_data: &mut MessageLog) -> Result> { - // log_data.status_code(&status_code).output(); - // http_error(status_code) - // } - - /// Handle an incoming request message from a client - pub async fn handle_request( - &self, - mut req: Request, - client_addr: SocketAddr, // used for access control - listen_addr: SocketAddr, - tls_enabled: bool, - tls_server_name: Option, - ) -> Result>> { - //////// - let mut log_data = MessageLog::from(&req); - log_data.client_addr(&client_addr); - ////// - - // // Here we start handling based on server_name - // let server_name = if let Ok(v) = req.parse_host() { - // ServerNameBytesExp::from(v) - // } else { - // return self.return_with_error_log(StatusCode::BAD_REQUEST, &mut log_data); - // }; - // // Check consistency between the TLS SNI and the HOST header/request URI line. - // #[allow(clippy::collapsible_if)] - // if tls_enabled && self.globals.proxy_config.sni_consistency { - // if server_name != tls_server_name.unwrap_or_default() { - // return self.return_with_error_log(StatusCode::MISDIRECTED_REQUEST, &mut log_data); - // } - // } - // // Find the backend application for the given server_name, and drop the incoming request if it is invalid.
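// Illustrative only: the sni_consistency gate sketched in the commented block
// above. Both names are compared in lowercased byte form (what
// ServerNameBytesExp stores), and a mismatch is answered with
// 421 Misdirected Request. The helper name is hypothetical.
fn sni_matches_host(tls_sni: &[u8], parsed_host: &[u8]) -> bool {
    tls_sni.eq_ignore_ascii_case(parsed_host)
}
// e.g. SNI "Example.COM" vs Host "example.com:443" only matches after the
// port is stripped from Host, which is what parse_host() takes care of.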
- // let backend = match self.globals.backends.apps.get(&server_name) { - // Some(be) => be, - // None => { - // let Some(default_server_name) = &self.globals.backends.default_server_name_bytes else { - // return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data); - // }; - // debug!("Serving by default app"); - // self.globals.backends.apps.get(default_server_name).unwrap() - // } - // }; - - // // Redirect to https if !tls_enabled and redirect_to_https is true - // if !tls_enabled && backend.https_redirection.unwrap_or(false) { - // debug!("Redirect to secure connection: {}", &backend.server_name); - // log_data.status_code(&StatusCode::PERMANENT_REDIRECT).output(); - // return secure_redirection(&backend.server_name, self.globals.proxy_config.https_port, &req); - // } - - // // Find reverse proxy for given path and choose one of upstream host - // // Longest prefix match - // let path = req.uri().path(); - // let Some(upstream_group) = backend.reverse_proxy.get(path) else { - // return self.return_with_error_log(StatusCode::NOT_FOUND, &mut log_data); - // }; - - // // Upgrade in request header - // let upgrade_in_request = extract_upgrade(req.headers()); - // let request_upgraded = req.extensions_mut().remove::(); - - // // Build request from destination information - // let _context = match self.generate_request_forwarded( - // &client_addr, - // &listen_addr, - // &mut req, - // &upgrade_in_request, - // upstream_group, - // tls_enabled, - // ) { - // Err(e) => { - // error!("Failed to generate destination uri for reverse proxy: {}", e); - // return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data); - // } - // Ok(v) => v, - // }; - // debug!("Request to be forwarded: {:?}", req); - // log_data.xff(&req.headers().get("x-forwarded-for")); - // log_data.upstream(req.uri()); - // ////// - - // // Forward request to a chosen backend - // let mut res_backend = { - // let Ok(result) = timeout(self.globals.proxy_config.upstream_timeout, self.forwarder.request(req)).await else { - // return self.return_with_error_log(StatusCode::GATEWAY_TIMEOUT, &mut log_data); - // }; - // match result { - // Ok(res) => res, - // Err(e) => { - // error!("Failed to get response from backend: {}", e); - // return self.return_with_error_log(StatusCode::SERVICE_UNAVAILABLE, &mut log_data); - // } - // } - // }; - - // // Process reverse proxy context generated during the forwarding request generation. 
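// Illustrative only: the "longest prefix match" used above to pick an
// upstream group for a request path. The real code keys UpstreamGroup by path
// inside Backend::reverse_proxy; this helper and its inputs are hypothetical
// and ignore path-segment boundaries for brevity.
fn longest_prefix_match<'a>(path: &str, registered: &[&'a str]) -> Option<&'a str> {
    registered
        .iter()
        .filter(|p| path.starts_with(*p))
        // among all matching prefixes, the longest one wins
        .max_by_key(|p| p.len())
        .copied()
}
// longest_prefix_match("/api/v2/users", &["/", "/api", "/api/v2"]) == Some("/api/v2")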
- // #[cfg(feature = "sticky-cookie")] - // if let Some(context_from_lb) = _context.context_lb { - // let res_headers = res_backend.headers_mut(); - // if let Err(e) = set_sticky_cookie_lb_context(res_headers, &context_from_lb) { - // error!("Failed to append context to the response given from backend: {}", e); - // return self.return_with_error_log(StatusCode::BAD_GATEWAY, &mut log_data); - // } - // } - - // if res_backend.status() != StatusCode::SWITCHING_PROTOCOLS { - // // Generate response to client - // if self.generate_response_forwarded(&mut res_backend, backend).is_err() { - // return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data); - // } - // log_data.status_code(&res_backend.status()).output(); - // return Ok(res_backend); - // } - - // // Handle StatusCode::SWITCHING_PROTOCOLS in response - // let upgrade_in_response = extract_upgrade(res_backend.headers()); - // let should_upgrade = if let (Some(u_req), Some(u_res)) = (upgrade_in_request.as_ref(), upgrade_in_response.as_ref()) - // { - // u_req.to_ascii_lowercase() == u_res.to_ascii_lowercase() - // } else { - // false - // }; - // if !should_upgrade { - // error!( - // "Backend tried to switch to protocol {:?} when {:?} was requested", - // upgrade_in_response, upgrade_in_request - // ); - // return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data); - // } - // let Some(request_upgraded) = request_upgraded else { - // error!("Request does not have an upgrade extension"); - // return self.return_with_error_log(StatusCode::BAD_REQUEST, &mut log_data); - // }; - // let Some(onupgrade) = res_backend.extensions_mut().remove::() else { - // error!("Response does not have an upgrade extension"); - // return self.return_with_error_log(StatusCode::INTERNAL_SERVER_ERROR, &mut log_data); - // }; - - // self.globals.runtime_handle.spawn(async move { - // let mut response_upgraded = onupgrade.await.map_err(|e| { - // error!("Failed to upgrade response: {}", e); - // RpxyError::Hyper(e) - // })?; - // let mut request_upgraded = request_upgraded.await.map_err(|e| { - // error!("Failed to upgrade request: {}", e); - // RpxyError::Hyper(e) - // })?; - // copy_bidirectional(&mut response_upgraded, &mut request_upgraded) - // .await - // .map_err(|e| { - // error!("Coping between upgraded connections failed: {}", e); - // RpxyError::Io(e) - // })?; - // Ok(()) as Result<()> - // }); - // log_data.status_code(&res_backend.status()).output(); - // Ok(res_backend) - todo!() - } - - //////////////////////////////////////////////////// - // Functions to generate messages - //////////////////////////////////////////////////// - - // /// Manipulate a response message sent from a backend application to forward downstream to a client. 
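// Illustrative sketch of the 101-switching tunnel in the commented code
// above: once both the client and backend sides are upgraded, bytes are
// relayed in both directions until one side shuts down. Names are
// hypothetical; the copy itself is tokio's copy_bidirectional, as used there.
use tokio::io::{copy_bidirectional, AsyncRead, AsyncWrite};

async fn tunnel_upgraded<A, B>(mut downstream: A, mut upstream: B) -> std::io::Result<(u64, u64)>
where
    A: AsyncRead + AsyncWrite + Unpin,
    B: AsyncRead + AsyncWrite + Unpin,
{
    // Resolves with (client-to-backend bytes, backend-to-client bytes).
    copy_bidirectional(&mut downstream, &mut upstream).await
}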
- // fn generate_response_forwarded(&self, response: &mut Response, chosen_backend: &Backend) -> Result<()> - // where - // B: core::fmt::Debug, - // { - // let headers = response.headers_mut(); - // remove_connection_header(headers); - // remove_hop_header(headers); - // add_header_entry_overwrite_if_exist(headers, "server", RESPONSE_HEADER_SERVER)?; - - // #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] - // { - // // Manipulate ALT_SVC allowing h3 in response message only when mutual TLS is not enabled - // // TODO: This is a workaround for avoiding a client authentication in HTTP/3 - // if self.globals.proxy_config.http3 - // && chosen_backend - // .crypto_source - // .as_ref() - // .is_some_and(|v| !v.is_mutual_tls()) - // { - // if let Some(port) = self.globals.proxy_config.https_port { - // add_header_entry_overwrite_if_exist( - // headers, - // header::ALT_SVC.as_str(), - // format!( - // "h3=\":{}\"; ma={}, h3-29=\":{}\"; ma={}", - // port, self.globals.proxy_config.h3_alt_svc_max_age, port, self.globals.proxy_config.h3_alt_svc_max_age - // ), - // )?; - // } - // } else { - // // remove alt-svc to disallow requests via http3 - // headers.remove(header::ALT_SVC.as_str()); - // } - // } - // #[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))] - // { - // if let Some(port) = self.globals.proxy_config.https_port { - // headers.remove(header::ALT_SVC.as_str()); - // } - // } - - // Ok(()) - // } - - // #[allow(clippy::too_many_arguments)] - // /// Manipulate a request message sent from a client to forward upstream to a backend application - // fn generate_request_forwarded( - // &self, - // client_addr: &SocketAddr, - // listen_addr: &SocketAddr, - // req: &mut Request, - // upgrade: &Option, - // upstream_group: &UpstreamGroup, - // tls_enabled: bool, - // ) -> Result { - // debug!("Generate request to be forwarded"); - - // // Add te: trailer if contained in original request - // let contains_te_trailers = { - // if let Some(te) = req.headers().get(header::TE) { - // te.as_bytes() - // .split(|v| v == &b',' || v == &b' ') - // .any(|x| x == "trailers".as_bytes()) - // } else { - // false - // } - // }; - - // let uri = req.uri().to_string(); - // let headers = req.headers_mut(); - // // delete headers specified in header.connection - // remove_connection_header(headers); - // // delete hop headers including header.connection - // remove_hop_header(headers); - // // X-Forwarded-For - // add_forwarding_header(headers, client_addr, listen_addr, tls_enabled, &uri)?; - - // // Add te: trailer if te_trailer - // if contains_te_trailers { - // headers.insert(header::TE, HeaderValue::from_bytes("trailers".as_bytes()).unwrap()); - // } - - // // add "host" header of original server_name if not exist (default) - // if req.headers().get(header::HOST).is_none() { - // let org_host = req.uri().host().ok_or_else(|| anyhow!("Invalid request"))?.to_owned(); - // req - // .headers_mut() - // .insert(header::HOST, HeaderValue::from_str(&org_host)?); - // }; - - // ///////////////////////////////////////////// - // // Fix unique upstream destination since there could be multiple ones. - // #[cfg(feature = "sticky-cookie")] - // let (upstream_chosen_opt, context_from_lb) = { - // let context_to_lb = if let crate::backend::LoadBalance::StickyRoundRobin(lb) = &upstream_group.lb { - // takeout_sticky_cookie_lb_context(req.headers_mut(), &lb.sticky_config.name)? 
- // } else { - // None - // }; - // upstream_group.get(&context_to_lb) - // }; - // #[cfg(not(feature = "sticky-cookie"))] - // let (upstream_chosen_opt, _) = upstream_group.get(&None); - - // let upstream_chosen = upstream_chosen_opt.ok_or_else(|| anyhow!("Failed to get upstream"))?; - // let context = HandlerContext { - // #[cfg(feature = "sticky-cookie")] - // context_lb: context_from_lb, - // #[cfg(not(feature = "sticky-cookie"))] - // context_lb: None, - // }; - // ///////////////////////////////////////////// - - // // apply upstream-specific headers given in upstream_option - // let headers = req.headers_mut(); - // apply_upstream_options_to_header(headers, client_addr, upstream_group, &upstream_chosen.uri)?; - - // // update uri in request - // if !(upstream_chosen.uri.authority().is_some() && upstream_chosen.uri.scheme().is_some()) { - // return Err(RpxyError::Handler("Upstream uri `scheme` and `authority` is broken")); - // }; - // let new_uri = Uri::builder() - // .scheme(upstream_chosen.uri.scheme().unwrap().as_str()) - // .authority(upstream_chosen.uri.authority().unwrap().as_str()); - // let org_pq = match req.uri().path_and_query() { - // Some(pq) => pq.to_string(), - // None => "/".to_string(), - // } - // .into_bytes(); - - // // replace some parts of path if opt_replace_path is enabled for chosen upstream - // let new_pq = match &upstream_group.replace_path { - // Some(new_path) => { - // let matched_path: &[u8] = upstream_group.path.as_ref(); - // if matched_path.is_empty() || org_pq.len() < matched_path.len() { - // return Err(RpxyError::Handler("Upstream uri `path and query` is broken")); - // }; - // let mut new_pq = Vec::::with_capacity(org_pq.len() - matched_path.len() + new_path.len()); - // new_pq.extend_from_slice(new_path.as_ref()); - // new_pq.extend_from_slice(&org_pq[matched_path.len()..]); - // new_pq - // } - // None => org_pq, - // }; - // *req.uri_mut() = new_uri.path_and_query(new_pq).build()?; - - // // upgrade - // if let Some(v) = upgrade { - // req.headers_mut().insert(header::UPGRADE, v.parse()?); - // req - // .headers_mut() - // .insert(header::CONNECTION, HeaderValue::from_str("upgrade")?); - // } - - // // If not specified (force_httpXX_upstream) and https, version is preserved except for http/3 - // if upstream_chosen.uri.scheme() == Some(&Scheme::HTTP) { - // // Change version to http/1.1 when destination scheme is http - // debug!("Change version to http/1.1 when destination scheme is http unless upstream option enabled."); - // *req.version_mut() = Version::HTTP_11; - // } else if req.version() == Version::HTTP_3 { - // // HTTP/3 is always https - // debug!("HTTP/3 is currently unsupported for request to upstream."); - // *req.version_mut() = Version::HTTP_2; - // } - - // apply_upstream_options_to_request_line(req, upstream_group)?; - - // Ok(context) - // } -} diff --git a/legacy-lib/src/handler/mod.rs b/legacy-lib/src/handler/mod.rs deleted file mode 100644 index 2ae5aba6..00000000 --- a/legacy-lib/src/handler/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[cfg(feature = "cache")] -// mod cache; -mod error; -// mod forwarder; -mod handler_main; -mod utils_headers; -mod utils_request; -// mod utils_synth_response; - -#[cfg(feature = "sticky-cookie")] -use crate::backend::LbContext; -pub use handler_main::{HttpMessageHandler, HttpMessageHandlerBuilder, HttpMessageHandlerBuilderError}; - -#[allow(dead_code)] -#[derive(Debug)] -/// Context object to handle sticky cookies at HTTP message handler -struct HandlerContext { - #[cfg(feature = 
"sticky-cookie")] - context_lb: Option, - #[cfg(not(feature = "sticky-cookie"))] - context_lb: Option<()>, -} diff --git a/legacy-lib/src/handler/utils_headers.rs b/legacy-lib/src/handler/utils_headers.rs deleted file mode 100644 index 6a09c1da..00000000 --- a/legacy-lib/src/handler/utils_headers.rs +++ /dev/null @@ -1,276 +0,0 @@ -#[cfg(feature = "sticky-cookie")] -use crate::backend::{LbContext, StickyCookie, StickyCookieValue}; -use crate::backend::{UpstreamGroup, UpstreamOption}; - -use crate::{error::*, log::*, utils::*}; -use bytes::BufMut; -use hyper::{ - header::{self, HeaderMap, HeaderName, HeaderValue}, - Uri, -}; -use std::{borrow::Cow, net::SocketAddr}; - -//////////////////////////////////////////////////// -// Functions to manipulate headers - -#[cfg(feature = "sticky-cookie")] -/// Take sticky cookie header value from request header, -/// and returns LbContext to be forwarded to LB if exist and if needed. -/// Removing sticky cookie is needed and it must not be passed to the upstream. -pub(super) fn takeout_sticky_cookie_lb_context( - headers: &mut HeaderMap, - expected_cookie_name: &str, -) -> Result> { - let mut headers_clone = headers.clone(); - - match headers_clone.entry(header::COOKIE) { - header::Entry::Vacant(_) => Ok(None), - header::Entry::Occupied(entry) => { - let cookies_iter = entry - .iter() - .flat_map(|v| v.to_str().unwrap_or("").split(';').map(|v| v.trim())); - let (sticky_cookies, without_sticky_cookies): (Vec<_>, Vec<_>) = cookies_iter - .into_iter() - .partition(|v| v.starts_with(expected_cookie_name)); - if sticky_cookies.is_empty() { - return Ok(None); - } - if sticky_cookies.len() > 1 { - error!("Multiple sticky cookie values in request"); - return Err(RpxyError::Other(anyhow!( - "Invalid cookie: Multiple sticky cookie values" - ))); - } - let cookies_passed_to_upstream = without_sticky_cookies.join("; "); - let cookie_passed_to_lb = sticky_cookies.first().unwrap(); - headers.remove(header::COOKIE); - headers.insert(header::COOKIE, cookies_passed_to_upstream.parse()?); - - let sticky_cookie = StickyCookie { - value: StickyCookieValue::try_from(cookie_passed_to_lb, expected_cookie_name)?, - info: None, - }; - Ok(Some(LbContext { sticky_cookie })) - } - } -} - -#[cfg(feature = "sticky-cookie")] -/// Set-Cookie if LB Sticky is enabled and if cookie is newly created/updated. -/// Set-Cookie response header could be in multiple lines. 
-/// https://developer.mozilla.org/ja/docs/Web/HTTP/Headers/Set-Cookie -pub(super) fn set_sticky_cookie_lb_context(headers: &mut HeaderMap, context_from_lb: &LbContext) -> Result<()> { - let sticky_cookie_string: String = context_from_lb.sticky_cookie.clone().try_into()?; - let new_header_val: HeaderValue = sticky_cookie_string.parse()?; - let expected_cookie_name = &context_from_lb.sticky_cookie.value.name; - match headers.entry(header::SET_COOKIE) { - header::Entry::Vacant(entry) => { - entry.insert(new_header_val); - } - header::Entry::Occupied(mut entry) => { - let mut flag = false; - for e in entry.iter_mut() { - if e.to_str().unwrap_or("").starts_with(expected_cookie_name) { - *e = new_header_val.clone(); - flag = true; - } - } - if !flag { - entry.append(new_header_val); - } - } - }; - Ok(()) -} - -/// Apply options to request header, which are specified in the configuration -pub(super) fn apply_upstream_options_to_header( - headers: &mut HeaderMap, - _client_addr: &SocketAddr, - upstream: &UpstreamGroup, - upstream_base_uri: &Uri, -) -> Result<()> { - for opt in upstream.opts.iter() { - match opt { - UpstreamOption::OverrideHost => { - // overwrite HOST value with upstream hostname (like 192.168.xx.x seen from rpxy) - let upstream_host = upstream_base_uri - .host() - .ok_or_else(|| anyhow!("No hostname is given in override_host option"))?; - headers - .insert(header::HOST, HeaderValue::from_str(upstream_host)?) - .ok_or_else(|| anyhow!("Failed to insert host header in override_host option"))?; - } - UpstreamOption::UpgradeInsecureRequests => { - // add upgrade-insecure-requests in request header if not exist - headers - .entry(header::UPGRADE_INSECURE_REQUESTS) - .or_insert(HeaderValue::from_bytes(&[b'1']).unwrap()); - } - _ => (), - } - } - - Ok(()) -} - -/// Append header entry with comma according to [RFC9110](https://datatracker.ietf.org/doc/html/rfc9110) -pub(super) fn append_header_entry_with_comma(headers: &mut HeaderMap, key: &str, value: &str) -> Result<()> { - match headers.entry(HeaderName::from_bytes(key.as_bytes())?) { - header::Entry::Vacant(entry) => { - entry.insert(value.parse::()?); - } - header::Entry::Occupied(mut entry) => { - // entry.append(value.parse::()?); - let mut new_value = Vec::::with_capacity(entry.get().as_bytes().len() + 2 + value.len()); - new_value.put_slice(entry.get().as_bytes()); - new_value.put_slice(&[b',', b' ']); - new_value.put_slice(value.as_bytes()); - entry.insert(HeaderValue::from_bytes(&new_value)?); - } - } - - Ok(()) -} - -/// Add header entry if not exist -pub(super) fn add_header_entry_if_not_exist( - headers: &mut HeaderMap, - key: impl Into>, - value: impl Into>, -) -> Result<()> { - match headers.entry(HeaderName::from_bytes(key.into().as_bytes())?) { - header::Entry::Vacant(entry) => { - entry.insert(value.into().parse::()?); - } - header::Entry::Occupied(_) => (), - }; - - Ok(()) -} - -/// Overwrite header entry if exist -pub(super) fn add_header_entry_overwrite_if_exist( - headers: &mut HeaderMap, - key: impl Into>, - value: impl Into>, -) -> Result<()> { - match headers.entry(HeaderName::from_bytes(key.into().as_bytes())?) { - header::Entry::Vacant(entry) => { - entry.insert(value.into().parse::()?); - } - header::Entry::Occupied(mut entry) => { - entry.insert(HeaderValue::from_bytes(value.into().as_bytes())?); - } - } - - Ok(()) -} - -/// Align cookie values in single line -/// Sometimes violates [RFC6265](https://www.rfc-editor.org/rfc/rfc6265#section-5.4) (for http/1.1). 
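// Illustrative only: the RFC 9110 field-line merge performed by
// append_header_entry_with_comma above, shown for x-forwarded-for. The
// addresses are hypothetical.
use http::header::{HeaderMap, HeaderValue};

fn append_xff(headers: &mut HeaderMap, client_ip: &str) {
    let merged = match headers.get("x-forwarded-for") {
        // existing list "203.0.113.9" + client "198.51.100.7"
        //   -> "203.0.113.9, 198.51.100.7"
        Some(existing) => format!("{}, {}", existing.to_str().unwrap_or(""), client_ip),
        None => client_ip.to_string(),
    };
    headers.insert("x-forwarded-for", HeaderValue::from_str(&merged).unwrap());
}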
-/// This is allowed in RFC7540 (for http/2) as mentioned [here](https://stackoverflow.com/questions/4843556/in-http-specification-what-is-the-string-that-separates-cookies). -pub(super) fn make_cookie_single_line(headers: &mut HeaderMap) -> Result<()> { - let cookies = headers - .iter() - .filter(|(k, _)| **k == header::COOKIE) - .map(|(_, v)| v.to_str().unwrap_or("")) - .collect::>() - .join("; "); - if !cookies.is_empty() { - headers.remove(header::COOKIE); - headers.insert(header::COOKIE, HeaderValue::from_bytes(cookies.as_bytes())?); - } - Ok(()) -} - -/// Add forwarding headers like `x-forwarded-for`. -pub(super) fn add_forwarding_header( - headers: &mut HeaderMap, - client_addr: &SocketAddr, - listen_addr: &SocketAddr, - tls: bool, - uri_str: &str, -) -> Result<()> { - // default process - // optional process defined by upstream_option is applied in fn apply_upstream_options - let canonical_client_addr = client_addr.to_canonical().ip().to_string(); - append_header_entry_with_comma(headers, "x-forwarded-for", &canonical_client_addr)?; - - // Single line cookie header - // TODO: This should be only for HTTP/1.1. For 2+, this can be multi-lined. - make_cookie_single_line(headers)?; - - /////////// As Nginx - // If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the - // scheme used to connect to this server - add_header_entry_if_not_exist(headers, "x-forwarded-proto", if tls { "https" } else { "http" })?; - // If we receive X-Forwarded-Port, pass it through; otherwise, pass along the - // server port the client connected to - add_header_entry_if_not_exist(headers, "x-forwarded-port", listen_addr.port().to_string())?; - - /////////// As Nginx-Proxy - // x-real-ip - add_header_entry_overwrite_if_exist(headers, "x-real-ip", canonical_client_addr)?; - // x-forwarded-ssl - add_header_entry_overwrite_if_exist(headers, "x-forwarded-ssl", if tls { "on" } else { "off" })?; - // x-original-uri - add_header_entry_overwrite_if_exist(headers, "x-original-uri", uri_str.to_string())?; - // proxy - add_header_entry_overwrite_if_exist(headers, "proxy", "")?; - - Ok(()) -} - -/// Remove connection header -pub(super) fn remove_connection_header(headers: &mut HeaderMap) { - if let Some(values) = headers.get(header::CONNECTION) { - if let Ok(v) = values.clone().to_str() { - for m in v.split(',') { - if !m.is_empty() { - headers.remove(m.trim()); - } - } - } - } -} - -/// Hop header values which are removed at proxy -const HOP_HEADERS: &[&str] = &[ - "connection", - "te", - "trailer", - "keep-alive", - "proxy-connection", - "proxy-authenticate", - "proxy-authorization", - "transfer-encoding", - "upgrade", -]; - -/// Remove hop headers -pub(super) fn remove_hop_header(headers: &mut HeaderMap) { - HOP_HEADERS.iter().for_each(|key| { - headers.remove(*key); - }); -} - -/// Extract upgrade header value if exist -pub(super) fn extract_upgrade(headers: &HeaderMap) -> Option { - if let Some(c) = headers.get(header::CONNECTION) { - if c - .to_str() - .unwrap_or("") - .split(',') - .any(|w| w.trim().to_ascii_lowercase() == header::UPGRADE.as_str().to_ascii_lowercase()) - { - if let Some(u) = headers.get(header::UPGRADE) { - if let Ok(m) = u.to_str() { - debug!("Upgrade in request header: {}", m); - return Some(m.to_owned()); - } - } - } - } - None -} diff --git a/legacy-lib/src/handler/utils_request.rs b/legacy-lib/src/handler/utils_request.rs deleted file mode 100644 index 6204f413..00000000 --- a/legacy-lib/src/handler/utils_request.rs +++ /dev/null @@ -1,64 +0,0 @@ -use crate::{ - 
backend::{UpstreamGroup, UpstreamOption}, - error::*, -}; -use hyper::{header, Request}; - -//////////////////////////////////////////////////// -// Functions to manipulate request line - -/// Apply upstream options in request line, specified in the configuration -pub(super) fn apply_upstream_options_to_request_line(req: &mut Request, upstream: &UpstreamGroup) -> Result<()> { - for opt in upstream.opts.iter() { - match opt { - UpstreamOption::ForceHttp11Upstream => *req.version_mut() = hyper::Version::HTTP_11, - UpstreamOption::ForceHttp2Upstream => { - // case: h2c -> https://www.rfc-editor.org/rfc/rfc9113.txt - // Upgrade from HTTP/1.1 to HTTP/2 is deprecated. So, http-2 prior knowledge is required. - *req.version_mut() = hyper::Version::HTTP_2; - } - _ => (), - } - } - - Ok(()) -} - -/// Trait defining parser of hostname -pub trait ParseHost { - fn parse_host(&self) -> Result<&[u8]>; -} -impl ParseHost for Request { - /// Extract hostname from either the request HOST header or request line - fn parse_host(&self) -> Result<&[u8]> { - let headers_host = self.headers().get(header::HOST); - let uri_host = self.uri().host(); - // let uri_port = self.uri().port_u16(); - - if !(!(headers_host.is_none() && uri_host.is_none())) { - return Err(RpxyError::Request("No host in request header")); - } - - // prioritize server_name in uri - uri_host.map_or_else( - || { - let m = headers_host.unwrap().as_bytes(); - if m.starts_with(&[b'[']) { - // v6 address with bracket case. if port is specified, always it is in this case. - let mut iter = m.split(|ptr| ptr == &b'[' || ptr == &b']'); - iter.next().ok_or(RpxyError::Request("Invalid Host"))?; // first item is always blank - iter.next().ok_or(RpxyError::Request("Invalid Host")) - } else if m.len() - m.split(|v| v == &b':').fold(0, |acc, s| acc + s.len()) >= 2 { - // v6 address case, if 2 or more ':' is contained - Ok(m) - } else { - // v4 address or hostname - m.split(|colon| colon == &b':') - .next() - .ok_or(RpxyError::Request("Invalid Host")) - } - }, - |v| Ok(v.as_bytes()), - ) - } -} diff --git a/legacy-lib/src/handler/utils_synth_response.rs b/legacy-lib/src/handler/utils_synth_response.rs deleted file mode 100644 index baa69870..00000000 --- a/legacy-lib/src/handler/utils_synth_response.rs +++ /dev/null @@ -1,35 +0,0 @@ -// Highly motivated by https://github.com/felipenoris/hyper-reverse-proxy -use crate::error::*; -use hyper::{Body, Request, Response, StatusCode, Uri}; - -//////////////////////////////////////////////////// -// Functions to create response (error or redirect) - -/// Generate a synthetic response message of a certain error status code -pub(super) fn http_error(status_code: StatusCode) -> Result> { - let response = Response::builder().status(status_code).body(Body::empty())?; - Ok(response) -} - -/// Generate synthetic response message of a redirection to https host with 301 -pub(super) fn secure_redirection( - server_name: &str, - tls_port: Option, - req: &Request, -) -> Result> { - let pq = match req.uri().path_and_query() { - Some(x) => x.as_str(), - _ => "", - }; - let new_uri = Uri::builder().scheme("https").path_and_query(pq); - let dest_uri = match tls_port { - Some(443) | None => new_uri.authority(server_name), - Some(p) => new_uri.authority(format!("{server_name}:{p}")), - } - .build()?; - let response = Response::builder() - .status(StatusCode::MOVED_PERMANENTLY) - .header("Location", dest_uri.to_string()) - .body(Body::empty())?; - Ok(response) -} diff --git a/legacy-lib/src/hyper_executor.rs 
b/legacy-lib/src/hyper_executor.rs deleted file mode 100644 index 152bbe92..00000000 --- a/legacy-lib/src/hyper_executor.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::sync::Arc; - -use hyper_util::server::{self, conn::auto::Builder as ConnectionBuilder}; -use tokio::runtime::Handle; - -use crate::{globals::Globals, CryptoSource}; - -#[derive(Clone)] -/// Executor for hyper -pub struct LocalExecutor { - runtime_handle: Handle, -} - -impl LocalExecutor { - pub fn new(runtime_handle: Handle) -> Self { - LocalExecutor { runtime_handle } - } -} - -impl hyper::rt::Executor for LocalExecutor -where - F: std::future::Future + Send + 'static, - F::Output: Send, -{ - fn execute(&self, fut: F) { - self.runtime_handle.spawn(fut); - } -} - -/// build connection builder shared with proxy instances -pub(crate) fn build_http_server(globals: &Arc>) -> ConnectionBuilder -where - T: CryptoSource, -{ - let executor = LocalExecutor::new(globals.runtime_handle.clone()); - let mut http_server = server::conn::auto::Builder::new(executor); - http_server - .http1() - .keep_alive(globals.proxy_config.keepalive) - .pipeline_flush(true); - http_server - .http2() - .max_concurrent_streams(globals.proxy_config.max_concurrent_streams); - http_server -} diff --git a/legacy-lib/src/lib.rs b/legacy-lib/src/lib.rs deleted file mode 100644 index a9f48abc..00000000 --- a/legacy-lib/src/lib.rs +++ /dev/null @@ -1,112 +0,0 @@ -mod backend; -mod certs; -mod constants; -mod error; -mod globals; -mod handler; -mod hyper_executor; -mod log; -mod proxy; -mod utils; - -use crate::{error::*, globals::Globals, handler::HttpMessageHandlerBuilder, log::*, proxy::ProxyBuilder}; -use futures::future::select_all; -use hyper_executor::build_http_server; -use std::sync::Arc; - -pub use crate::{ - certs::{CertsAndKeys, CryptoSource}, - globals::{AppConfig, AppConfigList, ProxyConfig, ReverseProxyConfig, TlsConfig, UpstreamUri}, -}; -pub mod reexports { - pub use hyper::Uri; - pub use rustls::{Certificate, PrivateKey}; -} - -#[cfg(all(feature = "http3-quinn", feature = "http3-s2n"))] -compile_error!("feature \"http3-quinn\" and feature \"http3-s2n\" cannot be enabled at the same time"); - -/// Entrypoint that creates and spawns tasks of reverse proxy services -pub async fn entrypoint( - proxy_config: &ProxyConfig, - app_config_list: &AppConfigList, - runtime_handle: &tokio::runtime::Handle, - term_notify: Option>, -) -> Result<()> -where - T: CryptoSource + Clone + Send + Sync + 'static, -{ - // For initial message logging - if proxy_config.listen_sockets.iter().any(|addr| addr.is_ipv6()) { - info!("Listen both IPv4 and IPv6") - } else { - info!("Listen IPv4") - } - if proxy_config.http_port.is_some() { - info!("Listen port: {}", proxy_config.http_port.unwrap()); - } - if proxy_config.https_port.is_some() { - info!("Listen port: {} (for TLS)", proxy_config.https_port.unwrap()); - } - #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] - if proxy_config.http3 { - info!("Experimental HTTP/3.0 is enabled. Note it is still very unstable."); - } - if !proxy_config.sni_consistency { - info!("Ignore consistency between TLS SNI and Host header (or Request line). 
Note it violates RFC."); - } - #[cfg(feature = "cache")] - if proxy_config.cache_enabled { - info!( - "Cache is enabled: cache dir = {:?}", - proxy_config.cache_dir.as_ref().unwrap() - ); - } else { - info!("Cache is disabled") - } - - // build global - let globals = Arc::new(Globals { - proxy_config: proxy_config.clone(), - backends: app_config_list.clone().try_into()?, - request_count: Default::default(), - runtime_handle: runtime_handle.clone(), - term_notify: term_notify.clone(), - }); - - // build message handler including a request forwarder - let msg_handler = Arc::new( - HttpMessageHandlerBuilder::default() - // .forwarder(Arc::new(Forwarder::new(&globals).await)) - .globals(globals.clone()) - .build()?, - ); - - let http_server = Arc::new(build_http_server(&globals)); - - let addresses = globals.proxy_config.listen_sockets.clone(); - let futures = select_all(addresses.into_iter().map(|addr| { - let mut tls_enabled = false; - if let Some(https_port) = globals.proxy_config.https_port { - tls_enabled = https_port == addr.port() - } - - let proxy = ProxyBuilder::default() - .globals(globals.clone()) - .listening_on(addr) - .tls_enabled(tls_enabled) - .http_server(http_server.clone()) - .msg_handler(msg_handler.clone()) - .build() - .unwrap(); - - globals.runtime_handle.spawn(async move { proxy.start().await }) - })); - - // wait for all future - if let (Ok(Err(e)), _, _) = futures.await { - error!("Some proxy services are down: {}", e); - }; - - Ok(()) -} diff --git a/legacy-lib/src/log.rs b/legacy-lib/src/log.rs deleted file mode 100644 index 6b8afbec..00000000 --- a/legacy-lib/src/log.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::utils::ToCanonical; -use hyper::header; -use std::net::SocketAddr; -pub use tracing::{debug, error, info, warn}; - -#[derive(Debug, Clone)] -pub struct MessageLog { - // pub tls_server_name: String, - pub client_addr: String, - pub method: String, - pub host: String, - pub p_and_q: String, - pub version: hyper::Version, - pub uri_scheme: String, - pub uri_host: String, - pub ua: String, - pub xff: String, - pub status: String, - pub upstream: String, -} - -impl From<&hyper::Request> for MessageLog { - fn from(req: &hyper::Request) -> Self { - let header_mapper = |v: header::HeaderName| { - req - .headers() - .get(v) - .map_or_else(|| "", |s| s.to_str().unwrap_or("")) - .to_string() - }; - Self { - // tls_server_name: "".to_string(), - client_addr: "".to_string(), - method: req.method().to_string(), - host: header_mapper(header::HOST), - p_and_q: req - .uri() - .path_and_query() - .map_or_else(|| "", |v| v.as_str()) - .to_string(), - version: req.version(), - uri_scheme: req.uri().scheme_str().unwrap_or("").to_string(), - uri_host: req.uri().host().unwrap_or("").to_string(), - ua: header_mapper(header::USER_AGENT), - xff: header_mapper(header::HeaderName::from_static("x-forwarded-for")), - status: "".to_string(), - upstream: "".to_string(), - } - } -} - -impl MessageLog { - pub fn client_addr(&mut self, client_addr: &SocketAddr) -> &mut Self { - self.client_addr = client_addr.to_canonical().to_string(); - self - } - // pub fn tls_server_name(&mut self, tls_server_name: &str) -> &mut Self { - // self.tls_server_name = tls_server_name.to_string(); - // self - // } - pub fn status_code(&mut self, status_code: &hyper::StatusCode) -> &mut Self { - self.status = status_code.to_string(); - self - } - pub fn xff(&mut self, xff: &Option<&header::HeaderValue>) -> &mut Self { - self.xff = xff.map_or_else(|| "", |v| v.to_str().unwrap_or("")).to_string(); - self - } - 
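// Illustrative sketch of the select_all pattern in entrypoint() above: one
// spawned task per listen socket, and the first task that returns is treated
// as a fatal condition for the whole proxy set. The handle list and its
// anyhow::Result item type are hypothetical stand-ins.
use futures::future::select_all;

async fn run_proxies(handles: Vec<tokio::task::JoinHandle<anyhow::Result<()>>>) {
    // select_all resolves with the first finished future, its index, and the
    // still-pending remainder; dropping the remainder merely detaches those
    // tasks, and the real code simply exits afterwards.
    let (first, _idx, _rest) = select_all(handles).await;
    if let Ok(Err(e)) = first {
        tracing::error!("Some proxy services are down: {}", e);
    }
}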
pub fn upstream(&mut self, upstream: &hyper::Uri) -> &mut Self { - self.upstream = upstream.to_string(); - self - } - - pub fn output(&self) { - info!( - "{} <- {} -- {} {} {:?} -- {} -- {} \"{}\", \"{}\" \"{}\"", - if !self.host.is_empty() { - self.host.as_str() - } else { - self.uri_host.as_str() - }, - self.client_addr, - self.method, - self.p_and_q, - self.version, - self.status, - if !self.uri_scheme.is_empty() && !self.uri_host.is_empty() { - format!("{}://{}", self.uri_scheme, self.uri_host) - } else { - "".to_string() - }, - self.ua, - self.xff, - self.upstream, - // self.tls_server_name - ); - } -} diff --git a/legacy-lib/src/proxy/crypto_service.rs b/legacy-lib/src/proxy/crypto_service.rs deleted file mode 100644 index ae0f9936..00000000 --- a/legacy-lib/src/proxy/crypto_service.rs +++ /dev/null @@ -1,276 +0,0 @@ -use crate::{ - certs::{CertsAndKeys, CryptoSource}, - globals::Globals, - log::*, - utils::ServerNameBytesExp, -}; -use async_trait::async_trait; -use hot_reload::*; -use rustc_hash::FxHashMap as HashMap; -use rustls::{server::ResolvesServerCertUsingSni, sign::CertifiedKey, RootCertStore, ServerConfig}; -use std::sync::Arc; - -#[derive(Clone)] -/// Reloader service for certificates and keys for TLS -pub struct CryptoReloader -where - T: CryptoSource, -{ - globals: Arc>, -} - -pub type SniServerCryptoMap = HashMap>; -pub struct ServerCrypto { - // For Quic/HTTP3, only servers with no client authentication - #[cfg(feature = "http3-quinn")] - pub inner_global_no_client_auth: Arc, - #[cfg(feature = "http3-s2n")] - pub inner_global_no_client_auth: s2n_quic_rustls::Server, - // For TLS over TCP/HTTP2 and 1.1, map of SNI to server_crypto for all given servers - pub inner_local_map: Arc, -} - -/// Reloader target for the certificate reloader service -#[derive(Debug, PartialEq, Eq, Clone, Default)] -pub struct ServerCryptoBase { - inner: HashMap, -} - -#[async_trait] -impl Reload for CryptoReloader -where - T: CryptoSource + Sync + Send, -{ - type Source = Arc>; - async fn new(source: &Self::Source) -> Result> { - Ok(Self { - globals: source.clone(), - }) - } - - async fn reload(&self) -> Result, ReloaderError> { - let mut certs_and_keys_map = ServerCryptoBase::default(); - - for (server_name_bytes_exp, backend) in self.globals.backends.apps.iter() { - if let Some(crypto_source) = &backend.crypto_source { - let certs_and_keys = crypto_source - .read() - .await - .map_err(|_e| ReloaderError::::Reload("Failed to reload cert, key or ca cert"))?; - certs_and_keys_map - .inner - .insert(server_name_bytes_exp.to_owned(), certs_and_keys); - } - } - - Ok(Some(certs_and_keys_map)) - } -} - -impl TryInto> for &ServerCryptoBase { - type Error = anyhow::Error; - - fn try_into(self) -> Result, Self::Error> { - #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] - let server_crypto_global = self.build_server_crypto_global()?; - let server_crypto_local_map: SniServerCryptoMap = self.build_server_crypto_local_map()?; - - Ok(Arc::new(ServerCrypto { - #[cfg(feature = "http3-quinn")] - inner_global_no_client_auth: Arc::new(server_crypto_global), - #[cfg(feature = "http3-s2n")] - inner_global_no_client_auth: server_crypto_global, - inner_local_map: Arc::new(server_crypto_local_map), - })) - } -} - -impl ServerCryptoBase { - fn build_server_crypto_local_map(&self) -> Result> { - let mut server_crypto_local_map: SniServerCryptoMap = HashMap::default(); - - for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() { - let server_name: String = server_name_bytes_exp.try_into()?; - - // 
Parse server certificates and private keys - let Ok(certified_key): Result = certs_and_keys.parse_server_certs_and_keys() else { - warn!("Failed to add certificate for {}", server_name); - continue; - }; - - let mut resolver_local = ResolvesServerCertUsingSni::new(); - let mut client_ca_roots_local = RootCertStore::empty(); - - // add server certificate and key - if let Err(e) = resolver_local.add(server_name.as_str(), certified_key.to_owned()) { - error!( - "{}: Failed to read some certificates and keys {}", - server_name.as_str(), - e - ) - } - - // add client certificate if specified - if certs_and_keys.client_ca_certs.is_some() { - // add client certificate if specified - match certs_and_keys.parse_client_ca_certs() { - Ok((owned_trust_anchors, _subject_key_ids)) => { - client_ca_roots_local.add_trust_anchors(owned_trust_anchors.into_iter()); - } - Err(e) => { - warn!( - "Failed to add client CA certificate for {}: {}", - server_name.as_str(), - e - ); - } - } - } - - let mut server_config_local = if client_ca_roots_local.is_empty() { - // with no client auth, enable http1.1 -- 3 - #[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))] - { - ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_cert_resolver(Arc::new(resolver_local)) - } - #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] - { - let mut sc = ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_cert_resolver(Arc::new(resolver_local)); - sc.alpn_protocols = vec![b"h3".to_vec(), b"hq-29".to_vec()]; // TODO: remove hq-29 later? - sc - } - } else { - // with client auth, enable only http1.1 and 2 - // let client_certs_verifier = rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(client_ca_roots); - let client_certs_verifier = rustls::server::AllowAnyAuthenticatedClient::new(client_ca_roots_local); - ServerConfig::builder() - .with_safe_defaults() - .with_client_cert_verifier(Arc::new(client_certs_verifier)) - .with_cert_resolver(Arc::new(resolver_local)) - }; - server_config_local.alpn_protocols.push(b"h2".to_vec()); - server_config_local.alpn_protocols.push(b"http/1.1".to_vec()); - - server_crypto_local_map.insert(server_name_bytes_exp.to_owned(), Arc::new(server_config_local)); - } - Ok(server_crypto_local_map) - } - - #[cfg(feature = "http3-quinn")] - fn build_server_crypto_global(&self) -> Result> { - let mut resolver_global = ResolvesServerCertUsingSni::new(); - - for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() { - let server_name: String = server_name_bytes_exp.try_into()?; - - // Parse server certificates and private keys - let Ok(certified_key): Result = certs_and_keys.parse_server_certs_and_keys() else { - warn!("Failed to add certificate for {}", server_name); - continue; - }; - - if certs_and_keys.client_ca_certs.is_none() { - // aggregated server config for no client auth server for http3 - if let Err(e) = resolver_global.add(server_name.as_str(), certified_key) { - error!( - "{}: Failed to read some certificates and keys {}", - server_name.as_str(), - e - ) - } - } - } - - ////////////// - let mut server_crypto_global = ServerConfig::builder() - .with_safe_defaults() - .with_no_client_auth() - .with_cert_resolver(Arc::new(resolver_global)); - - ////////////////////////////// - - server_crypto_global.alpn_protocols = vec![ - b"h3".to_vec(), - b"hq-29".to_vec(), // TODO: remove later? 
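// Illustrative sketch (rustls 0.21 API, as pinned in the deleted Cargo.toml)
// of the per-SNI server config assembled above: one certified key per server
// name resolved through SNI, then ALPN ordered h2 before http/1.1. The
// resolver argument is assumed to be pre-populated via resolver.add(name, key).
use rustls::{server::ResolvesServerCertUsingSni, ServerConfig};
use std::sync::Arc;

fn tls_server_config(resolver: ResolvesServerCertUsingSni) -> ServerConfig {
    let mut config = ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth()
        .with_cert_resolver(Arc::new(resolver));
    // "h3"/"hq-29" are appended only on the QUIC-facing config in the real code.
    config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
    config
}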
- b"h2".to_vec(), - b"http/1.1".to_vec(), - ]; - Ok(server_crypto_global) - } - - #[cfg(feature = "http3-s2n")] - fn build_server_crypto_global(&self) -> Result> { - let mut resolver_global = s2n_quic_rustls::rustls::server::ResolvesServerCertUsingSni::new(); - - for (server_name_bytes_exp, certs_and_keys) in self.inner.iter() { - let server_name: String = server_name_bytes_exp.try_into()?; - - // Parse server certificates and private keys - let Ok(certified_key) = parse_server_certs_and_keys_s2n(certs_and_keys) else { - warn!("Failed to add certificate for {}", server_name); - continue; - }; - - if certs_and_keys.client_ca_certs.is_none() { - // aggregated server config for no client auth server for http3 - if let Err(e) = resolver_global.add(server_name.as_str(), certified_key) { - error!( - "{}: Failed to read some certificates and keys {}", - server_name.as_str(), - e - ) - } - } - } - let alpn = vec![ - b"h3".to_vec(), - b"hq-29".to_vec(), // TODO: remove later? - b"h2".to_vec(), - b"http/1.1".to_vec(), - ]; - let server_crypto_global = s2n_quic::provider::tls::rustls::Server::builder() - .with_cert_resolver(Arc::new(resolver_global)) - .map_err(|e| anyhow::anyhow!(e))? - .with_application_protocols(alpn.iter()) - .map_err(|e| anyhow::anyhow!(e))? - .build() - .map_err(|e| anyhow::anyhow!(e))?; - Ok(server_crypto_global) - } -} - -#[cfg(feature = "http3-s2n")] -/// This is workaround for the version difference between rustls and s2n-quic-rustls -fn parse_server_certs_and_keys_s2n( - certs_and_keys: &CertsAndKeys, -) -> Result { - let signing_key = certs_and_keys - .cert_keys - .iter() - .find_map(|k| { - let s2n_private_key = s2n_quic_rustls::PrivateKey(k.0.clone()); - if let Ok(sk) = s2n_quic_rustls::rustls::sign::any_supported_type(&s2n_private_key) { - Some(sk) - } else { - None - } - }) - .ok_or_else(|| { - std::io::Error::new( - std::io::ErrorKind::InvalidInput, - "Unable to find a valid certificate and key", - ) - })?; - let certs: Vec<_> = certs_and_keys - .certs - .iter() - .map(|c| s2n_quic_rustls::rustls::Certificate(c.0.clone())) - .collect(); - Ok(s2n_quic_rustls::rustls::sign::CertifiedKey::new(certs, signing_key)) -} diff --git a/legacy-lib/src/proxy/mod.rs b/legacy-lib/src/proxy/mod.rs deleted file mode 100644 index c89c3942..00000000 --- a/legacy-lib/src/proxy/mod.rs +++ /dev/null @@ -1,42 +0,0 @@ -mod crypto_service; -mod proxy_client_cert; -#[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))] -mod proxy_h3; -mod proxy_main; -#[cfg(feature = "http3-quinn")] -mod proxy_quic_quinn; -#[cfg(feature = "http3-s2n")] -mod proxy_quic_s2n; -mod proxy_tls; -mod socket; - -use crate::error::*; -use http::{Response, StatusCode}; -use http_body_util::{combinators, BodyExt, Either, Empty}; -use hyper::body::{Bytes, Incoming}; - -pub use proxy_main::{Proxy, ProxyBuilder, ProxyBuilderError}; - -/// Type for synthetic boxed body -type BoxBody = combinators::BoxBody; -/// Type for either passthrough body or synthetic body -type EitherBody = Either; - -/// helper function to build http response with passthrough body -fn passthrough_response(response: Response) -> Result> { - Ok(response.map(EitherBody::Left)) -} - -/// build http response with status code of 4xx and 5xx -fn synthetic_error_response(status_code: StatusCode) -> Result> { - let res = Response::builder() - .status(status_code) - .body(EitherBody::Right(BoxBody::new(empty()))) - .unwrap(); - Ok(res) -} - -/// helper function to build a empty body -fn empty() -> BoxBody { - Empty::::new().map_err(|never| match never 
{}).boxed() -} diff --git a/legacy-lib/src/proxy/proxy_client_cert.rs b/legacy-lib/src/proxy/proxy_client_cert.rs deleted file mode 100644 index dfba4ce4..00000000 --- a/legacy-lib/src/proxy/proxy_client_cert.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::{error::*, log::*}; -use rustc_hash::FxHashSet as HashSet; -use rustls::Certificate; -use x509_parser::extensions::ParsedExtension; -use x509_parser::prelude::*; - -#[allow(dead_code)] -// TODO: consider move this function to the layer of handle_request (L7) to return 403 -pub(super) fn check_client_authentication( - client_certs: Option<&[Certificate]>, - client_ca_keyids_set_for_sni: Option<&HashSet>>, -) -> std::result::Result<(), ClientCertsError> { - let Some(client_ca_keyids_set) = client_ca_keyids_set_for_sni else { - // No client cert settings for given server name - return Ok(()); - }; - - let Some(client_certs) = client_certs else { - error!("Client certificate is needed for given server name"); - return Err(ClientCertsError::ClientCertRequired( - "Client certificate is needed for given server name".to_string(), - )); - }; - debug!("Incoming TLS client is (temporarily) authenticated via client cert"); - - // Check client certificate key ids - let mut client_certs_parsed_iter = client_certs.iter().filter_map(|d| parse_x509_certificate(&d.0).ok()); - let match_server_crypto_and_client_cert = client_certs_parsed_iter.any(|c| { - let mut filtered = c.1.iter_extensions().filter_map(|e| { - if let ParsedExtension::AuthorityKeyIdentifier(key_id) = e.parsed_extension() { - key_id.key_identifier.as_ref() - } else { - None - } - }); - filtered.any(|id| client_ca_keyids_set.contains(id.0)) - }); - - if !match_server_crypto_and_client_cert { - error!("Inconsistent client certificate was provided for SNI"); - return Err(ClientCertsError::InconsistentClientCert( - "Inconsistent client certificate was provided for SNI".to_string(), - )); - } - - Ok(()) -} diff --git a/legacy-lib/src/proxy/proxy_h3.rs b/legacy-lib/src/proxy/proxy_h3.rs deleted file mode 100644 index 699938b7..00000000 --- a/legacy-lib/src/proxy/proxy_h3.rs +++ /dev/null @@ -1,186 +0,0 @@ -use super::Proxy; -use crate::{certs::CryptoSource, error::*, log::*, utils::ServerNameBytesExp}; -use bytes::{Buf, Bytes}; -use futures::Stream; -#[cfg(feature = "http3-quinn")] -use h3::{quic::BidiStream, quic::Connection as ConnectionQuic, server::RequestStream}; -use http::{Request, Response}; -use http_body_util::{BodyExt, BodyStream, StreamBody}; -use hyper::body::{Body, Incoming}; -use hyper_util::client::legacy::connect::Connect; -#[cfg(feature = "http3-s2n")] -use s2n_quic_h3::h3::{self, quic::BidiStream, quic::Connection as ConnectionQuic, server::RequestStream}; -use std::net::SocketAddr; -use tokio::time::{timeout, Duration}; - -impl Proxy -where - // T: Connect + Clone + Sync + Send + 'static, - U: CryptoSource + Clone + Sync + Send + 'static, -{ - pub(super) async fn connection_serve_h3( - &self, - quic_connection: C, - tls_server_name: ServerNameBytesExp, - client_addr: SocketAddr, - ) -> Result<()> - where - C: ConnectionQuic, - >::BidiStream: BidiStream + Send + 'static, - <>::BidiStream as BidiStream>::RecvStream: Send, - <>::BidiStream as BidiStream>::SendStream: Send, - { - let mut h3_conn = h3::server::Connection::<_, Bytes>::new(quic_connection).await?; - info!( - "QUIC/HTTP3 connection established from {:?} {:?}", - client_addr, tls_server_name - ); - // TODO: Is here enough to fetch server_name from NewConnection? 
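// Illustrative only: the decision rule of check_client_authentication above.
// A client passes iff any presented certificate carries an
// AuthorityKeyIdentifier contained in the allow-set derived from the
// configured client CA certificates. Types are simplified stand-ins for the
// parsed x509 data used in the real code.
use std::collections::HashSet;

fn client_cert_allowed(presented_authority_key_ids: &[Vec<u8>], allowed: &HashSet<Vec<u8>>) -> bool {
    presented_authority_key_ids.iter().any(|key_id| allowed.contains(key_id))
}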
- // to avoid deep nested call from listener_service_h3 - loop { - // this routine follows hyperium/h3 examples https://github.com/hyperium/h3/blob/master/examples/server.rs - match h3_conn.accept().await { - Ok(None) => { - break; - } - Err(e) => { - warn!("HTTP/3 error on accept incoming connection: {}", e); - match e.get_error_level() { - h3::error::ErrorLevel::ConnectionError => break, - h3::error::ErrorLevel::StreamError => continue, - } - } - Ok(Some((req, stream))) => { - // We consider the connection count separately from the stream count. - // Max clients for h1/h2 = max 'stream' for h3. - let request_count = self.globals.request_count.clone(); - if request_count.increment() > self.globals.proxy_config.max_clients { - request_count.decrement(); - h3_conn.shutdown(0).await?; - break; - } - debug!("Request incoming: current # {}", request_count.current()); - - let self_inner = self.clone(); - let tls_server_name_inner = tls_server_name.clone(); - self.globals.runtime_handle.spawn(async move { - if let Err(e) = timeout( - self_inner.globals.proxy_config.proxy_timeout + Duration::from_secs(1), // timeout per stream are considered as same as one in http2 - self_inner.stream_serve_h3(req, stream, client_addr, tls_server_name_inner), - ) - .await - { - error!("HTTP/3 failed to process stream: {}", e); - } - request_count.decrement(); - debug!("Request processed: current # {}", request_count.current()); - }); - } - } - } - - Ok(()) - } - - async fn stream_serve_h3( - &self, - req: Request<()>, - stream: RequestStream, - client_addr: SocketAddr, - tls_server_name: ServerNameBytesExp, - ) -> Result<()> - where - S: BidiStream + Send + 'static, - >::RecvStream: Send, - { - println!("stream_serve_h3"); - let (req_parts, _) = req.into_parts(); - // split stream and async body handling - let (mut send_stream, mut recv_stream) = stream.split(); - - // let max_body_size = self.globals.proxy_config.h3_request_max_body_size; - // // let max = body_stream.size_hint().upper().unwrap_or(u64::MAX); - // // if max > max_body_size as u64 { - // // return Err(HttpError::TooLargeRequestBody); - // // } - // let new_req = Request::from_parts(req_parts, body_stream); - - //////////////////// - // TODO: TODO: TODO: TODO: - // TODO: Body in hyper-0.14 was changed to Incoming in hyper-1.0, and it is not accessible from outside. - // Thus, we need to implement IncomingLike trait using channel. Also, the backend handler must feed the body in the form of - // Either as body. - // Also, the downstream from the backend handler could be Incoming, but will be wrapped as Either as well due to H3. - // Result, E> type includes E as HttpError to generate the status code and related Response. - // Thus to handle synthetic error messages in BoxBody, the serve() function outputs Response, BoxBody>>>. - //////////////////// - - // // generate streamed body with trailers using channel - // let (body_sender, req_body) = Incoming::channel(); - - // Buffering and sending body through channel for protocol conversion like h3 -> h2/http1.1 - // The underling buffering, i.e., buffer given by the API recv_data.await?, is handled by quinn. - let max_body_size = self.globals.proxy_config.h3_request_max_body_size; - self.globals.runtime_handle.spawn(async move { - // let mut sender = body_sender; - let mut size = 0usize; - while let Some(mut body) = recv_stream.recv_data().await? 
-        debug!("HTTP/3 incoming request body: remaining {}", body.remaining());
-        size += body.remaining();
-        if size > max_body_size {
-          error!(
-            "Exceeds max request body size for HTTP/3: received {}, maximum_allowed {}",
-            size, max_body_size
-          );
-          return Err(RpxyError::Proxy("Exceeds max request body size for HTTP/3".to_string()));
-        }
-        // create stream body to save memory, shallow copy (increment of ref-count) to Bytes using copy_to_bytes
-        // sender.send_data(body.copy_to_bytes(body.remaining())).await?;
-      }
-
-      // trailers: use inner as a workaround (directly get the trailers)
-      let trailers = recv_stream.as_mut().recv_trailers().await?;
-      if trailers.is_some() {
-        debug!("HTTP/3 incoming request trailers");
-        // sender.send_trailers(trailers.unwrap()).await?;
-      }
-      Ok(())
-    });
-
-    // let new_req: Request<Incoming> = Request::from_parts(req_parts, req_body);
-    // let res = self
-    //   .msg_handler
-    //   .clone()
-    //   .handle_request(
-    //     new_req,
-    //     client_addr,
-    //     self.listening_on,
-    //     self.tls_enabled,
-    //     Some(tls_server_name),
-    //   )
-    //   .await?;
-
-    // let (new_res_parts, new_body) = res.into_parts();
-    // let new_res = Response::from_parts(new_res_parts, ());
-
-    // match send_stream.send_response(new_res).await {
-    //   Ok(_) => {
-    //     debug!("HTTP/3 response to connection successful");
-    //     // aggregate body without copying
-    //     let body_data = new_body.collect().await?.aggregate();
-
-    //     // create stream body to save memory, shallow copy (increment of ref-count) to Bytes using copy_to_bytes
-    //     send_stream
-    //       .send_data(body_data.copy_to_bytes(body_data.remaining()))
-    //       .await?;
-
-    //     // TODO: needs handling trailer? should be included in body from handler.
-    //   }
-    //   Err(err) => {
-    //     error!("Unable to send response to connection peer: {:?}", err);
-    //   }
-    // }
-    // Ok(send_stream.finish().await?)
-    todo!()
-  }
-}
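The body-buffering loop deleted above enforces h3_request_max_body_size while it forwards chunks. A hypothetical, synchronous distillation of that guard, with a plain iterator standing in for the async recv_data() loop; names are illustrative:

use bytes::{Buf, Bytes};

fn accumulate_capped<I>(chunks: I, max_body_size: usize) -> Result<Vec<Bytes>, String>
where
  I: IntoIterator<Item = Bytes>,
{
  let mut size = 0usize;
  let mut out = Vec::new();
  for mut chunk in chunks {
    size += chunk.remaining();
    if size > max_body_size {
      return Err(format!("request body exceeds {max_body_size} bytes"));
    }
    // copy_to_bytes is a shallow, ref-counted copy; no byte duplication
    out.push(chunk.copy_to_bytes(chunk.remaining()));
  }
  Ok(out)
}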
diff --git a/legacy-lib/src/proxy/proxy_main.rs b/legacy-lib/src/proxy/proxy_main.rs
deleted file mode 100644
index ec1008a3..00000000
--- a/legacy-lib/src/proxy/proxy_main.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use super::{passthrough_response, socket::bind_tcp_socket, synthetic_error_response, EitherBody};
-use crate::{
-  certs::CryptoSource, error::*, globals::Globals, handler::HttpMessageHandler, hyper_executor::LocalExecutor, log::*,
-  utils::ServerNameBytesExp,
-};
-use derive_builder::{self, Builder};
-use http::{Request, StatusCode};
-use hyper::{
-  body::Incoming,
-  rt::{Read, Write},
-  service::service_fn,
-};
-use hyper_util::{client::legacy::connect::Connect, rt::TokioIo, server::conn::auto::Builder as ConnectionBuilder};
-use std::{net::SocketAddr, sync::Arc};
-use tokio::time::{timeout, Duration};
-
-#[derive(Clone, Builder)]
-/// Proxy main object
-pub struct Proxy<U>
-where
-  // T: Connect + Clone + Sync + Send + 'static,
-  U: CryptoSource + Clone + Sync + Send + 'static,
-{
-  pub listening_on: SocketAddr,
-  pub tls_enabled: bool, // whether this TCP listener serves TLS or not
-  /// hyper server receiving http request
-  pub http_server: Arc<ConnectionBuilder<LocalExecutor>>,
-  // pub msg_handler: Arc<HttpMessageHandler<T, U>>,
-  pub msg_handler: Arc<HttpMessageHandler<U>>,
-  pub globals: Arc<Globals<U>>,
-}
-
-/// Wrapper function to handle request
-async fn serve_request<U>(
-  req: Request<Incoming>,
-  // handler: Arc<HttpMessageHandler<T, U>>,
-  handler: Arc<HttpMessageHandler<U>>,
-  client_addr: SocketAddr,
-  listen_addr: SocketAddr,
-  tls_enabled: bool,
-  tls_server_name: Option<ServerNameBytesExp>,
-) -> Result<hyper::Response<EitherBody>>
-where
-  U: CryptoSource + Clone + Sync + Send + 'static,
-{
-  match handler
-    .handle_request(req, client_addr, listen_addr, tls_enabled, tls_server_name)
-    .await?
-  {
-    Ok(res) => passthrough_response(res),
-    Err(e) => synthetic_error_response(StatusCode::from(e)),
-  }
-}
-
-impl<U> Proxy<U>
-where
-  // T: Connect + Clone + Sync + Send + 'static,
-  U: CryptoSource + Clone + Sync + Send,
-{
-  /// Serves requests from clients
-  pub(super) fn serve_connection<I>(
-    &self,
-    stream: I,
-    peer_addr: SocketAddr,
-    tls_server_name: Option<ServerNameBytesExp>,
-  ) where
-    I: Read + Write + Send + Unpin + 'static,
-  {
-    let request_count = self.globals.request_count.clone();
-    if request_count.increment() > self.globals.proxy_config.max_clients {
-      request_count.decrement();
-      return;
-    }
-    debug!("Request incoming: current # {}", request_count.current());
-
-    let server_clone = self.http_server.clone();
-    let msg_handler_clone = self.msg_handler.clone();
-    let timeout_sec = self.globals.proxy_config.proxy_timeout;
-    let tls_enabled = self.tls_enabled;
-    let listening_on = self.listening_on;
-    self.globals.runtime_handle.clone().spawn(async move {
-      timeout(
-        timeout_sec + Duration::from_secs(1),
-        server_clone.serve_connection_with_upgrades(
-          stream,
-          service_fn(move |req: Request<Incoming>| {
-            serve_request(
-              req,
-              msg_handler_clone.clone(),
-              peer_addr,
-              listening_on,
-              tls_enabled,
-              tls_server_name.clone(),
-            )
-          }),
-        ),
-      )
-      .await
-      .ok();
-
-      request_count.decrement();
-      debug!("Request processed: current # {}", request_count.current());
-    });
-  }
-
-  /// Start without TLS (HTTP cleartext)
-  async fn start_without_tls(&self) -> Result<()> {
-    let listener_service = async {
-      let tcp_socket = bind_tcp_socket(&self.listening_on)?;
-      let tcp_listener = tcp_socket.listen(self.globals.proxy_config.tcp_listen_backlog)?;
-      info!("Start TCP proxy serving with HTTP request for configured host names");
-      while let Ok((stream, client_addr)) = tcp_listener.accept().await {
-        self.serve_connection(TokioIo::new(stream), client_addr, None);
-      }
-      Ok(()) as Result<()>
-    };
-    listener_service.await?;
-    Ok(())
-  }
-
-  /// Entrypoint for HTTP/1.1 and HTTP/2 servers
-  pub async fn start(&self) -> Result<()> {
-    let proxy_service = async {
-      if self.tls_enabled {
-        self.start_with_tls().await
-      } else {
-        self.start_without_tls().await
-      }
-    };
-
-    match &self.globals.term_notify {
-      Some(term) => {
-        tokio::select! {
-          _ = proxy_service => {
-            warn!("Proxy service got down");
-          }
-          _ = term.notified() => {
-            info!("Proxy service listening on {} receives term signal", self.listening_on);
-          }
-        }
-      }
-      None => {
-        proxy_service.await?;
-        warn!("Proxy service got down");
-      }
-    }
-
-    Ok(())
-  }
-}
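Both the TCP path above and the HTTP/3 accept loops below gate new work on request_count against max_clients. A sketch of how such a counter could look; the real RequestCount lives elsewhere in the crate, so this is an assumption-laden stand-in:

use std::sync::atomic::{AtomicUsize, Ordering};

#[derive(Default)]
struct RequestCount(AtomicUsize);

impl RequestCount {
  fn current(&self) -> usize {
    self.0.load(Ordering::Relaxed)
  }
  fn increment(&self) -> usize {
    self.0.fetch_add(1, Ordering::Relaxed) + 1
  }
  fn decrement(&self) -> usize {
    self.0.fetch_sub(1, Ordering::Relaxed) - 1
  }
}

fn try_admit(count: &RequestCount, max_clients: usize) -> bool {
  if count.increment() > max_clients {
    count.decrement(); // roll back the reservation and reject
    return false;
  }
  true
}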
diff --git a/legacy-lib/src/proxy/proxy_quic_quinn.rs b/legacy-lib/src/proxy/proxy_quic_quinn.rs
deleted file mode 100644
index 1828e5f7..00000000
--- a/legacy-lib/src/proxy/proxy_quic_quinn.rs
+++ /dev/null
@@ -1,124 +0,0 @@
-use super::socket::bind_udp_socket;
-use super::{
-  crypto_service::{ServerCrypto, ServerCryptoBase},
-  proxy_main::Proxy,
-};
-use crate::{certs::CryptoSource, error::*, log::*, utils::BytesName};
-use hot_reload::ReloaderReceiver;
-use hyper_util::client::legacy::connect::Connect;
-use quinn::{crypto::rustls::HandshakeData, Endpoint, ServerConfig as QuicServerConfig, TransportConfig};
-use rustls::ServerConfig;
-use std::sync::Arc;
-
-impl<U> Proxy<U>
-where
-  // T: Connect + Clone + Sync + Send + 'static,
-  U: CryptoSource + Clone + Sync + Send + 'static,
-{
-  pub(super) async fn listener_service_h3(
-    &self,
-    mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>,
-  ) -> Result<()> {
-    info!("Start UDP proxy serving with HTTP/3 request for configured host names [quinn]");
-    // first set as null config server
-    let rustls_server_config = ServerConfig::builder()
-      .with_safe_default_cipher_suites()
-      .with_safe_default_kx_groups()
-      .with_protocol_versions(&[&rustls::version::TLS13])?
-      .with_no_client_auth()
-      .with_cert_resolver(Arc::new(rustls::server::ResolvesServerCertUsingSni::new()));
-
-    let mut transport_config_quic = TransportConfig::default();
-    transport_config_quic
-      .max_concurrent_bidi_streams(self.globals.proxy_config.h3_max_concurrent_bidistream.into())
-      .max_concurrent_uni_streams(self.globals.proxy_config.h3_max_concurrent_unistream.into())
-      .max_idle_timeout(
-        self
-          .globals
-          .proxy_config
-          .h3_max_idle_timeout
-          .map(|v| quinn::IdleTimeout::try_from(v).unwrap()),
-      );
-
-    let mut server_config_h3 = QuicServerConfig::with_crypto(Arc::new(rustls_server_config));
-    server_config_h3.transport = Arc::new(transport_config_quic);
-    server_config_h3.concurrent_connections(self.globals.proxy_config.h3_max_concurrent_connections);
-
-    // To reuse address
-    let udp_socket = bind_udp_socket(&self.listening_on)?;
-    let runtime = quinn::default_runtime()
-      .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::Other, "No async runtime found"))?;
-    let endpoint = Endpoint::new(
-      quinn::EndpointConfig::default(),
-      Some(server_config_h3),
-      udp_socket,
-      runtime,
-    )?;
-
-    let mut server_crypto: Option<Arc<ServerCrypto>> = None;
-    loop {
-      tokio::select! {
-        new_conn = endpoint.accept() => {
-          if server_crypto.is_none() || new_conn.is_none() {
-            continue;
-          }
-          let mut conn: quinn::Connecting = new_conn.unwrap();
-          let Ok(hsd) = conn.handshake_data().await else {
-            continue
-          };
-
-          let Ok(hsd_downcast) = hsd.downcast::<HandshakeData>() else {
-            continue
-          };
-          let Some(new_server_name) = hsd_downcast.server_name else {
-            warn!("HTTP/3 no SNI is given");
-            continue;
-          };
-          debug!(
-            "HTTP/3 connection incoming (SNI {:?})",
-            new_server_name
-          );
-          // TODO: Extracting server_name here and passing it ever deeper down is inefficient. Wouldn't after connecting -> connections be good enough?
-          // TODO: Want to consolidate this with the regular TLS path into a common enum or the like.
-          let self_clone = self.clone();
-          self.globals.runtime_handle.spawn(async move {
-            let client_addr = conn.remote_address();
-            let quic_connection = match conn.await {
-              Ok(new_conn) => {
-                info!("New connection established");
-                h3_quinn::Connection::new(new_conn)
-              },
-              Err(e) => {
-                warn!("QUIC accepting connection failed: {:?}", e);
-                return Err(RpxyError::QuicConn(e));
-              }
-            };
-            // Timeout is based on underlying quic
-            if let Err(e) = self_clone.connection_serve_h3(quic_connection, new_server_name.to_server_name_vec(), client_addr).await {
-              warn!("QUIC or HTTP/3 connection failed: {}", e);
-            };
-            Ok(())
-          });
-        }
-        _ = server_crypto_rx.changed() => {
-          if server_crypto_rx.borrow().is_none() {
-            error!("Reloader is broken");
-            break;
-          }
-          let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
-
-          server_crypto = (&cert_keys_map).try_into().ok();
-          let Some(inner) = server_crypto.clone() else {
-            error!("Failed to update server crypto for h3");
-            break;
-          };
-          endpoint.set_server_config(Some(QuicServerConfig::with_crypto(inner.clone().inner_global_no_client_auth.clone())));
-
-        }
-        else => break
-      }
-    }
-    endpoint.wait_idle().await;
-    Ok(()) as Result<()>
-  }
-}
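The quinn listener above pulls the SNI out of the handshake data before the connection completes. A condensed sketch of that step, assuming quinn 0.10 with the rustls backend; sni_of is an illustrative helper, not rpxy API:

use quinn::crypto::rustls::HandshakeData;

/// Wait for handshake data on an in-progress connection and return the SNI, if any.
async fn sni_of(conn: &mut quinn::Connecting) -> Option<String> {
  let hsd = conn.handshake_data().await.ok()?; // Box<dyn Any>
  let hsd = hsd.downcast::<HandshakeData>().ok()?; // rustls-backed endpoints only
  hsd.server_name // None when the client sent no SNI
}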
diff --git a/legacy-lib/src/proxy/proxy_quic_s2n.rs b/legacy-lib/src/proxy/proxy_quic_s2n.rs
deleted file mode 100644
index d1d15807..00000000
--- a/legacy-lib/src/proxy/proxy_quic_s2n.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-use super::{
-  crypto_service::{ServerCrypto, ServerCryptoBase},
-  proxy_main::Proxy,
-};
-use crate::{certs::CryptoSource, error::*, log::*, utils::BytesName};
-use hot_reload::ReloaderReceiver;
-use hyper_util::client::legacy::connect::Connect;
-use s2n_quic::provider;
-use std::sync::Arc;
-
-impl<U> Proxy<U>
-where
-  // T: Connect + Clone + Sync + Send + 'static,
-  U: CryptoSource + Clone + Sync + Send + 'static,
-{
-  pub(super) async fn listener_service_h3(
-    &self,
-    mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>,
-  ) -> Result<()> {
-    info!("Start UDP proxy serving with HTTP/3 request for configured host names [s2n-quic]");
-
-    // initially wait for receipt
-    let mut server_crypto: Option<Arc<ServerCrypto>> = {
-      let _ = server_crypto_rx.changed().await;
-      let sc = self.receive_server_crypto(server_crypto_rx.clone())?;
-      Some(sc)
-    };
-
-    // event loop
-    loop {
-      tokio::select! {
-        v = self.listener_service_h3_inner(&server_crypto) => {
-          if let Err(e) = v {
-            error!("Quic connection event loop illegally shutdown [s2n-quic] {e}");
-            break;
-          }
-        }
-        _ = server_crypto_rx.changed() => {
-          server_crypto = match self.receive_server_crypto(server_crypto_rx.clone()) {
-            Ok(sc) => Some(sc),
-            Err(e) => {
-              error!("{e}");
-              break;
-            }
-          };
-        }
-        else => break
-      }
-    }
-
-    Ok(())
-  }
-
-  fn receive_server_crypto(&self, server_crypto_rx: ReloaderReceiver<ServerCryptoBase>) -> Result<Arc<ServerCrypto>> {
-    let cert_keys_map = server_crypto_rx.borrow().clone().ok_or_else(|| {
-      error!("Reloader is broken");
-      RpxyError::Other(anyhow!("Reloader is broken"))
-    })?;
-
-    let server_crypto: Option<Arc<ServerCrypto>> = (&cert_keys_map).try_into().ok();
-    server_crypto.ok_or_else(|| {
-      error!("Failed to update server crypto for h3 [s2n-quic]");
-      RpxyError::Other(anyhow!("Failed to update server crypto for h3 [s2n-quic]"))
-    })
-  }
-
-  async fn listener_service_h3_inner(&self, server_crypto: &Option<Arc<ServerCrypto>>) -> Result<()> {
-    // setup UDP socket
-    let io = provider::io::tokio::Builder::default()
-      .with_receive_address(self.listening_on)?
-      .with_reuse_port()?
-      .build()?;
-
-    // setup limits
-    let mut limits = provider::limits::Limits::default()
-      .with_max_open_local_bidirectional_streams(self.globals.proxy_config.h3_max_concurrent_bidistream as u64)
-      .map_err(|e| anyhow!(e))?
-      .with_max_open_remote_bidirectional_streams(self.globals.proxy_config.h3_max_concurrent_bidistream as u64)
-      .map_err(|e| anyhow!(e))?
-      .with_max_open_local_unidirectional_streams(self.globals.proxy_config.h3_max_concurrent_unistream as u64)
-      .map_err(|e| anyhow!(e))?
-      .with_max_open_remote_unidirectional_streams(self.globals.proxy_config.h3_max_concurrent_unistream as u64)
-      .map_err(|e| anyhow!(e))?
-      .with_max_active_connection_ids(self.globals.proxy_config.h3_max_concurrent_connections as u64)
-      .map_err(|e| anyhow!(e))?;
-    limits = if let Some(v) = self.globals.proxy_config.h3_max_idle_timeout {
-      limits.with_max_idle_timeout(v).map_err(|e| anyhow!(e))?
-    } else {
-      limits
-    };
-
-    // setup tls
-    let Some(server_crypto) = server_crypto else {
-      warn!("No server crypto is given [s2n-quic]");
-      return Err(RpxyError::Other(anyhow!("No server crypto is given [s2n-quic]")));
-    };
-    let tls = server_crypto.inner_global_no_client_auth.clone();
-
-    let mut server = s2n_quic::Server::builder()
-      .with_tls(tls)
-      .map_err(|e| anyhow::anyhow!(e))?
-      .with_io(io)
-      .map_err(|e| anyhow!(e))?
-      .with_limits(limits)
-      .map_err(|e| anyhow!(e))?
-      .start()
-      .map_err(|e| anyhow!(e))?;
-
-    // quic event loop. this immediately cancels when crypto is updated by tokio::select!
-    while let Some(new_conn) = server.accept().await {
-      debug!("New QUIC connection established");
-      let Ok(Some(new_server_name)) = new_conn.server_name() else {
-        warn!("HTTP/3 no SNI is given");
-        continue;
-      };
-      debug!("HTTP/3 connection incoming (SNI {:?})", new_server_name);
-      let self_clone = self.clone();
-
-      self.globals.runtime_handle.spawn(async move {
-        let client_addr = new_conn.remote_addr()?;
-        let quic_connection = s2n_quic_h3::Connection::new(new_conn);
-        // Timeout is based on underlying quic
-        if let Err(e) = self_clone
-          .connection_serve_h3(quic_connection, new_server_name.to_server_name_vec(), client_addr)
-          .await
-        {
-          warn!("QUIC or HTTP/3 connection failed: {}", e);
-        };
-        Ok(()) as Result<()>
-      });
-    }
-
-    Ok(())
-  }
-}
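Both QUIC listeners above, and the TLS listener below, follow the same hot-reload shape: select over an accept future and a reload watcher, swapping crypto in place when the watcher fires. A minimal sketch with tokio::sync::watch standing in for hot_reload's ReloaderReceiver; the types are placeholders:

use tokio::sync::{mpsc, watch};

// `u64` stands in for Arc<ServerCrypto>; `u32` for an accepted connection.
async fn accept_loop(mut crypto_rx: watch::Receiver<Option<u64>>, mut conns: mpsc::Receiver<u32>) {
  let mut crypto: Option<u64> = crypto_rx.borrow().clone();
  loop {
    tokio::select! {
      maybe_conn = conns.recv() => {
        // reject work until crypto has been loaded at least once
        let (Some(conn), Some(generation)) = (maybe_conn, crypto) else { continue };
        println!("serving connection {conn} with crypto generation {generation}");
      }
      changed = crypto_rx.changed() => {
        if changed.is_err() { break; } // sender dropped: the reloader is gone
        crypto = crypto_rx.borrow().clone(); // swap in the freshly loaded config
      }
    }
  }
}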
diff --git a/legacy-lib/src/proxy/proxy_tls.rs b/legacy-lib/src/proxy/proxy_tls.rs
deleted file mode 100644
index 6ed62126..00000000
--- a/legacy-lib/src/proxy/proxy_tls.rs
+++ /dev/null
@@ -1,155 +0,0 @@
-use super::{
-  crypto_service::{CryptoReloader, ServerCrypto, ServerCryptoBase, SniServerCryptoMap},
-  proxy_main::Proxy,
-  socket::bind_tcp_socket,
-};
-use crate::{certs::CryptoSource, constants::*, error::*, log::*, utils::BytesName};
-use hot_reload::{ReloaderReceiver, ReloaderService};
-use hyper_util::{client::legacy::connect::Connect, rt::TokioIo, server::conn::auto::Builder as ConnectionBuilder};
-use std::sync::Arc;
-use tokio::time::{timeout, Duration};
-
-impl<U> Proxy<U>
-where
-  // T: Connect + Clone + Sync + Send + 'static,
-  U: CryptoSource + Clone + Sync + Send + 'static,
-{
-  // TCP Listener Service, i.e., http/2 and http/1.1
-  async fn listener_service(&self, mut server_crypto_rx: ReloaderReceiver<ServerCryptoBase>) -> Result<()> {
-    let tcp_socket = bind_tcp_socket(&self.listening_on)?;
-    let tcp_listener = tcp_socket.listen(self.globals.proxy_config.tcp_listen_backlog)?;
-    info!("Start TCP proxy serving with HTTPS request for configured host names");
-
-    let mut server_crypto_map: Option<Arc<SniServerCryptoMap>> = None;
-    loop {
-      tokio::select! {
-        tcp_cnx = tcp_listener.accept() => {
-          if tcp_cnx.is_err() || server_crypto_map.is_none() {
-            continue;
-          }
-          let (raw_stream, client_addr) = tcp_cnx.unwrap();
-          let sc_map_inner = server_crypto_map.clone();
-          let self_inner = self.clone();
-
-          // spawn the TLS handshake asynchronously to avoid blocking the thread with sequential handshakes.
-          let handshake_fut = async move {
-            let acceptor = tokio_rustls::LazyConfigAcceptor::new(tokio_rustls::rustls::server::Acceptor::default(), raw_stream).await;
-            if let Err(e) = acceptor {
-              return Err(RpxyError::Proxy(format!("Failed to handshake TLS: {e}")));
-            }
-            let start = acceptor.unwrap();
-            let client_hello = start.client_hello();
-            let server_name = client_hello.server_name();
-            debug!("HTTP/2 or 1.1: SNI in ClientHello: {:?}", server_name);
-            let server_name_in_bytes = server_name.map_or_else(|| None, |v| Some(v.to_server_name_vec()));
-            if server_name_in_bytes.is_none() {
-              return Err(RpxyError::Proxy("No SNI is given".to_string()));
-            }
-            let server_crypto = sc_map_inner.as_ref().unwrap().get(server_name_in_bytes.as_ref().unwrap());
-            if server_crypto.is_none() {
-              return Err(RpxyError::Proxy(format!("No TLS serving app for {:?}", server_name.unwrap())));
-            }
-            let stream = match start.into_stream(server_crypto.unwrap().clone()).await {
-              Ok(s) => TokioIo::new(s),
-              Err(e) => {
-                return Err(RpxyError::Proxy(format!("Failed to handshake TLS: {e}")));
-              }
-            };
-            self_inner.serve_connection(stream, client_addr, server_name_in_bytes);
-            Ok(())
-          };
-
-          self.globals.runtime_handle.spawn(async move {
-            // a timeout is introduced to avoid getting stuck here.
-            let Ok(v) = timeout(
-              Duration::from_secs(TLS_HANDSHAKE_TIMEOUT_SEC),
-              handshake_fut
-            ).await else {
-              error!("Timeout to handshake TLS");
-              return;
-            };
-            if let Err(e) = v {
-              error!("{}", e);
-            }
-          });
-        }
-        _ = server_crypto_rx.changed() => {
-          if server_crypto_rx.borrow().is_none() {
-            error!("Reloader is broken");
-            break;
-          }
-          let cert_keys_map = server_crypto_rx.borrow().clone().unwrap();
-          let Some(server_crypto): Option<Arc<ServerCrypto>> = (&cert_keys_map).try_into().ok() else {
-            error!("Failed to update server crypto");
-            break;
-          };
-          server_crypto_map = Some(server_crypto.inner_local_map.clone());
-        }
-        else => break
-      }
-    }
-    Ok(()) as Result<()>
-  }
-
-  pub async fn start_with_tls(&self) -> Result<()> {
-    let (cert_reloader_service, cert_reloader_rx) = ReloaderService::<CryptoReloader<U>, ServerCryptoBase>::new(
-      &self.globals.clone(),
-      CERTS_WATCH_DELAY_SECS,
-      !LOAD_CERTS_ONLY_WHEN_UPDATED,
-    )
-    .await
-    .map_err(|e| anyhow::anyhow!(e))?;
-
-    #[cfg(not(any(feature = "http3-quinn", feature = "http3-s2n")))]
-    {
-      tokio::select! {
-        _ = cert_reloader_service.start() => {
-          error!("Cert service for TLS exited");
-        },
-        _ = self.listener_service(cert_reloader_rx) => {
-          error!("TCP proxy service for TLS exited");
-        },
-        else => {
-          error!("Something went wrong");
-          return Ok(())
-        }
-      };
-      Ok(())
-    }
-    #[cfg(any(feature = "http3-quinn", feature = "http3-s2n"))]
-    {
-      if self.globals.proxy_config.http3 {
-        tokio::select! {
-          _ = cert_reloader_service.start() => {
-            error!("Cert service for TLS exited");
-          },
-          _ = self.listener_service(cert_reloader_rx.clone()) => {
-            error!("TCP proxy service for TLS exited");
-          },
-          _ = self.listener_service_h3(cert_reloader_rx) => {
-            error!("UDP proxy service for QUIC exited");
-          },
-          else => {
-            error!("Something went wrong");
-            return Ok(())
-          }
-        };
-        Ok(())
-      } else {
-        tokio::select! {
-          _ = cert_reloader_service.start() => {
-            error!("Cert service for TLS exited");
-          },
-          _ = self.listener_service(cert_reloader_rx) => {
-            error!("TCP proxy service for TLS exited");
-          },
-          else => {
-            error!("Something went wrong");
-            return Ok(())
-          }
-        };
-        Ok(())
-      }
-    }
-  }
-}
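The heart of listener_service above is a two-phase TLS accept: peek at the ClientHello, look up the per-SNI rustls config, then finish the handshake with it. A compact sketch against tokio-rustls 0.24; accept_with_sni and lookup are illustrative, not the crate's API:

use std::sync::Arc;
use tokio::net::TcpStream;
use tokio_rustls::{rustls, LazyConfigAcceptor};

async fn accept_with_sni<F>(stream: TcpStream, lookup: F) -> std::io::Result<()>
where
  F: Fn(&str) -> Option<Arc<rustls::ServerConfig>>,
{
  // Phase 1: read only the ClientHello, without committing to a config.
  let start = LazyConfigAcceptor::new(rustls::server::Acceptor::default(), stream).await?;
  let hello = start.client_hello();
  let Some(name) = hello.server_name() else {
    return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "no SNI"));
  };
  let Some(config) = lookup(name) else {
    return Err(std::io::Error::new(std::io::ErrorKind::NotFound, "no app for SNI"));
  };
  // Phase 2: finish the handshake with the SNI-selected config.
  let _tls_stream = start.into_stream(config).await?;
  // hand _tls_stream to the HTTP connection builder here
  Ok(())
}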
diff --git a/legacy-lib/src/proxy/socket.rs b/legacy-lib/src/proxy/socket.rs
deleted file mode 100644
index cf38ddc0..00000000
--- a/legacy-lib/src/proxy/socket.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use crate::{error::*, log::*};
-#[cfg(feature = "http3-quinn")]
-use socket2::{Domain, Protocol, Socket, Type};
-use std::net::SocketAddr;
-#[cfg(feature = "http3-quinn")]
-use std::net::UdpSocket;
-use tokio::net::TcpSocket;
-
-/// Binds a TCP socket to the given `SocketAddr` and returns it with the `SO_REUSEADDR` and `SO_REUSEPORT` options set.
-/// These options are required to re-bind the socket address when the proxy instance is reconstructed.
-pub(super) fn bind_tcp_socket(listening_on: &SocketAddr) -> Result<TcpSocket> {
-  let tcp_socket = if listening_on.is_ipv6() {
-    TcpSocket::new_v6()
-  } else {
-    TcpSocket::new_v4()
-  }?;
-  tcp_socket.set_reuseaddr(true)?;
-  tcp_socket.set_reuseport(true)?;
-  if let Err(e) = tcp_socket.bind(*listening_on) {
-    error!("Failed to bind TCP socket: {}", e);
-    return Err(RpxyError::Io(e));
-  };
-  Ok(tcp_socket)
-}
-
-#[cfg(feature = "http3-quinn")]
-/// Binds a UDP socket to the given `SocketAddr` and returns it with the `SO_REUSEADDR` and `SO_REUSEPORT` options set.
-/// These options are required to re-bind the socket address when the proxy instance is reconstructed.
-pub(super) fn bind_udp_socket(listening_on: &SocketAddr) -> Result<UdpSocket> {
-  let socket = if listening_on.is_ipv6() {
-    Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))
-  } else {
-    Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))
-  }?;
-  socket.set_reuse_address(true)?; // This may not be necessary.
-  socket.set_reuse_port(true)?;
-  socket.set_nonblocking(true)?; // quinn already sets this internally, so this line isn't strictly necessary here; kept just in case.
-
-  if let Err(e) = socket.bind(&(*listening_on).into()) {
-    error!("Failed to bind UDP socket: {}", e);
-    return Err(RpxyError::Io(e));
-  };
-  let udp_socket: UdpSocket = socket.into();
-
-  Ok(udp_socket)
-}
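The bytes_name.rs module deleted just below keeps host and path keys as lowercase ASCII bytes so lookups are case-insensitive without repeated UTF-8 decoding. A usage sketch, assuming the module's BytesName trait and its two structs are in scope:

fn main() {
  // "Example.COM" and "example.com" normalize to the same key bytes
  let sni = "Example.COM".to_server_name_vec();
  assert_eq!(sni.0, b"example.com".to_vec());

  // path keys support prefix matching on the normalized bytes
  let path = "/API/v1".to_path_name_vec();
  assert!(path.starts_with(&"/api".to_path_name_vec()));
}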
"KO".to_path_name_vec(); - assert!(s.starts_with(&correct)); - assert!(!s.starts_with(&incorrect)); - } - - #[test] - fn as_ref_works() { - let s = "OK_str".to_path_name_vec(); - assert_eq!(s.as_ref(), "ok_str".as_bytes()); - } -} diff --git a/legacy-lib/src/utils/mod.rs b/legacy-lib/src/utils/mod.rs deleted file mode 100644 index ed8d4ff9..00000000 --- a/legacy-lib/src/utils/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod bytes_name; -mod socket_addr; - -pub use bytes_name::{BytesName, PathNameBytesExp, ServerNameBytesExp}; -pub use socket_addr::ToCanonical; diff --git a/legacy-lib/src/utils/socket_addr.rs b/legacy-lib/src/utils/socket_addr.rs deleted file mode 100644 index 105fc557..00000000 --- a/legacy-lib/src/utils/socket_addr.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -pub trait ToCanonical { - fn to_canonical(&self) -> Self; -} - -impl ToCanonical for SocketAddr { - fn to_canonical(&self) -> Self { - match self { - SocketAddr::V4(_) => *self, - SocketAddr::V6(v6) => match v6.ip().to_ipv4() { - Some(mapped) => { - if mapped == Ipv4Addr::new(0, 0, 0, 1) { - *self - } else { - SocketAddr::new(IpAddr::V4(mapped), self.port()) - } - } - None => *self, - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::net::Ipv6Addr; - #[test] - fn ipv4_loopback_to_canonical() { - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - assert_eq!(socket.to_canonical(), socket); - } - #[test] - fn ipv6_loopback_to_canonical() { - let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080); - assert_eq!(socket.to_canonical(), socket); - } - #[test] - fn ipv4_to_canonical() { - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080); - assert_eq!(socket.to_canonical(), socket); - } - #[test] - fn ipv6_to_canonical() { - let socket = SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x2001, 0x0db8, 0, 0, 0, 0, 0xdead, 0xbeef)), - 8080, - ); - assert_eq!(socket.to_canonical(), socket); - } - #[test] - fn ipv4_mapped_to_ipv6_to_canonical() { - let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc00a, 0x2ff)), 8080); - assert_eq!( - socket.to_canonical(), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 10, 2, 255)), 8080) - ); - } -} diff --git a/rpxy-bin/Cargo.toml b/rpxy-bin/Cargo.toml index f77f0fae..4ad5d1ce 100644 --- a/rpxy-bin/Cargo.toml +++ b/rpxy-bin/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "rpxy" -version = "0.7.0-alpha.4" -authors = ["Jun Kurihara"] -homepage = "https://github.com/junkurihara/rust-rpxy" -repository = "https://github.com/junkurihara/rust-rpxy" -license = "MIT" -readme = "../README.md" -edition = "2021" -publish = false +description = "`rpxy`: a simple and ultrafast http reverse proxy" +version.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +readme.workspace = true +edition.workspace = true +publish.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -28,7 +29,7 @@ rpxy-lib = { path = "../rpxy-lib/", default-features = false, features = [ anyhow = "1.0.79" rustc-hash = "1.1.0" serde = { version = "1.0.196", default-features = false, features = ["derive"] } -derive_builder = "0.13.1" +derive_builder = "0.20.0" tokio = { version = "1.36.0", default-features = false, features = [ "net", "rt-multi-thread", diff --git a/rpxy-lib/Cargo.toml b/rpxy-lib/Cargo.toml index 17c21ff0..65fb9e24 100644 --- 
diff --git a/rpxy-bin/Cargo.toml b/rpxy-bin/Cargo.toml
index f77f0fae..4ad5d1ce 100644
--- a/rpxy-bin/Cargo.toml
+++ b/rpxy-bin/Cargo.toml
@@ -1,13 +1,14 @@
 [package]
 name = "rpxy"
-version = "0.7.0-alpha.4"
-authors = ["Jun Kurihara"]
-homepage = "https://github.com/junkurihara/rust-rpxy"
-repository = "https://github.com/junkurihara/rust-rpxy"
-license = "MIT"
-readme = "../README.md"
-edition = "2021"
-publish = false
+description = "`rpxy`: a simple and ultrafast http reverse proxy"
+version.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+readme.workspace = true
+edition.workspace = true
+publish.workspace = true
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
@@ -28,7 +29,7 @@ rpxy-lib = { path = "../rpxy-lib/", default-features = false, features = [
 anyhow = "1.0.79"
 rustc-hash = "1.1.0"
 serde = { version = "1.0.196", default-features = false, features = ["derive"] }
-derive_builder = "0.13.1"
+derive_builder = "0.20.0"
 tokio = { version = "1.36.0", default-features = false, features = [
   "net",
   "rt-multi-thread",
diff --git a/rpxy-lib/Cargo.toml b/rpxy-lib/Cargo.toml
index 17c21ff0..65fb9e24 100644
--- a/rpxy-lib/Cargo.toml
+++ b/rpxy-lib/Cargo.toml
@@ -1,13 +1,14 @@
 [package]
 name = "rpxy-lib"
-version = "0.7.0-alpha.4"
-authors = ["Jun Kurihara"]
-homepage = "https://github.com/junkurihara/rust-rpxy"
-repository = "https://github.com/junkurihara/rust-rpxy"
-license = "MIT"
-readme = "../README.md"
-edition = "2021"
-publish = false
+description = "Library of `rpxy`: a simple and ultrafast http reverse proxy"
+version.workspace = true
+authors.workspace = true
+homepage.workspace = true
+repository.workspace = true
+license.workspace = true
+readme.workspace = true
+edition.workspace = true
+publish.workspace = true
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
@@ -31,7 +32,7 @@ webpki-roots = ["rustls-backend", "hyper-rustls/webpki-tokio"]
 rand = "0.8.5"
 rustc-hash = "1.1.0"
 bytes = "1.5.0"
-derive_builder = "0.13.1"
+derive_builder = "0.20.0"
 futures = { version = "0.3.30", features = ["alloc", "async-await"] }
 tokio = { version = "1.36.0", default-features = false, features = [
   "net",
@@ -82,12 +83,12 @@ tracing = { version = "0.1.40" }
 quinn = { version = "0.10.2", optional = true }
 h3 = { path = "../submodules/h3/h3/", optional = true }
 h3-quinn = { path = "../submodules/h3/h3-quinn/", optional = true }
-s2n-quic = { version = "1.32.0", default-features = false, features = [
+s2n-quic = { version = "1.33.0", default-features = false, features = [
   "provider-tls-rustls",
 ], optional = true }
-s2n-quic-core = { version = "0.32.0", default-features = false, optional = true }
+s2n-quic-core = { version = "0.33.0", default-features = false, optional = true }
 s2n-quic-h3 = { path = "../submodules/s2n-quic-h3/", optional = true }
-s2n-quic-rustls = { version = "0.32.0", optional = true }
+s2n-quic-rustls = { version = "0.33.0", optional = true }
 # for UDP socket with SO_REUSEADDR when h3 with quinn
 socket2 = { version = "0.5.5", features = ["all"], optional = true }
diff --git a/rpxy-lib/src/backend/upstream.rs b/rpxy-lib/src/backend/upstream.rs
index ac50d695..702be290 100644
--- a/rpxy-lib/src/backend/upstream.rs
+++ b/rpxy-lib/src/backend/upstream.rs
@@ -1,8 +1,7 @@
 #[cfg(feature = "sticky-cookie")]
 use super::load_balance::LoadBalanceStickyBuilder;
 use super::load_balance::{
-  load_balance_options as lb_opts, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder,
-  LoadBalanceRoundRobinBuilder,
+  load_balance_options as lb_opts, LoadBalance, LoadBalanceContext, LoadBalanceRandomBuilder, LoadBalanceRoundRobinBuilder,
 };
 // use super::{BytesName, LbContext, PathNameBytesExp, UpstreamOption};
 use super::upstream_opts::UpstreamOption;
@@ -56,8 +55,7 @@ where
   }
 
   if !(inner.iter().all(|(_, elem)| {
-    !(elem.options.contains(&UpstreamOption::ForceHttp11Upstream)
-      && elem.options.contains(&UpstreamOption::ForceHttp2Upstream))
+    !(elem.options.contains(&UpstreamOption::ForceHttp11Upstream) && elem.options.contains(&UpstreamOption::ForceHttp2Upstream))
   })) {
     error!("Either one of force_http11 or force_http2 can be enabled");
     return Err(RpxyError::InvalidUpstreamOptionSetting);
@@ -175,7 +173,7 @@ impl UpstreamCandidatesBuilder {
     &mut self,
     v: &Option<String>,
     // upstream_num: &usize,
-    upstream_vec: &Vec<Upstream>,
+    upstream_vec: &[Upstream],
     _server_name: &str,
     _path_opt: &Option<String>,
   ) -> &mut Self {
@@ -236,10 +234,7 @@ impl UpstreamCandidates {
     let pointer_to_upstream = self.load_balance.get_context(context_to_lb);
     debug!("Upstream of index {} is chosen.", pointer_to_upstream.ptr);
     debug!("Context to LB (Cookie in Request): {:?}", context_to_lb);
-    debug!(
"Context from LB (Set-Cookie in Response): {:?}", - pointer_to_upstream.context - ); + debug!("Context from LB (Set-Cookie in Response): {:?}", pointer_to_upstream.context); (self.inner.get(pointer_to_upstream.ptr), pointer_to_upstream.context) } } diff --git a/submodules/h3 b/submodules/h3 index e7c7ab9d..c11410c7 160000 --- a/submodules/h3 +++ b/submodules/h3 @@ -1 +1 @@ -Subproject commit e7c7ab9d634ef73784d6cbc424a270dbaa4f1c99 +Subproject commit c11410c76e738a62e62e7766b82f814547621f6f