diff --git a/src/rust/cryptography-x509-validation/src/certificate.rs b/src/rust/cryptography-x509-validation/src/certificate.rs index 8aa65a4a8ac8..335312ccd265 100644 --- a/src/rust/cryptography-x509-validation/src/certificate.rs +++ b/src/rust/cryptography-x509-validation/src/certificate.rs @@ -6,38 +6,24 @@ use cryptography_x509::certificate::Certificate; -use crate::ops::CryptoOps; - -// TODO: Remove these attributes once we start using these helpers. -#[allow(dead_code)] pub(crate) fn cert_is_self_issued(cert: &Certificate<'_>) -> bool { cert.issuer() == cert.subject() } -#[allow(dead_code)] -pub(crate) fn cert_is_self_signed(cert: &Certificate<'_>, ops: &B) -> bool { - match ops.public_key(cert) { - Ok(pk) => cert_is_self_issued(cert) && ops.verify_signed_by(cert, pk).is_ok(), - Err(_) => false, - } -} - #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::certificate::Certificate; - use crate::ops::tests::{cert, v1_cert_pem, NullOps}; + use crate::ops::tests::{cert, v1_cert_pem}; use crate::ops::CryptoOps; - use super::{cert_is_self_issued, cert_is_self_signed}; + use super::cert_is_self_issued; #[test] fn test_certificate_v1() { let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; assert!(!cert_is_self_issued(&cert)); - assert!(!cert_is_self_signed(&cert, &ops)); } fn ca_pem() -> pem::Pem { @@ -61,13 +47,11 @@ Xw4nMqk= fn test_certificate_ca() { let cert_pem = ca_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; assert!(cert_is_self_issued(&cert)); - assert!(cert_is_self_signed(&cert, &ops)); } - struct PublicKeyErrorOps {} + pub(crate) struct PublicKeyErrorOps {} impl CryptoOps for PublicKeyErrorOps { type Key = (); type Err = (); @@ -90,10 +74,8 @@ Xw4nMqk= fn test_certificate_public_key_error() { let cert_pem = ca_pem(); let cert = cert(&cert_pem); - let ops = PublicKeyErrorOps {}; assert!(cert_is_self_issued(&cert)); - assert!(!cert_is_self_signed(&cert, &ops)); } #[test] diff --git a/src/rust/cryptography-x509-validation/src/lib.rs b/src/rust/cryptography-x509-validation/src/lib.rs index 4cb7f363ce2b..084eb2a505da 100644 --- a/src/rust/cryptography-x509-validation/src/lib.rs +++ b/src/rust/cryptography-x509-validation/src/lib.rs @@ -11,6 +11,332 @@ pub mod policy; pub mod trust_store; pub mod types; +use std::collections::HashSet; +use std::vec; + +use crate::certificate::cert_is_self_issued; +use crate::types::{DNSConstraint, IPAddress, IPConstraint}; +use crate::ApplyNameConstraintStatus::{Applied, Skipped}; +use cryptography_x509::extensions::{DuplicateExtensionsError, Extensions}; +use cryptography_x509::{ + certificate::Certificate, + extensions::{NameConstraints, SubjectAlternativeName}, + name::GeneralName, + oid::{NAME_CONSTRAINTS_OID, SUBJECT_ALTERNATIVE_NAME_OID}, +}; +use ops::CryptoOps; +use policy::Policy; +use trust_store::Store; +use types::DNSName; + +#[derive(Debug, PartialEq, Eq)] pub enum ValidationError { - Other(&'static str), + CandidatesExhausted(Box), + Malformed(asn1::ParseError), + DuplicateExtension(DuplicateExtensionsError), + Other(String), +} + +impl From for ValidationError { + fn from(value: asn1::ParseError) -> Self { + Self::Malformed(value) + } +} + +impl From for ValidationError { + fn from(value: DuplicateExtensionsError) -> Self { + Self::DuplicateExtension(value) + } +} + +struct NameChain<'a, 'chain> { + child: Option<&'a NameChain<'a, 'chain>>, + sans: Vec>, +} + +impl<'a, 'chain> NameChain<'a, 'chain> { + fn new( + child: Option<&'a NameChain<'a, 'chain>>, + extensions: 
&Extensions<'chain>, + self_issued_intermediate: bool, + ) -> Result { + let sans = match ( + self_issued_intermediate, + extensions.get_extension(&SUBJECT_ALTERNATIVE_NAME_OID), + ) { + (false, Some(sans)) => sans.value::>()?.collect(), + _ => vec![], + }; + + Ok(Self { child, sans }) + } + + fn evaluate_single_constraint( + &self, + constraint: &GeneralName<'chain>, + san: &GeneralName<'chain>, + ) -> Result { + match (constraint, san) { + (GeneralName::DNSName(pattern), GeneralName::DNSName(name)) => { + match (DNSConstraint::new(pattern.0), DNSName::new(name.0)) { + (Some(pattern), Some(name)) => Ok(Applied(pattern.matches(&name))), + (_, None) => Err(ValidationError::Other(format!( + "unsatisfiable DNS name constraint: malformed SAN {}", + name.0 + ))), + (None, _) => Err(ValidationError::Other(format!( + "malformed DNS name constraint: {}", + pattern.0 + ))), + } + } + (GeneralName::IPAddress(pattern), GeneralName::IPAddress(name)) => { + match ( + IPConstraint::from_bytes(pattern), + IPAddress::from_bytes(name), + ) { + (Some(pattern), Some(name)) => Ok(Applied(pattern.matches(&name))), + (_, None) => Err(ValidationError::Other(format!( + "unsatisfiable IP name constraint: malformed SAN {:?}", + name, + ))), + (None, _) => Err(ValidationError::Other(format!( + "malformed IP name constraints: {:?}", + pattern + ))), + } + } + _ => Ok(Skipped), + } + } + + fn evaluate_constraints( + &self, + constraints: &NameConstraints<'chain>, + ) -> Result<(), ValidationError> { + if let Some(child) = self.child { + child.evaluate_constraints(constraints)?; + } + + for san in &self.sans { + // If there are no applicable constraints, the SAN is considered valid so the default is true. + let mut permit = true; + if let Some(permitted_subtrees) = &constraints.permitted_subtrees { + for p in permitted_subtrees.unwrap_read().clone() { + let status = self.evaluate_single_constraint(&p.base, san)?; + if status.is_applied() { + permit = status.is_match(); + if permit { + break; + } + } + } + } + + if !permit { + return Err(ValidationError::Other( + "no permitted name constraints matched SAN".into(), + )); + } + + if let Some(excluded_subtrees) = &constraints.excluded_subtrees { + for e in excluded_subtrees.unwrap_read().clone() { + let status = self.evaluate_single_constraint(&e.base, san)?; + if status.is_match() { + return Err(ValidationError::Other( + "excluded name constraint matched SAN".into(), + )); + } + } + } + } + + Ok(()) + } +} + +pub type Chain<'c> = Vec>; + +pub fn verify<'a, 'chain, B: CryptoOps>( + leaf: &'a Certificate<'chain>, + intermediates: impl IntoIterator>, + policy: &Policy<'_, B>, + store: &'a Store<'chain>, +) -> Result, ValidationError> { + let builder = ChainBuilder::new(intermediates.into_iter().collect(), policy, store); + + builder.build_chain(leaf) +} + +struct ChainBuilder<'a, 'chain, B: CryptoOps> { + intermediates: HashSet>, + policy: &'a Policy<'a, B>, + store: &'a Store<'chain>, +} + +// When applying a name constraint, we need to distinguish between a few different scenarios: +// * `Applied(true)`: The name constraint is the same type as the SAN and matches. +// * `Applied(false)`: The name constraint is the same type as the SAN and does not match. +// * `Skipped`: The name constraint is a different type to the SAN. 
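As a reading aid, a minimal sketch (not part of the patch) of how the tri-state described above and defined by the enum just below behaves; it uses only names introduced in this diff:

```rust
// Illustrative only: `Applied(_)` means the constraint and SAN were the same
// type and were actually compared; `Skipped` means the comparison was not
// applicable and must count as neither a permit nor an exclusion match.
#[cfg(test)]
mod name_constraint_status_demo {
    use crate::ApplyNameConstraintStatus::{Applied, Skipped};

    #[test]
    fn tri_state_helpers() {
        // Same type and matched: counts toward a permitted-subtree hit.
        assert!(Applied(true).is_applied() && Applied(true).is_match());
        // Same type but not matched: applied, yet not a match.
        assert!(Applied(false).is_applied() && !Applied(false).is_match());
        // Different types (e.g. an IP constraint against a DNS SAN): ignored.
        assert!(!Skipped.is_applied() && !Skipped.is_match());
    }
}
```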
+enum ApplyNameConstraintStatus { + Applied(bool), + Skipped, +} + +impl ApplyNameConstraintStatus { + fn is_applied(&self) -> bool { + matches!(self, Applied(_)) + } + + fn is_match(&self) -> bool { + matches!(self, Applied(true)) + } +} + +impl<'a, 'chain, B: CryptoOps> ChainBuilder<'a, 'chain, B> { + fn new( + intermediates: HashSet>, + policy: &'a Policy<'a, B>, + store: &'a Store<'chain>, + ) -> Self { + Self { + intermediates, + policy, + store, + } + } + + fn potential_issuers( + &'a self, + cert: &'a Certificate<'chain>, + ) -> impl Iterator> + '_ { + // TODO: Optimizations: + // * Use a backing structure that allows us to search by name + // rather than doing a linear scan + // * Search by AKI and other identifiers? + self.store + .iter() + .chain(self.intermediates.iter()) + .filter(|&candidate| candidate.subject() == cert.issuer()) + } + + fn build_chain_inner( + &self, + working_cert: &'a Certificate<'chain>, + current_depth: u8, + working_cert_extensions: &'a Extensions<'chain>, + name_chain: NameChain<'a, 'chain>, + ) -> Result, ValidationError> { + if let Some(nc) = working_cert_extensions.get_extension(&NAME_CONSTRAINTS_OID) { + name_chain.evaluate_constraints(&nc.value()?)?; + } + + // Look in the store's root set to see if the working cert is listed. + // If it is, we've reached the end. + if self.store.contains(working_cert) { + return Ok(vec![working_cert.clone()]); + } + + // Check that our current depth does not exceed our policy-configured + // max depth. We do this after the root set check, since the depth + // only measures the intermediate chain's length, not the root or leaf. + if current_depth > self.policy.max_chain_depth { + return Err(ValidationError::Other( + "chain construction exceeds max depth".into(), + )); + } + + // Otherwise, we collect a list of potential issuers for this cert, + // and continue with the first that verifies. + let mut last_err: Option = None; + for issuing_cert_candidate in self.potential_issuers(working_cert) { + // A candidate issuer is said to verify if it both + // signs for the working certificate and conforms to the + // policy. + let issuer_extensions = issuing_cert_candidate.extensions()?; + match self.policy.valid_issuer( + issuing_cert_candidate, + working_cert, + current_depth, + &issuer_extensions, + ) { + Ok(_) => { + match self.build_chain_inner( + issuing_cert_candidate, + // NOTE(ww): According to RFC 5280, we should only + // increase the chain depth when the certificate is **not** + // self-issued. In practice however, implementations widely + // ignore this requirement, and unconditionally increment + // the depth with every chain member. We choose to do the same; + // see `pathlen::self-issued-certs-pathlen` from x509-limbo + // for the testcase we intentionally fail. + // + // Implementation note for someone looking to change this in the future: + // care should be taken to avoid infinite recursion with self-signed + // certificates in the intermediate set; changing this behavior will + // also require a "is not self-signed" check on intermediate candidates. + // + // See https://gist.github.com/woodruffw/776153088e0df3fc2f0675c5e835f7b8 + // for an example of this change. 
+ current_depth.checked_add(1).ok_or_else(|| { + ValidationError::Other( + "current depth calculation overflowed".to_string(), + ) + })?, + &issuer_extensions, + NameChain::new( + Some(&name_chain), + &issuer_extensions, + // Per RFC 5280 4.2.1.10: Name constraints are not applied + // to subjects in self-issued certificates, *unless* the + // certificate is the "final" (i.e., leaf) certificate in the path. + // We accomplish this by only collecting the SANs when the issuing + // candidate (which is a non-leaf by definition) isn't self-issued. + cert_is_self_issued(issuing_cert_candidate), + )?, + ) { + Ok(mut chain) => { + chain.insert(0, working_cert.clone()); + return Ok(chain); + } + Err(e) => last_err = Some(e), + }; + } + Err(e) => last_err = Some(e), + }; + } + + // We only reach this if we fail to hit our base case above, or if + // a chain building step fails to find a next valid certificate. + Err(ValidationError::CandidatesExhausted(last_err.map_or_else( + || { + Box::new(ValidationError::Other( + "all candidates exhausted with no interior errors".to_string(), + )) + }, + |e| match e { + // Avoid spamming the user with nested `CandidatesExhausted` errors. + ValidationError::CandidatesExhausted(e) => e, + _ => Box::new(e), + }, + ))) + } + + fn build_chain(&self, leaf: &'a Certificate<'chain>) -> Result, ValidationError> { + // Before anything else, check whether the given leaf cert + // is well-formed according to our policy (and its underlying + // certificate profile). + // + // The leaf must be an EE; a CA cert in the leaf position will be rejected. + let leaf_extensions = leaf.extensions()?; + + self.policy.permits_ee(leaf, &leaf_extensions)?; + + self.build_chain_inner( + leaf, + 0, + &leaf_extensions, + NameChain::new(None, &leaf_extensions, false)?, + ) + } } diff --git a/src/rust/cryptography-x509-validation/src/ops.rs b/src/rust/cryptography-x509-validation/src/ops.rs index 47529cf0bc0f..719d9aa04617 100644 --- a/src/rust/cryptography-x509-validation/src/ops.rs +++ b/src/rust/cryptography-x509-validation/src/ops.rs @@ -25,26 +25,6 @@ pub trait CryptoOps { pub(crate) mod tests { use cryptography_x509::certificate::Certificate; - use super::CryptoOps; - - pub(crate) struct NullOps {} - impl CryptoOps for NullOps { - type Key = (); - type Err = (); - - fn public_key(&self, _cert: &Certificate<'_>) -> Result { - Ok(()) - } - - fn verify_signed_by( - &self, - _cert: &Certificate<'_>, - _key: Self::Key, - ) -> Result<(), Self::Err> { - Ok(()) - } - } - pub(crate) fn v1_cert_pem() -> pem::Pem { pem::parse( " @@ -65,16 +45,4 @@ zl9HYIMxATFyqSiD9jsx pub(crate) fn cert(cert_pem: &pem::Pem) -> Certificate<'_> { asn1::parse_single(cert_pem.contents()).unwrap() } - - #[test] - fn test_nullops() { - let cert_pem = v1_cert_pem(); - let cert = cert(&cert_pem); - - let ops = NullOps {}; - assert_eq!(ops.public_key(&cert), Ok(())); - assert!(ops - .verify_signed_by(&cert, ops.public_key(&cert).unwrap()) - .is_ok()); - } } diff --git a/src/rust/cryptography-x509-validation/src/policy/extension.rs b/src/rust/cryptography-x509-validation/src/policy/extension.rs index e4f1397bb8d2..834506af6594 100644 --- a/src/rust/cryptography-x509-validation/src/policy/extension.rs +++ b/src/rust/cryptography-x509-validation/src/policy/extension.rs @@ -8,14 +8,9 @@ use cryptography_x509::{ extensions::{Extension, Extensions}, }; -use crate::{ops::CryptoOps, ValidationError}; - -use super::Policy; - -// TODO: Remove `dead_code` attributes once we start using these helpers. 
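A sketch, not part of the patch, of how a downstream Rust caller might drive the `verify` entry point added to `lib.rs` above and distinguish its error variants. The `leaf`, `intermediates`, `policy`, and `store` values are assumed to have been built elsewhere (e.g. via `asn1::parse_single`, `Policy::new`, and `Store::new`):

```rust
use cryptography_x509::certificate::Certificate;
use cryptography_x509_validation::{
    ops::CryptoOps, policy::Policy, trust_store::Store, verify, ValidationError,
};

fn check_chain<'chain, B: CryptoOps>(
    leaf: &Certificate<'chain>,
    intermediates: Vec<Certificate<'chain>>,
    policy: &Policy<'_, B>,
    store: &Store<'chain>,
) {
    match verify(leaf, intermediates, policy, store) {
        // Chains come back leaf-first, trust anchor last.
        Ok(chain) => println!("built a chain of {} certificates", chain.len()),
        // `CandidatesExhausted` wraps the innermost error hit while trying issuers.
        Err(ValidationError::CandidatesExhausted(inner)) => eprintln!("no chain: {inner:?}"),
        Err(other) => eprintln!("rejected: {other:?}"),
    }
}
```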
+use crate::{ops::CryptoOps, policy::Policy, ValidationError}; /// Represents different criticality states for an extension. -#[allow(dead_code)] pub(crate) enum Criticality { /// The extension MUST be marked as critical. Critical, @@ -25,7 +20,6 @@ pub(crate) enum Criticality { NonCritical, } -#[allow(dead_code)] impl Criticality { pub(crate) fn permits(&self, critical: bool) -> bool { match (self, critical) { @@ -38,16 +32,13 @@ impl Criticality { } } -#[allow(dead_code)] type PresentExtensionValidatorCallback = fn(&Policy<'_, B>, &Certificate<'_>, &Extension<'_>) -> Result<(), ValidationError>; -#[allow(dead_code)] type MaybeExtensionValidatorCallback = fn(&Policy<'_, B>, &Certificate<'_>, Option<&Extension<'_>>) -> Result<(), ValidationError>; /// Represents different validation states for an extension. -#[allow(dead_code)] pub(crate) enum ExtensionValidator { /// The extension MUST NOT be present. NotPresent, @@ -69,13 +60,11 @@ pub(crate) enum ExtensionValidator { /// A "policy" for validating a specific X.509v3 extension, identified by /// its OID. -#[allow(dead_code)] pub(crate) struct ExtensionPolicy { pub(crate) oid: asn1::ObjectIdentifier, pub(crate) validator: ExtensionValidator, } -#[allow(dead_code)] impl ExtensionPolicy { pub(crate) fn not_present(oid: ObjectIdentifier) -> Self { Self { @@ -123,12 +112,13 @@ impl ExtensionPolicy { (ExtensionValidator::NotPresent, None) => Ok(()), // Extension MUST NOT be present but is; NOT OK. (ExtensionValidator::NotPresent, Some(_)) => Err(ValidationError::Other( - "EE certificate contains prohibited extension", + "EE certificate contains prohibited extension".to_string(), )), // Extension MUST be present but is not; NOT OK. - (ExtensionValidator::Present { .. }, None) => Err(ValidationError::Other( - "EE certificate is missing required extension", - )), + (ExtensionValidator::Present { .. }, None) => Err(ValidationError::Other(format!( + "EE certificate is missing required extension: {}", + self.oid + ))), // Extension MUST be present and is; check it. 
( ExtensionValidator::Present { @@ -139,7 +129,7 @@ impl ExtensionPolicy { ) => { if !criticality.permits(extn.critical) { return Err(ValidationError::Other( - "EE certificate extension has incorrect criticality", + "EE certificate extension has incorrect criticality".to_string(), )); } @@ -160,7 +150,7 @@ impl ExtensionPolicy { .map_or(false, |extn| !criticality.permits(extn.critical)) { return Err(ValidationError::Other( - "EE certificate extension has incorrect criticality", + "EE certificate extension has incorrect criticality".to_string(), )); } @@ -171,14 +161,297 @@ impl ExtensionPolicy { } } +pub(crate) mod ee { + use cryptography_x509::{ + certificate::Certificate, + extensions::{ + BasicConstraints, ExtendedKeyUsage, Extension, KeyUsage, SubjectAlternativeName, + }, + }; + + use crate::{ + ops::CryptoOps, + policy::{Policy, ValidationError}, + }; + + pub(crate) fn basic_constraints( + _policy: &Policy<'_, B>, + _cert: &Certificate<'_>, + extn: Option<&Extension<'_>>, + ) -> Result<(), ValidationError> { + if let Some(extn) = extn { + let basic_constraints: BasicConstraints = extn.value()?; + + if basic_constraints.ca { + return Err(ValidationError::Other( + "basicConstraints.cA must not be asserted in an EE certificate".to_string(), + )); + } + } + + Ok(()) + } + + pub(crate) fn subject_alternative_name( + policy: &Policy<'_, B>, + cert: &Certificate<'_>, + extn: &Extension<'_>, + ) -> Result<(), ValidationError> { + match (cert.subject().is_empty(), extn.critical) { + // If the subject is empty, the SAN MUST be critical. + (true, false) => { + return Err(ValidationError::Other( + "EE subjectAltName MUST be critical when subject is empty".to_string(), + )); + } + // If the subject is non-empty, the SAN MUST NOT be critical. + (false, true) => { + return Err(ValidationError::Other( + "EE subjectAltName MUST NOT be critical when subject is nonempty".to_string(), + )) + } + _ => (), + }; + + let san: SubjectAlternativeName<'_> = extn.value()?; + if !policy.subject.matches(&san) { + return Err(ValidationError::Other( + "leaf certificate has no matching subjectAltName".into(), + )); + } + + Ok(()) + } + + pub(crate) fn extended_key_usage( + policy: &Policy<'_, B>, + _cert: &Certificate<'_>, + extn: Option<&Extension<'_>>, + ) -> Result<(), ValidationError> { + if let Some(extn) = extn { + let mut ekus: ExtendedKeyUsage<'_> = extn.value()?; + + // CABF requires EKUs in EE certs, but this is widely ignored + // by implementations (which treat a missing EKU as "any EKU"). + // On the other hand, if the EKU is present, it **must** be + // the one specified in the policy (e.g., `serverAuth`) and + // **must not** be the explicit `anyExtendedKeyUsage` EKU. + // See: CABF 7.1.2.7.10. 
+            if ekus.any(|eku| eku == policy.extended_key_usage) {
+                Ok(())
+            } else {
+                Err(ValidationError::Other("required EKU not found".to_string()))
+            }
+        } else {
+            Ok(())
+        }
+    }
+
+    pub(crate) fn key_usage<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: Option<&Extension<'_>>,
+    ) -> Result<(), ValidationError> {
+        if let Some(extn) = extn {
+            let key_usage: KeyUsage<'_> = extn.value()?;
+
+            if key_usage.key_cert_sign() {
+                return Err(ValidationError::Other(
+                    "EE keyUsage must not assert keyCertSign".to_string(),
+                ));
+            }
+        }
+
+        Ok(())
+    }
+}
+
+pub(crate) mod ca {
+    use cryptography_x509::{
+        certificate::Certificate,
+        extensions::{
+            AuthorityKeyIdentifier, BasicConstraints, ExtendedKeyUsage, Extension, KeyUsage,
+            NameConstraints,
+        },
+        oid::EKU_ANY_KEY_USAGE_OID,
+    };
+
+    use crate::{
+        ops::CryptoOps,
+        policy::{Policy, ValidationError},
+    };
+
+    pub(crate) fn authority_key_identifier<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: Option<&Extension<'_>>,
+    ) -> Result<(), ValidationError> {
+        // CABF: AKI is required on all CA certificates *except* root CA certificates,
+        // where it is merely recommended. This is slightly different from RFC 5280,
+        // which requires AKI on all CA certificates *except* self-signed root CA certificates.
+        //
+        // This discrepancy poses a challenge: from a strict CABF perspective we should
+        // require the AKI unless we're on a root CA, but we lack the context to determine that
+        // here. We *could* infer that we're on a root by checking whether the CA is self-signed,
+        // but many root CAs still use RSA with SHA-1 (which is intentionally unsupported
+        // for signature verification).
+        //
+        // Consequently, the best we can currently do here is check whether the AKI conforms
+        // to the CABF mandated format, *if* it exists. This means that we will accept
+        // some chains that are not strictly CABF compliant (e.g. ones where intermediate
+        // CAs are missing AKIs), but this is a relatively minor discrepancy.
+        if let Some(extn) = extn {
+            let aki: AuthorityKeyIdentifier<'_> = extn.value()?;
+            // 7.1.2.11.1 Authority Key Identifier:
+
+            // keyIdentifier MUST be present.
+            // TODO: Check that keyIdentifier matches subjectKeyIdentifier.
+            if aki.key_identifier.is_none() {
+                return Err(ValidationError::Other(
+                    "authorityKeyIdentifier must contain keyIdentifier".to_string(),
+                ));
+            }
+
+            // authorityCertIssuer and authorityCertSerialNumber MUST NOT be present.
+            if aki.authority_cert_issuer.is_some() {
+                return Err(ValidationError::Other(
+                    "authorityKeyIdentifier must not contain authorityCertIssuer".to_string(),
+                ));
+            }
+
+            if aki.authority_cert_serial_number.is_some() {
+                return Err(ValidationError::Other(
+                    "authorityKeyIdentifier must not contain authorityCertSerialNumber".to_string(),
+                ));
+            }
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn key_usage<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: &Extension<'_>,
+    ) -> Result<(), ValidationError> {
+        let key_usage: KeyUsage<'_> = extn.value()?;
+
+        if !key_usage.key_cert_sign() {
+            return Err(ValidationError::Other(
+                "keyUsage.keyCertSign must be asserted in a CA certificate".to_string(),
+            ));
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn basic_constraints<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: &Extension<'_>,
+    ) -> Result<(), ValidationError> {
+        let basic_constraints: BasicConstraints = extn.value()?;
+
+        if !basic_constraints.ca {
+            return Err(ValidationError::Other(
+                "basicConstraints.cA must be asserted in a CA certificate".to_string(),
+            ));
+        }
+
+        // NOTE: basicConstraints.pathLength is checked as part of
+        // `Policy::permits_ca`, since we need the current chain building
+        // depth to check it.
+
+        Ok(())
+    }
+
+    pub(crate) fn name_constraints<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: Option<&Extension<'_>>,
+    ) -> Result<(), ValidationError> {
+        if let Some(extn) = extn {
+            let name_constraints: NameConstraints<'_> = extn.value()?;
+
+            let permitted_subtrees_empty = name_constraints
+                .permitted_subtrees
+                .as_ref()
+                .map_or(true, |pst| pst.unwrap_read().is_empty());
+            let excluded_subtrees_empty = name_constraints
+                .excluded_subtrees
+                .as_ref()
+                .map_or(true, |est| est.unwrap_read().is_empty());
+
+            if permitted_subtrees_empty && excluded_subtrees_empty {
+                return Err(ValidationError::Other(
+                    "nameConstraints must have non-empty permittedSubtrees or excludedSubtrees"
+                        .to_string(),
+                ));
+            }
+
+            // NOTE: Both RFC 5280 and CABF require each `GeneralSubtree`
+            // to have `minimum=0` and `maximum=NULL`, but experimentally
+            // not many validators check for this.
+        }
+
+        Ok(())
+    }
+
+    pub(crate) fn extended_key_usage<B: CryptoOps>(
+        policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: Option<&Extension<'_>>,
+    ) -> Result<(), ValidationError> {
+        if let Some(extn) = extn {
+            let mut ekus: ExtendedKeyUsage<'_> = extn.value()?;
+
+            // NOTE: CABF explicitly forbids anyEKU in most CA certs,
+            // but this is widely (universally?) ignored by other implementations.
+            if ekus.any(|eku| eku == policy.extended_key_usage || eku == EKU_ANY_KEY_USAGE_OID) {
+                Ok(())
+            } else {
+                Err(ValidationError::Other("required EKU not found".to_string()))
+            }
+        } else {
+            Ok(())
+        }
+    }
+}
+
+pub(crate) mod common {
+    use cryptography_x509::{
+        certificate::Certificate,
+        extensions::{Extension, SequenceOfAccessDescriptions},
+    };
+
+    use crate::{
+        ops::CryptoOps,
+        policy::{Policy, ValidationError},
+    };
+
+    pub(crate) fn authority_information_access<B: CryptoOps>(
+        _policy: &Policy<'_, B>,
+        _cert: &Certificate<'_>,
+        extn: Option<&Extension<'_>>,
+    ) -> Result<(), ValidationError> {
+        if let Some(extn) = extn {
+            // We don't currently do anything useful with these, but we
+            // do check that they're well-formed.
+ let _: SequenceOfAccessDescriptions<'_> = extn.value()?; + } + + Ok(()) + } +} + #[cfg(test)] mod tests { use super::{Criticality, ExtensionPolicy}; - use crate::ops::tests::{cert, v1_cert_pem, NullOps}; + use crate::certificate::tests::PublicKeyErrorOps; + use crate::ops::tests::{cert, v1_cert_pem}; use crate::ops::CryptoOps; - use crate::policy::{Policy, Subject}; + use crate::policy::{Policy, Subject, ValidationError}; use crate::types::DNSName; - use crate::ValidationError; use asn1::{ObjectIdentifier, SimpleAsn1Writable}; use cryptography_x509::certificate::Certificate; use cryptography_x509::extensions::{BasicConstraints, Extension, Extensions}; @@ -237,7 +510,7 @@ mod tests { // The certificate doesn't get used for this validator, so the certificate we use isn't important. let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; + let ops = PublicKeyErrorOps {}; let policy = Policy::new( ops, Subject::DNS(DNSName::new("example.com").unwrap()), @@ -286,7 +559,7 @@ mod tests { // The certificate doesn't get used for this validator, so the certificate we use isn't important. let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; + let ops = PublicKeyErrorOps {}; let policy = Policy::new( ops, Subject::DNS(DNSName::new("example.com").unwrap()), @@ -327,7 +600,7 @@ mod tests { // The certificate doesn't get used for this validator, so the certificate we use isn't important. let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; + let ops = PublicKeyErrorOps {}; let policy = Policy::new( ops, Subject::DNS(DNSName::new("example.com").unwrap()), @@ -364,7 +637,7 @@ mod tests { // The certificate doesn't get used for this validator, so the certificate we use isn't important. let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; + let ops = PublicKeyErrorOps {}; let policy = Policy::new( ops, Subject::DNS(DNSName::new("example.com").unwrap()), @@ -397,7 +670,7 @@ mod tests { // The certificate doesn't get used for this validator, so the certificate we use isn't important. 
let cert_pem = v1_cert_pem(); let cert = cert(&cert_pem); - let ops = NullOps {}; + let ops = PublicKeyErrorOps {}; let policy = Policy::new( ops, Subject::DNS(DNSName::new("example.com").unwrap()), diff --git a/src/rust/cryptography-x509-validation/src/policy/mod.rs b/src/rust/cryptography-x509-validation/src/policy/mod.rs index 4e897c3c932e..2e3652505e57 100644 --- a/src/rust/cryptography-x509-validation/src/policy/mod.rs +++ b/src/rust/cryptography-x509-validation/src/policy/mod.rs @@ -5,21 +5,30 @@ mod extension; use std::collections::HashSet; +use std::ops::Range; use asn1::ObjectIdentifier; +use cryptography_x509::certificate::Certificate; use once_cell::sync::Lazy; use cryptography_x509::common::{ - AlgorithmIdentifier, AlgorithmParameters, EcParameters, RsaPssParameters, PSS_SHA256_HASH_ALG, - PSS_SHA256_MASK_GEN_ALG, PSS_SHA384_HASH_ALG, PSS_SHA384_MASK_GEN_ALG, PSS_SHA512_HASH_ALG, - PSS_SHA512_MASK_GEN_ALG, + AlgorithmIdentifier, AlgorithmParameters, EcParameters, RsaPssParameters, Time, + PSS_SHA256_HASH_ALG, PSS_SHA256_MASK_GEN_ALG, PSS_SHA384_HASH_ALG, PSS_SHA384_MASK_GEN_ALG, + PSS_SHA512_HASH_ALG, PSS_SHA512_MASK_GEN_ALG, }; -use cryptography_x509::extensions::SubjectAlternativeName; +use cryptography_x509::extensions::{BasicConstraints, Extensions, SubjectAlternativeName}; use cryptography_x509::name::GeneralName; -use cryptography_x509::oid::{EC_SECP256R1, EC_SECP384R1, EC_SECP521R1, EKU_SERVER_AUTH_OID}; +use cryptography_x509::oid::{ + AUTHORITY_INFORMATION_ACCESS_OID, AUTHORITY_KEY_IDENTIFIER_OID, BASIC_CONSTRAINTS_OID, + EC_SECP256R1, EC_SECP384R1, EC_SECP521R1, EKU_SERVER_AUTH_OID, EXTENDED_KEY_USAGE_OID, + KEY_USAGE_OID, NAME_CONSTRAINTS_OID, POLICY_CONSTRAINTS_OID, SUBJECT_ALTERNATIVE_NAME_OID, + SUBJECT_DIRECTORY_ATTRIBUTES_OID, SUBJECT_KEY_IDENTIFIER_OID, +}; +use self::extension::{ca, common, ee, Criticality, ExtensionPolicy}; use crate::ops::CryptoOps; use crate::types::{DNSName, DNSPattern, IPAddress}; +use crate::ValidationError; // SubjectPublicKeyInfo AlgorithmIdentifier constants, as defined in CA/B 7.1.3.1. @@ -180,7 +189,7 @@ impl Subject<'_> { /// A `Policy` describes user-configurable aspects of X.509 path validation. pub struct Policy<'a, B: CryptoOps> { - _ops: B, + pub ops: B, /// A top-level constraint on the length of intermediate CA paths /// constructed under this policy. @@ -207,6 +216,10 @@ pub struct Policy<'a, B: CryptoOps> { /// The set of permitted signature algorithms, identified by their /// algorithm identifiers. 
pub permitted_signature_algorithms: HashSet>, + + common_extension_policies: Vec>, + ca_extension_policies: Vec>, + ee_extension_policies: Vec>, } impl<'a, B: CryptoOps> Policy<'a, B> { @@ -219,7 +232,7 @@ impl<'a, B: CryptoOps> Policy<'a, B> { max_chain_depth: Option, ) -> Self { Self { - _ops: ops, + ops, max_chain_depth: max_chain_depth.unwrap_or(DEFAULT_MAX_CHAIN_DEPTH), subject, validation_time: time, @@ -234,15 +247,333 @@ impl<'a, B: CryptoOps> Policy<'a, B> { .into_iter() .cloned() .collect(), + common_extension_policies: Vec::from([ + // 5280 4.2.1.8: Subject Directory Attributes + ExtensionPolicy::maybe_present( + SUBJECT_DIRECTORY_ATTRIBUTES_OID, + Criticality::NonCritical, + None, + ), + // 5280 4.2.2.1: Authority Information Access + ExtensionPolicy::maybe_present( + AUTHORITY_INFORMATION_ACCESS_OID, + Criticality::NonCritical, + Some(common::authority_information_access), + ), + ]), + ca_extension_policies: Vec::from([ + // 5280 4.2.1.1: Authority Key Identifier + ExtensionPolicy::maybe_present( + AUTHORITY_KEY_IDENTIFIER_OID, + Criticality::NonCritical, + Some(ca::authority_key_identifier), + ), + // 5280 4.2.1.2: Subject Key Identifier + // NOTE: CABF requires SKI in CA certificates, but many older CAs lack it. + // We choose to be permissive here. + ExtensionPolicy::maybe_present( + SUBJECT_KEY_IDENTIFIER_OID, + Criticality::NonCritical, + None, + ), + // 5280 4.2.1.3: Key Usage + ExtensionPolicy::present(KEY_USAGE_OID, Criticality::Agnostic, Some(ca::key_usage)), + // 5280 4.2.1.9: Basic Constraints + ExtensionPolicy::present( + BASIC_CONSTRAINTS_OID, + Criticality::Critical, + Some(ca::basic_constraints), + ), + // 5280 4.2.1.10: Name Constraints + // NOTE: MUST be critical in 5280, but CABF relaxes to MAY. + ExtensionPolicy::maybe_present( + NAME_CONSTRAINTS_OID, + Criticality::Agnostic, + Some(ca::name_constraints), + ), + // 5280 4.2.1.11: Policy Constraints + ExtensionPolicy::maybe_present(POLICY_CONSTRAINTS_OID, Criticality::Critical, None), + // 5280: 4.2.1.12: Extended Key Usage + // NOTE: CABF requires EKUs in many non-root CA certs, but validators widely + // ignore this requirement and treat a missing EKU as "any EKU". + // We choose to be permissive here. + ExtensionPolicy::maybe_present( + EXTENDED_KEY_USAGE_OID, + Criticality::NonCritical, + Some(ca::extended_key_usage), + ), + ]), + ee_extension_policies: Vec::from([ + // 5280 4.2.1.1.: Authority Key Identifier + ExtensionPolicy::present( + AUTHORITY_KEY_IDENTIFIER_OID, + Criticality::NonCritical, + None, + ), + // 5280 4.2.1.3: Key Usage + ExtensionPolicy::maybe_present( + KEY_USAGE_OID, + Criticality::Agnostic, + Some(ee::key_usage), + ), + // CA/B 7.1.2.7.12 Subscriber Certificate Subject Alternative Name + ExtensionPolicy::present( + SUBJECT_ALTERNATIVE_NAME_OID, + Criticality::Agnostic, + Some(ee::subject_alternative_name), + ), + // 5280 4.2.1.9: Basic Constraints + ExtensionPolicy::maybe_present( + BASIC_CONSTRAINTS_OID, + Criticality::Agnostic, + Some(ee::basic_constraints), + ), + // 5280 4.2.1.10: Name Constraints + ExtensionPolicy::not_present(NAME_CONSTRAINTS_OID), + // CA/B: 7.1.2.7.10: Subscriber Certificate Extended Key Usage + // NOTE: CABF requires EKUs in EE certs, while RFC 5280 does not. 
+ ExtensionPolicy::maybe_present( + EXTENDED_KEY_USAGE_OID, + Criticality::NonCritical, + Some(ee::extended_key_usage), + ), + ]), + } + } + + fn permits_basic(&self, cert: &Certificate<'_>) -> Result<(), ValidationError> { + let extensions = cert.extensions()?; + + // CA/B 7.1.1: + // Certificates MUST be of type X.509 v3. + if cert.tbs_cert.version != 2 { + return Err(ValidationError::Other( + "certificate must be an X509v3 certificate".to_string(), + )); + } + + // 5280 4.1.1.2 / 4.1.2.3: signatureAlgorithm / TBS Certificate Signature + // The top-level signatureAlgorithm and TBSCert signature algorithm + // MUST match. + if cert.signature_alg != cert.tbs_cert.signature_alg { + return Err(ValidationError::Other( + "mismatch between signatureAlgorithm and SPKI algorithm".to_string(), + )); + } + + // 5280 4.1.2.2: Serial Number + // Per 5280: The serial number MUST be a positive integer. + // In practice, there are a few roots in common trust stores (like certifi) + // that have `serial == 0`, so we can't enforce this yet. + let serial_bytes = cert.tbs_cert.serial.as_bytes(); + if !(1..=21).contains(&serial_bytes.len()) { + // Conforming CAs MUST NOT use serial numbers longer than 20 octets. + // NOTE: In practice, this requires us to check for an encoding of + // 21 octets, since some CAs generate 20 bytes of randomness and + // then forget to check whether that number would be negative, resulting + // in a 21-byte encoding. + return Err(ValidationError::Other( + "certificate must have a serial between 1 and 20 octets".to_string(), + )); + } else if serial_bytes[0] & 0x80 == 0x80 { + // TODO: replace with `is_negative`: https://github.com/alex/rust-asn1/pull/425 + return Err(ValidationError::Other( + "certificate serial number cannot be negative".to_string(), + )); + } + + // 5280 4.1.2.4: Issuer + // The issuer MUST be a non-empty distinguished name. + if cert.issuer().is_empty() { + return Err(ValidationError::Other( + "certificate must have a non-empty Issuer".to_string(), + )); + } + + // 5280 4.1.2.5: Validity + // Validity dates before 2050 MUST be encoded as UTCTime; + // dates in or after 2050 MUST be encoded as GeneralizedTime. + let not_before = cert.tbs_cert.validity.not_before.as_datetime(); + let not_after = cert.tbs_cert.validity.not_after.as_datetime(); + permits_validity_date(&cert.tbs_cert.validity.not_before)?; + permits_validity_date(&cert.tbs_cert.validity.not_after)?; + if &self.validation_time < not_before || &self.validation_time > not_after { + return Err(ValidationError::Other( + "cert is not valid at validation time".to_string(), + )); + } + + // Extension policy checks. + for ext_policy in self.common_extension_policies.iter() { + ext_policy.permits(self, cert, &extensions)?; + } + + // Check that all critical extensions in this certificate are accounted for. + let critical_extensions = extensions + .iter() + .filter(|e| e.critical) + .map(|e| e.extn_id) + .collect::>(); + let checked_extensions = self + .common_extension_policies + .iter() + .chain(self.ca_extension_policies.iter()) + .chain(self.ee_extension_policies.iter()) + .map(|p| p.oid.clone()) + .collect::>(); + + if critical_extensions + .difference(&checked_extensions) + .next() + .is_some() + { + // TODO: Render the OIDs here. + return Err(ValidationError::Other( + "certificate contains unaccounted-for critical extensions".to_string(), + )); + } + + Ok(()) + } + + /// Checks whether the given CA certificate is compatible with this policy. 
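One encoding detail in `permits_basic` above that tends to surprise readers is the 21-octet allowance for serial numbers; a minimal sketch (not part of the patch, hypothetical bytes) of the arithmetic behind it:

```rust
// A CA that draws 20 random bytes and leaves the top bit set produces a value
// whose DER INTEGER encoding needs a leading 0x00 pad byte to stay positive,
// i.e. 21 content octets. The `1..=21` range tolerates that common off-by-one,
// while the separate sign check still rejects genuinely negative serials.
fn main() {
    let raw: [u8; 20] = [0xff; 20]; // high bit of the first octet is set
    let needs_pad = raw[0] & 0x80 == 0x80;
    let der_content_len = raw.len() + usize::from(needs_pad);
    assert_eq!(der_content_len, 21);
}
```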
+    pub(crate) fn permits_ca(
+        &self,
+        cert: &Certificate<'_>,
+        current_depth: u8,
+        extensions: &Extensions<'_>,
+    ) -> Result<(), ValidationError> {
+        self.permits_basic(cert)?;
+
+        // 5280 4.1.2.6: Subject
+        // CA certificates MUST have a subject populated with a non-empty distinguished name.
+        // No check required here: `permits_basic` checks that the issuer is non-empty
+        // and `ChainBuilder::potential_issuers` enforces subject/issuer matching,
+        // meaning that a CA with an empty subject cannot occur in a built chain.
+
+        // NOTE: This conceptually belongs in `valid_issuer`, but is easier
+        // to test here. It's also conceptually an extension policy, but
+        // requires a bit of extra external state (`current_depth`) that isn't
+        // presently convenient to push into that layer.
+        //
+        // NOTE: BasicConstraints is required via `ca_extension_policies`,
+        // so we always take this branch.
+        if let Some(bc) = extensions.get_extension(&BASIC_CONSTRAINTS_OID) {
+            let bc: BasicConstraints = bc.value()?;
+
+            if bc
+                .path_length
+                .map_or(false, |len| u64::from(current_depth) > len)
+            {
+                return Err(ValidationError::Other(
+                    "path length constraint violated".to_string(),
+                ))?;
+            }
+        }
+
+        for ext_policy in self.ca_extension_policies.iter() {
+            ext_policy.permits(self, cert, extensions)?;
+        }
+
+        Ok(())
+    }
+
+    /// Checks whether the given EE certificate is compatible with this policy.
+    pub(crate) fn permits_ee(
+        &self,
+        cert: &Certificate<'_>,
+        extensions: &Extensions<'_>,
+    ) -> Result<(), ValidationError> {
+        self.permits_basic(cert)?;
+
+        for ext_policy in self.ee_extension_policies.iter() {
+            ext_policy.permits(self, cert, extensions)?;
+        }
+
+        Ok(())
+    }
+
+    /// Checks whether `issuer` is a valid issuing CA for `child` at a
+    /// path-building depth of `current_depth`.
+    ///
+    /// This checks that `issuer` is permitted under this policy and that
+    /// it was used to sign for `child`.
+    ///
+    /// As a precondition, the caller must have already checked that
+    /// `issuer.subject() == child.issuer()`.
+    ///
+    /// On success, this function returns `Ok(())`. It does not return an
+    /// updated path-building depth: `ChainBuilder::build_chain_inner` is
+    /// responsible for incrementing the depth for each chain member.
+    pub(crate) fn valid_issuer(
+        &self,
+        issuer: &Certificate<'_>,
+        child: &Certificate<'_>,
+        current_depth: u8,
+        issuer_extensions: &Extensions<'_>,
+    ) -> Result<(), ValidationError> {
+        // The issuer needs to be a valid CA at the current depth.
+ self.permits_ca(issuer, current_depth, issuer_extensions)?; + + // CA/B 7.1.3.1 SubjectPublicKeyInfo + if !self + .permitted_public_key_algorithms + .contains(&child.tbs_cert.spki.algorithm) + { + return Err(ValidationError::Other(format!( + "Forbidden public key algorithm: {:?}", + &child.tbs_cert.spki.algorithm + ))); + } + + // CA/B 7.1.3.2 Signature AlgorithmIdentifier + if !self + .permitted_signature_algorithms + .contains(&child.signature_alg) + { + return Err(ValidationError::Other(format!( + "Forbidden signature algorithm: {:?}", + &child.signature_alg + ))); + } + + let pk = self + .ops + .public_key(issuer) + .map_err(|_| ValidationError::Other("issuer has malformed public key".to_string()))?; + if self.ops.verify_signed_by(child, pk).is_err() { + return Err(ValidationError::Other( + "signature does not match".to_string(), + )); + } + + Ok(()) + } +} + +fn permits_validity_date(validity_date: &Time) -> Result<(), ValidationError> { + const GENERALIZED_DATE_INVALIDITY_RANGE: Range = 1950..2050; + + // NOTE: The inverse check on `asn1::UtcTime` is already done for us + // by the variant's constructor. + if let Time::GeneralizedTime(_) = validity_date { + if GENERALIZED_DATE_INVALIDITY_RANGE.contains(&validity_date.as_datetime().year()) { + return Err(ValidationError::Other( + "validity dates between 1950 and 2049 must be UtcTime".to_string(), + )); } } + + Ok(()) } #[cfg(test)] mod tests { use std::ops::Deref; - use asn1::SequenceOfWriter; + use asn1::{DateTime, SequenceOfWriter}; + use cryptography_x509::common::Time; use cryptography_x509::{ extensions::SubjectAlternativeName, name::{GeneralName, UnvalidatedIA5String}, @@ -257,9 +588,9 @@ mod tests { }; use super::{ - ECDSA_SHA256, ECDSA_SHA384, ECDSA_SHA512, RSASSA_PKCS1V15_SHA256, RSASSA_PKCS1V15_SHA384, - RSASSA_PKCS1V15_SHA512, RSASSA_PSS_SHA256, RSASSA_PSS_SHA384, RSASSA_PSS_SHA512, - WEBPKI_PERMITTED_SIGNATURE_ALGORITHMS, + permits_validity_date, ECDSA_SHA256, ECDSA_SHA384, ECDSA_SHA512, RSASSA_PKCS1V15_SHA256, + RSASSA_PKCS1V15_SHA384, RSASSA_PKCS1V15_SHA512, RSASSA_PSS_SHA256, RSASSA_PSS_SHA384, + RSASSA_PSS_SHA512, WEBPKI_PERMITTED_SIGNATURE_ALGORITHMS, }; #[test] @@ -380,7 +711,7 @@ mod tests { assert!(!ip_sub.matches(&any_cryptography_io)); } - // Single SAN, IP range. + // Single SAN, IP address. { let ip_gn = GeneralName::IPAddress(&[127, 0, 0, 1]); let san_der = asn1::write_single(&SequenceOfWriter::new([ip_gn])).unwrap(); @@ -413,4 +744,57 @@ mod tests { assert!(!domain_sub.matches(&any_cryptography_io)); } } + + #[test] + fn test_validity_date() { + { + // Pre-2050 date. + let utc_dt = DateTime::new(1980, 1, 1, 0, 0, 0).unwrap(); + let generalized_dt = utc_dt.clone(); + let utc_validity = Time::UtcTime(asn1::UtcTime::new(utc_dt).unwrap()); + let generalized_validity = + Time::GeneralizedTime(asn1::GeneralizedTime::new(generalized_dt).unwrap()); + assert!(permits_validity_date(&utc_validity).is_ok()); + assert!(permits_validity_date(&generalized_validity).is_err()); + } + { + // 2049 date. + let utc_dt = DateTime::new(2049, 1, 1, 0, 0, 0).unwrap(); + let generalized_dt = utc_dt.clone(); + let utc_validity = Time::UtcTime(asn1::UtcTime::new(utc_dt).unwrap()); + let generalized_validity = + Time::GeneralizedTime(asn1::GeneralizedTime::new(generalized_dt).unwrap()); + assert!(permits_validity_date(&utc_validity).is_ok()); + assert!(permits_validity_date(&generalized_validity).is_err()); + } + { + // 2050 date. 
+ let utc_dt = DateTime::new(2050, 1, 1, 0, 0, 0).unwrap(); + let generalized_dt = utc_dt.clone(); + assert!(asn1::UtcTime::new(utc_dt).is_err()); + let generalized_validity = + Time::GeneralizedTime(asn1::GeneralizedTime::new(generalized_dt).unwrap()); + assert!(permits_validity_date(&generalized_validity).is_ok()); + } + { + // 2051 date. + let utc_dt = DateTime::new(2051, 1, 1, 0, 0, 0).unwrap(); + let generalized_dt = utc_dt.clone(); + // The `asn1::UtcTime` constructor prevents this. + assert!(asn1::UtcTime::new(utc_dt).is_err()); + let generalized_validity = + Time::GeneralizedTime(asn1::GeneralizedTime::new(generalized_dt).unwrap()); + assert!(permits_validity_date(&generalized_validity).is_ok()); + } + { + // Post-2050 date. + let utc_dt = DateTime::new(3050, 1, 1, 0, 0, 0).unwrap(); + let generalized_dt = utc_dt.clone(); + // The `asn1::UtcTime` constructor prevents this. + assert!(asn1::UtcTime::new(utc_dt).is_err()); + let generalized_validity = + Time::GeneralizedTime(asn1::GeneralizedTime::new(generalized_dt).unwrap()); + assert!(permits_validity_date(&generalized_validity).is_ok()); + } + } } diff --git a/src/rust/cryptography-x509/src/extensions.rs b/src/rust/cryptography-x509/src/extensions.rs index db7cdd82a5e8..15c495147759 100644 --- a/src/rust/cryptography-x509/src/extensions.rs +++ b/src/rust/cryptography-x509/src/extensions.rs @@ -8,6 +8,7 @@ use crate::common; use crate::crl; use crate::name; +#[derive(Debug, PartialEq, Eq)] pub struct DuplicateExtensionsError(pub asn1::ObjectIdentifier); pub type RawExtensions<'a> = common::Asn1ReadableOrWritable< diff --git a/src/rust/src/x509/verify.rs b/src/rust/src/x509/verify.rs index e074b9cb3009..d664e3814bba 100644 --- a/src/rust/src/x509/verify.rs +++ b/src/rust/src/x509/verify.rs @@ -6,6 +6,7 @@ use cryptography_x509::certificate::Certificate; use cryptography_x509_validation::{ ops::CryptoOps, policy::{Policy, Subject}, + trust_store::Store, types::{DNSName, IPAddress}, }; @@ -18,6 +19,8 @@ use crate::{ exceptions::VerificationError, }; +use super::certificate::OwnedCertificate; + pub(crate) struct PyCryptoOps {} impl CryptoOps for PyCryptoOps { @@ -103,11 +106,44 @@ impl PyServerVerifier { fn verify<'p>( &self, - _py: pyo3::Python<'p>, - _leaf: &PyCertificate, - _intermediates: &'p pyo3::types::PyList, + py: pyo3::Python<'p>, + leaf: &PyCertificate, + intermediates: Vec>, ) -> CryptographyResult> { - Err(VerificationError::new_err("unimplemented").into()) + let store = Store::new( + self.store + .as_ref(py) + .get() + .0 + .iter() + .map(|t| t.get().raw.borrow_dependent().clone()), + ); + + let policy = self.as_policy(); + let chain = cryptography_x509_validation::verify( + leaf.raw.borrow_dependent(), + intermediates + .iter() + .map(|i| i.raw.borrow_dependent().clone()), + policy, + &store, + ) + .map_err(|e| VerificationError::new_err(format!("validation failed: {e:?}")))?; + + // TODO: Optimize this? Turning a Certificate back into a PyCertificate + // involves a full round-trip back through DER, which isn't ideal. 
+ chain + .iter() + .map(|c| { + let raw = pyo3::types::PyBytes::new(py, &asn1::write_single(c)?); + Ok(PyCertificate { + raw: OwnedCertificate::try_new(raw.into(), |raw| { + asn1::parse_single(raw.as_bytes(py)) + })?, + cached_extensions: pyo3::once_cell::GILOnceCell::new(), + }) + }) + .collect() } } diff --git a/tests/x509/verification/__init__.py b/tests/x509/verification/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/x509/verification/test_limbo.py b/tests/x509/verification/test_limbo.py new file mode 100644 index 000000000000..2d2f1fd6fe0f --- /dev/null +++ b/tests/x509/verification/test_limbo.py @@ -0,0 +1,147 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import datetime +import ipaddress +import json +import os + +import pytest + +from cryptography import x509 +from cryptography.x509 import load_pem_x509_certificate +from cryptography.x509.verification import ( + PolicyBuilder, + Store, + VerificationError, +) + +LIMBO_UNSUPPORTED_FEATURES = { + # NOTE: Path validation is required to reject wildcards on public suffixes, + # however this isn't practical and most implementations make no attempt to + # comply with this. + "pedantic-public-suffix-wildcard", + # TODO: We don't support Distinguished Name Constraints yet. + "name-constraint-dn", + # Our support for custom EKUs is limited, and we (like most impls.) don't + # handle all EKU conditions under CABF. + "pedantic-webpki-eku", + # Similarly: contains tests that fail based on a strict reading of RFC 5280 + # but are widely ignored by validators. + "pedantic-rfc5280", + # In rare circumstances, CABF relaxes RFC 5280's prescriptions in + # incompatible ways. Our validator always tries (by default) to comply + # closer to CABF, so we skip these. + "rfc5280-incompatible-with-webpki", +} + +LIMBO_SKIP_TESTCASES = { + # We unconditionally count intermediate certificates for pathlen and max + # depth constraint purposes, even when self-issued. + # This is a violation of RFC 5280, but is consistent with Go's crypto/x509 + # and Rust's webpki crate do. + "pathlen::self-issued-certs-pathlen", + "pathlen::max-chain-depth-1-self-issued", + # We allow certificates with serial numbers of zero. This is + # invalid under RFC 5280 but is widely violated by certs in common + # trust stores. + "rfc5280::serial::zero", + # We allow CAs that don't have AKIs, which is forbidden under + # RFC 5280. This is consistent with what Go's crypto/x509 and Rust's + # webpki crate do. + "rfc5280::ski::root-missing-ski", + "rfc5280::ski::intermediate-missing-ski", + # We currently allow intermediate CAs that don't have AKIs, which + # is technically forbidden under CABF. This is consistent with what + # Go's crypto/x509 and Rust's webpki crate do. + "rfc5280::aki::intermediate-missing-aki", + # We allow root CAs where the AKI and SKI mismatch, which is technically + # forbidden under CABF. This is consistent with what + # Go's crypto/x509 and Rust's webpki crate do. + "webpki::aki::root-with-aki-ski-mismatch", + # We disallow CAs in the leaf position, which is explicitly forbidden + # by CABF (but implicitly permitted under RFC 5280). This is consistent + # with what webpki and rustls do, but inconsistent with Go and OpenSSL. 
+ "rfc5280::ca-as-leaf", + "pathlen::validation-ignores-pathlen-in-leaf", +} + + +def _get_limbo_peer(expected_peer): + kind = expected_peer["kind"] + assert kind in ("DNS", "IP") + value = expected_peer["value"] + if kind == "DNS": + return x509.DNSName(value) + else: + return x509.IPAddress(ipaddress.ip_address(value)) + + +def _limbo_testcase(id_, testcase): + if id_ in LIMBO_SKIP_TESTCASES: + return + + features = testcase["features"] + if LIMBO_UNSUPPORTED_FEATURES.intersection(features): + return + assert testcase["validation_kind"] == "SERVER" + assert testcase["signature_algorithms"] == [] + assert testcase["extended_key_usage"] == [] or testcase[ + "extended_key_usage" + ] == ["serverAuth"] + assert testcase["expected_peer_names"] == [] + + trusted_certs = [ + load_pem_x509_certificate(cert.encode()) + for cert in testcase["trusted_certs"] + ] + untrusted_intermediates = [ + load_pem_x509_certificate(cert.encode()) + for cert in testcase["untrusted_intermediates"] + ] + peer_certificate = load_pem_x509_certificate( + testcase["peer_certificate"].encode() + ) + peer_name = _get_limbo_peer(testcase["expected_peer_name"]) + validation_time = testcase["validation_time"] + validation_time = ( + datetime.datetime.fromisoformat(validation_time) + if validation_time is not None + else None + ) + max_chain_depth = testcase["max_chain_depth"] + should_pass = testcase["expected_result"] == "SUCCESS" + + verifier = PolicyBuilder( + time=validation_time, + store=Store(trusted_certs), + max_chain_depth=max_chain_depth, + ).build_server_verifier(peer_name) + + if should_pass: + built_chain = verifier.verify( + peer_certificate, untrusted_intermediates + ) + + # Assert that the verifier returns chains in [EE, ..., TA] order. + assert built_chain[0] == peer_certificate + for intermediate in built_chain[1:-1]: + assert intermediate in untrusted_intermediates + assert built_chain[-1] in trusted_certs + else: + with pytest.raises(VerificationError): + verifier.verify(peer_certificate, untrusted_intermediates) + + +def test_limbo(subtests, pytestconfig): + limbo_root = pytestconfig.getoption("--x509-limbo-root", skip=True) + limbo_path = os.path.join(limbo_root, "limbo.json") + with open(limbo_path, mode="rb") as limbo_file: + limbo = json.load(limbo_file) + testcases = limbo["testcases"] + for testcase in testcases: + with subtests.test(): + # NOTE: Pass in the id separately to make pytest + # error renderings slightly nicer. 
+ _limbo_testcase(testcase["id"], testcase) diff --git a/tests/x509/test_verification.py b/tests/x509/verification/test_verification.py similarity index 86% rename from tests/x509/test_verification.py rename to tests/x509/verification/test_verification.py index 73012dee03e1..d4b0bc07d606 100644 --- a/tests/x509/test_verification.py +++ b/tests/x509/verification/test_verification.py @@ -11,11 +11,7 @@ from cryptography import x509 from cryptography.x509.general_name import DNSName, IPAddress -from cryptography.x509.verification import ( - PolicyBuilder, - Store, - VerificationError, -) +from cryptography.x509.verification import PolicyBuilder, Store from tests.x509.test_x509 import _load_cert @@ -107,18 +103,3 @@ def test_store_rejects_empty_list(self): def test_store_rejects_non_certificates(self): with pytest.raises(TypeError): Store(["not a cert"]) # type: ignore[list-item] - - -class TestServerVerifier: - def test_not_implemented(self): - verifier = ( - PolicyBuilder() - .store(dummy_store()) - .build_server_verifier(DNSName("cryptography.io")) - ) - cert = _load_cert( - os.path.join("x509", "cryptography.io.pem"), - x509.load_pem_x509_certificate, - ) - with pytest.raises(VerificationError): - verifier.verify(cert, [])
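For completeness, a sketch (not part of the patch) of the end-user flow that the new `ServerVerifier.verify` implementation enables, mirroring the Limbo harness above. The PEM file names are placeholders, and `time=None` / `max_chain_depth=None` are passed the same way the harness does when a testcase omits them:

```python
from cryptography.x509 import DNSName, load_pem_x509_certificate
from cryptography.x509.verification import (
    PolicyBuilder,
    Store,
    VerificationError,
)

# Placeholder paths: a single trusted root and the server's leaf certificate.
with open("root.pem", "rb") as f:
    root = load_pem_x509_certificate(f.read())
with open("leaf.pem", "rb") as f:
    leaf = load_pem_x509_certificate(f.read())

verifier = PolicyBuilder(
    time=None,
    store=Store([root]),
    max_chain_depth=None,
).build_server_verifier(DNSName("cryptography.io"))

try:
    # On success the chain comes back in [leaf, ..., trust anchor] order.
    chain = verifier.verify(leaf, [])
    print(f"validated a chain of {len(chain)} certificates")
except VerificationError as e:
    print(f"validation failed: {e}")
```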