diff --git a/.bumpversion-ansible.cfg b/.bumpversion-ansible.cfg index ae2e4f9b3..cb4aa6e79 100644 --- a/.bumpversion-ansible.cfg +++ b/.bumpversion-ansible.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 7.4.0 +current_version = 7.5.3 commit = true message = Bump cp-ansible Version: {current_version} → {new_version} tag = false diff --git a/.bumpversion.cfg b/.bumpversion.cfg index d2a191ac2..9a03d1627 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 7.4.0 +current_version = 7.5.3 commit = true message = Bump CP Version: {current_version} → {new_version} tag = false diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index faa585caf..3e62d1d4f 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # See go/codeowners - automatically generated for confluentinc/cp-ansible: -* @confluentinc/cp-ansible-dev +* @confluentinc/ansible-team diff --git a/CHANGELOG.rst b/CHANGELOG.rst index af4d3d3bd..d51b8b1ae 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -4,6 +4,60 @@ Ansible Playbooks for Confluent Platform - Release Notes .. contents:: Topics +v7.5.3 +====== + +Notable fixes +------------- + +- Critical security and vulnerability issues were fixed. + +v7.5.2 +====== + +Notable fixes +------------- + +- Critical security and vulnerability issues were fixed. + + +v7.5.1 +====== + +Notable enhancements +-------------------- + +- Updated default Confluent CLI version to 3.30.1 +- Fixed bugs in discovery to generate the inventory file with appropriate security protocols +- Ansible builtin file mode is now a string instead of octal + +v7.5.0 +====== + +New features +------------- + +- Configure single sign-on (SSO) authentication for Control Center using OpenID Connect (OIDC) +- Enable FIPS on Confluent Platform (CP) for RHEL8 & RHEL9 using cp-ansible. + +Notable enhancements +-------------------- + +- Added capability to discover multiple workers in the Connect cluster in discovery +- Added support to specify ansible_password in discovery + +v7.4.1 +====== + +Notable enhancements +-------------------- + +- Parametrize the number of retries for MDS API requests +- Add Broker's principals to Controller's super user list on a KRaft cluster with RBAC +- Removed timeout configs from client properties of Kafka Broker, allowing customers to use custom timeout values +- Archived installation of Confluent Platform on Debian 9 since the OS version reached end-of-life + + v7.4.0 ====== diff --git a/Jenkinsfile b/Jenkinsfile index fa90c93ff..76fd8a5ee 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -29,8 +29,8 @@ def confluent_release_quality = choice(name: 'CONFLUENT_RELEASE_QUALITY', // Parameter for the molecule test scenario to run def molecule_scenario_name = choice(name: 'SCENARIO_NAME', - choices: ['rbac-scram-custom-rhel', 'plaintext-rhel'], - defaultValue: 'rbac-scram-custom-rhel', + choices: ['rbac-mtls-rhel8', 'plaintext-rhel'], + defaultValue: 'rbac-mtls-rhel8', description: 'The Ansible Molecule scenario name to run', ) diff --git a/discovery/README.md b/discovery/README.md index d5493d334..31e6f9f4d 100644 --- a/discovery/README.md +++ b/discovery/README.md @@ -12,15 +12,15 @@ Though this script tries to come up with inventory which is the closed represent These are dependencies for this script and should be installed on the machine where we are executing it from. This is not a requirement for managed nodes of the cluster.
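As a rough guide, the script's own imports visible later in this diff (`yaml`, `jproperties`, `ansible_runner`) suggest an install along the following lines; the exact package set and versions are an assumption, not part of this change:

```shell
# Hypothetical install of the discovery script's Python dependencies on the
# control machine only (package names inferred from imports in this diff).
pip install ansible-runner PyYAML jproperties
```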
#### Hosts -The discovery script needs list of hosts which is part of the existing cluster on which services has to be discovered. Apart from the list of hosts, the script also need the Confluent Service names. If these service names has been updated, the same should be provided under `service_override` section. +The discovery script needs a list of hosts which are part of the existing cluster on which services have to be discovered. Apart from the list of hosts, the script also needs the Confluent service names. If these service names have been updated, they should be provided under the `service_overrides` section. The name should be passed including the ".service" suffix. ```yaml all: vars: ansible_connection: docker ansible_user: null - service_override: - zookeeper_service_name: myservice.zookeeper + service_overrides: + zookeeper_service_name: myservice-zookeeper.service ``` ### How @@ -95,8 +95,8 @@ For a cluster running on local docker environment vars: ansible_connection: docker - service_override: - zookeeper_service_name: 'custom-service-name' + service_overrides: + zookeeper_service_name: 'custom-service-name.service' hosts: all: - zookeeper1 @@ -112,16 +112,25 @@ hosts: ``` #### Command Line options -##### verbose +##### verbosity To get verbose output from the script and Ansible, you can set the verbosity level between 0 and 4, where 4 is the most verbose. +```shell +python discovery/main.py --input discovery/hosts.yml --verbosity 4 +``` ##### limit Use the limit flag to restrict discovery to a specified list of hosts +```shell +python discovery/main.py --input discovery/hosts.yml --limit host1,host2 +``` ##### output_file Use this flag to specify the output inventory file name. The default value is inventory.yml - ```shell -python discovery/main.py --input discovery/hosts.yml --verbose 4 --limit host1,host2 +python discovery/main.py --input discovery/hosts.yml --output_file inventory.yml ``` +##### multi_threaded +Set this flag to run the discovery script in multi-threaded mode. Default is True. + + ### FAQ * **Can I use it for older CP versions** Ideally we should be using the discovery from the branch which maps to the CP cluster.
However, to onboard an existing cluster, one can use the latest discovery code and the **--from_version** parameter to specify the CP cluster version. diff --git a/discovery/main.py b/discovery/main.py index 0e4ea907f..3ac28c615 100644 --- a/discovery/main.py +++ b/discovery/main.py @@ -7,8 +7,9 @@ logger = Logger.get_logger() -def perform_pre_checks(input_context, inventory): - SystemValidator.validate_connection(input_context) +def perform_pre_checks(input_context: InputContext, inventory: CPInventoryManager): + if not input_context.skip_validation: + SystemValidator.validate_connection(input_context) def build_system_properties(input_context: InputContext, inventory: CPInventoryManager): diff --git a/discovery/manager/manager.py b/discovery/manager/manager.py index 3b37702f6..664fe7418 100644 --- a/discovery/manager/manager.py +++ b/discovery/manager/manager.py @@ -32,6 +32,7 @@ def my_event_handler(self, event): def get_ansible_vars_from_input_context(input_context: InputContext) -> dict: vars = dict() vars['ansible_user'] = input_context.ansible_user + vars['ansible_password'] = input_context.ansible_password vars['ansible_become'] = input_context.ansible_become vars['ansible_connection'] = input_context.ansible_connection vars['ansible_become_user'] = input_context.ansible_become_user @@ -39,6 +40,8 @@ def get_ansible_vars_from_input_context(input_context: InputContext) -> dict: vars['ansible_ssh_extra_args'] = input_context.ansible_ssh_extra_args vars['ansible_python_interpreter'] = input_context.ansible_python_interpreter vars['ansible_ssh_private_key_file'] = input_context.ansible_ssh_private_key_file + vars['ansible_common_remote_group'] = input_context.ansible_common_remote_group + vars['ansible_become_password'] = input_context.ansible_become_password return vars @staticmethod @@ -98,9 +101,7 @@ def get_service_facts(input_context: InputContext) -> dict: def get_service_host_mapping(input_context: InputContext, **kwargs) -> dict: logger.info(f"Creating service and host mapping...") logger.info("Configured services:") - logger.info(yaml.dump(ConfluentServices(input_context).get_all_service_names(), - indent=2, - default_flow_style=False)) + logger.info(ConfluentServices(input_context).get_all_service_names()) hosts = input_context.ansible_hosts if _host_group_declared_in_inventory(hosts, input_context): @@ -238,11 +239,11 @@ def __get_service_properties_file(input_context: InputContext, service: ServiceD host = hosts[0] service_details = SystemPropertyManager.get_service_details(input_context, service, [host]) - execution_command = service_details.get(host).get("status").get("ExecStart") + execution_command = str(service_details.get(host).get("status").get("ExecStart")) # check if we have flag based configs property_files = dict() - matches = re.findall('(--[\w\.]+\.config)*\s+([\w\/-]+\.properties)', execution_command) + matches = re.findall(u'(--[\w\.]+\.config)*\s+(\S+\.properties)', execution_command) for match in matches: key, path = match key = key.strip('--') if key else DEFAULT_KEY @@ -252,6 +253,37 @@ def __get_service_properties_file(input_context: InputContext, service: ServiceD logger.error(f"Cannot find associated properties file for service {service.value.get('name')}") return property_files + @staticmethod + def slurp_remote_file(input_context: InputContext, hosts: list, file: str) -> dict: + """ + Returns a map of hosts and content of given file on that host + :param input_context: + :param hosts: + :param file: + :return: + """ + + content = dict() + if not file:
return content + + runner_utils = AnsibleRunnerUtils() + hosts, host_pattern = AnsibleRunnerUtils.get_host_and_pattern_from_host_list(hosts) + ansible_runner.run( + quiet=input_context.verbosity <= 3, + host_pattern=host_pattern, + inventory=AnsibleRunnerUtils.get_inventory_dict(input_context, hosts), + module="slurp", + module_args=f"src={file}", + event_handler=runner_utils.my_event_handler + ) + + response = runner_utils.result_ok + for host in hosts: + content[host] = base64.b64decode(response[host]['content']).decode('utf-8') + + return content + @staticmethod def get_property_mappings(input_context: InputContext, service: ServiceData, hosts: list) -> dict: @@ -262,23 +294,10 @@ def get_property_mappings(input_context: InputContext, service: ServiceData, hos return mappings for key, file in seed_properties_file.items(): - - runner_utils = AnsibleRunnerUtils() - hosts, host_pattern = AnsibleRunnerUtils.get_host_and_pattern_from_host_list(hosts) - ansible_runner.run( - quiet=input_context.verbosity <= 3, - host_pattern=host_pattern, - inventory=AnsibleRunnerUtils.get_inventory_dict(input_context, hosts), - module="slurp", - module_args=f"src={file}", - event_handler=runner_utils.my_event_handler - ) - - response = runner_utils.result_ok + content = ServicePropertyManager.slurp_remote_file(input_context=input_context, hosts=hosts, file=file) for host in hosts: - properties = base64.b64decode(response[host]['content']).decode('utf-8') host_properties = mappings.get(host, dict()) - host_properties.update({key: load_properties_to_dict(properties)}) + host_properties.update({key: load_properties_to_dict(content.get(host))}) mappings[host] = host_properties return mappings @@ -314,7 +333,10 @@ def get_keystore_alias_names(input_context: InputContext, hosts: list, keystorep @staticmethod def get_jaas_file_path(input_context: InputContext, service: ServiceData, hosts: list): # check if overriden as env var - env_details = ServicePropertyManager.get_env_details(input_context, service, hosts) + from discovery.service import AbstractPropertyBuilder + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, + hosts=hosts) kafka_opts = env_details.get('KAFKA_OPTS', None) if kafka_opts is not None: if "-Djava.security.auth.login.config=" in kafka_opts: @@ -325,7 +347,9 @@ def get_jaas_file_path(input_context: InputContext, service: ServiceData, hosts: @staticmethod def get_log_file_path(input_context: InputContext, service: ServiceData, hosts: list, log4j_opts_env_var): # check if overriden as env var - env_details = ServicePropertyManager.get_env_details(input_context, service, hosts) + from discovery.service import AbstractPropertyBuilder + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, hosts=hosts) log4j_opts = env_details.get(log4j_opts_env_var, None) if log4j_opts is not None: if "-Dlog4j.configuration=file:" in log4j_opts: @@ -354,36 +378,6 @@ def get_log_file_path(input_context: InputContext, service: ServiceData, hosts: return None - @staticmethod - def parse_environment_details(env_command: str) -> dict: - env_details = dict() - if not env_command: - return env_details - - tokens = ['KAFKA_HEAP_OPTS', 'KAFKA_OPTS', 'KAFKA_LOG4J_OPTS', 'LOG_DIR', 'CONFLUENT_SECURITY_MASTER_KEY'] - for token in tokens: - pattern = f"{token}=(.*?)( [A-Z]{{3}}|$)" - match = re.search(pattern, env_command) - if match: - env_details[token] = match.group(1).rstrip() - return 
env_details - - @staticmethod - def _get_env_from_service(input_context: InputContext, service: ServiceData, hosts: list) -> str: - service_facts = SystemPropertyManager.get_service_details(input_context, service, hosts) - service_facts = service_facts.get(hosts[0]) - environment = service_facts.get("status", dict()).get("Environment", None) - if not environment: - logger.warning(f"Could not find any environment variable for service {service.name}") - return environment - - @staticmethod - def get_env_details(input_context: InputContext, service: ServiceData, hosts: list) -> dict: - env_cmd = ServicePropertyManager._get_env_from_service(input_context=input_context, - service=service, - hosts=hosts) - return ServicePropertyManager.parse_environment_details(env_cmd) - @staticmethod def get_kerberos_configurations(input_context: InputContext, hosts: list, kerberos_config_file): realm, kdc, admin = "", "", "" @@ -581,10 +575,12 @@ def validate_connection(input_context: InputContext) -> None: hosts, host_pattern = AnsibleRunnerUtils.get_host_and_pattern_from_input_context(input_context) if not hosts or len(hosts) == 0: terminate_script("Empty host list. Please refer to documentation for correct host format") + + inventory = AnsibleRunnerUtils.get_inventory_dict(input_context) ansible_runner.run( quiet=input_context.verbosity <= 3, host_pattern=host_pattern, - inventory=AnsibleRunnerUtils.get_inventory_dict(input_context), + inventory=inventory, module='ansible.builtin.ping', event_handler=runner_utils.my_event_handler ) @@ -592,11 +588,13 @@ def validate_connection(input_context: InputContext) -> None: if runner_utils.result_ok: alive_hosts = runner_utils.result_ok.keys() failed_hosts.extend(hosts - alive_hosts) - logger.info(f"Connection was successful to:\n{yaml.dump(list(alive_hosts), indent=2, default_flow_style=False)}") + logger.info( + f"Connection was successful to:\n{yaml.dump(list(alive_hosts), indent=2, default_flow_style=False)}") else: failed_hosts.extend(hosts) if failed_hosts: - message = f"Could not connect to hosts:\n{yaml.dump(list(failed_hosts), indent=2, default_flow_style=False)}.\n" \ - f"Please verify the hostnames, ssh user and key" + message = f"Could not connect to hosts:\n{yaml.dump(list(failed_hosts), indent=2, default_flow_style=False)}\n" \ + f"Please verify the following details:\n" \ + f"{yaml.dump(inventory['all']['vars'], indent=2, default_flow_style=False)}" terminate_script(message) diff --git a/discovery/service/control_center.py b/discovery/service/control_center.py index ccd937e18..24bf97884 100644 --- a/discovery/service/control_center.py +++ b/discovery/service/control_center.py @@ -1,10 +1,11 @@ +import re import sys from discovery.service.service import AbstractPropertyBuilder from discovery.utils.constants import DEFAULT_KEY from discovery.utils.inventory import CPInventoryManager from discovery.utils.services import ConfluentServices, ServiceData -from discovery.utils.utils import InputContext, Logger, FileUtils +from discovery.utils.utils import InputContext, Logger, FileUtils, get_listener_details logger = Logger.get_logger() @@ -95,14 +96,16 @@ def __build_runtime_properties(self, hosts: list): def _build_service_protocol_port(self, service_prop: dict) -> tuple: key = "confluent.controlcenter.rest.listeners" self.mapped_service_properties.add(key) - from urllib.parse import urlparse - listener = service_prop.get(key).split(',')[0] - parsed_uri = urlparse(listener) - return self.group, { - "control_center_http_protocol": parsed_uri.scheme, - 
"control_center_listener_hostname": parsed_uri.hostname, - "control_center_port": parsed_uri.port - } + if key in service_prop: + listener = service_prop.get(key).split(',')[0] + parsed_uri = get_listener_details(listener) + return self.group, { + "control_center_http_protocol": parsed_uri['scheme'], + "control_center_listener_hostname": parsed_uri['host'], + "control_center_port": parsed_uri['port'] + } + else: + return self.group, {} def _build_control_center_internal_replication_property(self, service_prop: dict) -> tuple: key1 = "confluent.controlcenter.command.topic.replication" @@ -342,6 +345,24 @@ def _build_sr_ssl_properties(self, service_props: dict) -> tuple: return 'all', {'schema_registry_ssl_enabled': True} return 'all', {} + def _build_client_properties(self, service_props: dict) -> tuple: + # Clients properties will be populated by CP-Ansible. + # We will remove the existing client properties from inventory + patterns = ['confluent.controlcenter.[connect|ksql].(\S+).cluster', + 'confluent.controlcenter.[connect|ksql].(\S+).ssl.key.password', + 'confluent.controlcenter.[connect|ksql].(\S+).ssl.keystore.location', + 'confluent.controlcenter.[connect|ksql].(\S+).ssl.keystore.password', + 'confluent.controlcenter.[connect|ksql].(\S+).ssl.truststore.location', + 'confluent.controlcenter.[connect|ksql].(\S+).ssl.truststore.password', + 'confluent.controlcenter.[connect|ksql].(\S+).advertised.url', + 'confluent.controlcenter.[connect|ksql].(\S+).url'] + + for pattern in patterns: + for key in service_props.keys(): + match = re.search(pattern, key) + if match: + self.mapped_service_properties.add(key) + class ControlCenterServicePropertyBaseBuilder60(ControlCenterServicePropertyBaseBuilder): pass diff --git a/discovery/service/kafka_broker.py b/discovery/service/kafka_broker.py index 1906fd599..736845287 100644 --- a/discovery/service/kafka_broker.py +++ b/discovery/service/kafka_broker.py @@ -4,7 +4,7 @@ from discovery.utils.constants import DEFAULT_KEY from discovery.utils.inventory import CPInventoryManager from discovery.utils.services import ConfluentServices, ServiceData -from discovery.utils.utils import InputContext, Logger, FileUtils +from discovery.utils.utils import InputContext, Logger, FileUtils, get_listener_details logger = Logger.get_logger() @@ -106,11 +106,18 @@ def __get_user_dict(self, service_prop: dict, key: str) -> dict: # parse line to get the user configs for token in jaas_config.split(): if "=" in token: - username, password = token.split('=') - if username.startswith('user_'): - principal = username.replace('user_', '') + key, value = token.split('=') + if key.startswith('username'): + principal = value.strip('"') + # Sanitize the password + if key.startswith('password'): + password = value + password = value.rstrip(';').strip('"') + users[principal] = {'principal': principal, 'password': password} + if key.startswith('user_'): + principal = key.replace('user_', '') # Sanitize the password - password = password.rstrip(';').strip('"') + password = value.rstrip(';').strip('"') users[principal] = {'principal': principal, 'password': password} return users @@ -138,8 +145,10 @@ def _build_replication_factors(self, service_properties: dict) -> tuple: def _build_inter_broker_listener_name(self, service_prop: dict) -> tuple: key = "inter.broker.listener.name" - self.mapped_service_properties.add(key) - return self.group, {"kafka_broker_inter_broker_listener_name": service_prop.get(key).lower()} + if key in service_prop: + self.mapped_service_properties.add(key) + 
return self.group, {"kafka_broker_inter_broker_listener_name": service_prop.get(key).lower()} + return self.group, {} def _build_http_server_listener(self, service_prop: dict) -> tuple: @@ -278,9 +287,8 @@ def _build_fips_properties(self, service_properties: dict) -> tuple: listeners = self.__get_all_listeners(service_prop=service_properties) for listener in listeners: - from urllib.parse import urlparse - parsed_uri = urlparse(listener) - name = parsed_uri.scheme + parsed_uri = get_listener_details(listener) + name = parsed_uri['scheme'] self.mapped_service_properties.add(f"listener.name.{name}.ssl.enabled.protocols") self.mapped_service_properties.add(f"listener.name.{name}.ssl.keymanager.algorithm") self.mapped_service_properties.add(f"listener.name.{name}.ssl.keystore.type") @@ -296,8 +304,7 @@ def __get_all_listeners(self, service_prop: dict) -> list: def _build_custom_listeners(self, service_prop: dict) -> tuple: custom_listeners = dict() - default_scram_users = dict() - default_scram256_users = dict() + default_scram_sha_256_users = dict() default_scram_sha_512_users = dict() default_plain_users = dict() default_gssapi_users = dict() @@ -305,13 +312,12 @@ def _build_custom_listeners(self, service_prop: dict) -> tuple: listeners = self.__get_all_listeners(service_prop) for listener in listeners: - from urllib.parse import urlparse - parsed_uri = urlparse(listener) - name = parsed_uri.scheme - port = parsed_uri.port - + parsed_uri = get_listener_details(listener) + name = parsed_uri['scheme'] + port = parsed_uri['port'] key1 = f"listener.name.{name}.sasl.enabled.mechanisms" - key2 = f"listener.name.{name}.ssl.client.auth" + key2 = f"listener.name.{name}.ssl.keystore.location" + key3 = f"listener.name.{name}.ssl.client.auth" self.mapped_service_properties.add(key1) self.mapped_service_properties.add(key2) @@ -320,28 +326,33 @@ def _build_custom_listeners(self, service_prop: dict) -> tuple: "port": port } + non_standard_protocol = {'GSSAPI': 'kerberos', 'SCRAM-SHA-512': 'scram', 'SCRAM-SHA-256': 'scram256', 'PLAIN': 'plain', 'OAUTHBEARER': 'oauth'} + ssl_enabled = service_prop.get(key2) if ssl_enabled is not None: custom_listeners[name]['ssl_enabled'] = True - - if 'ssl_mutual_auth_enabled' in self.inventory.groups.get('kafka_broker').vars and \ - self.inventory.groups.get('kafka_broker').vars.get('ssl_mutual_auth_enabled') is True: - custom_listeners[name]['ssl_mutual_auth_enabled'] = True + ssl_mutual_enabled = service_prop.get(key3) + logger.info(f"SSL mutual auth enabled for listener {name} {ssl_mutual_enabled}") + if ssl_mutual_enabled == 'required': + custom_listeners[name]['ssl_mutual_auth_enabled'] = True + else: + custom_listeners[name]['ssl_mutual_auth_enabled'] = False sasl_protocol = service_prop.get(key1) if sasl_protocol is not None: - custom_listeners[name]['sasl_protocol']: sasl_protocol + custom_listeners[name]['sasl_protocol'] = non_standard_protocol[sasl_protocol] # Add the users to corresponding sasl mechanism key = f"listener.name.{name.lower()}.{sasl_protocol.lower()}.sasl.jaas.config" _dict = locals()[f"default_{sasl_protocol.lower().replace('-', '_')}_users"] _dict.update(self.__get_user_dict(service_prop, key)) self.mapped_service_properties.add(key) + else: + custom_listeners[name]['sasl_protocol'] = 'none' return self.group, { "kafka_broker_custom_listeners": custom_listeners, - "sasl_scram_users": default_scram_users, - "sasl_scram256_users": default_scram256_users, - "sasl_scram512_users": default_scram_sha_512_users, + "sasl_scram_users": 
default_scram_sha_512_users, + "sasl_scram256_users": default_scram_sha_256_users, "sasl_plain_users": default_plain_users } diff --git a/discovery/service/kafka_connect.py b/discovery/service/kafka_connect.py index cd677cef8..605ca7c98 100644 --- a/discovery/service/kafka_connect.py +++ b/discovery/service/kafka_connect.py @@ -1,10 +1,11 @@ import sys +import threading from discovery.service.service import AbstractPropertyBuilder from discovery.utils.constants import DEFAULT_KEY from discovery.utils.inventory import CPInventoryManager from discovery.utils.services import ConfluentServices, ServiceData -from discovery.utils.utils import InputContext, Logger, FileUtils +from discovery.utils.utils import InputContext, Logger, FileUtils, get_listener_details logger = Logger.get_logger() @@ -37,7 +38,6 @@ def __init__(self, input_context: InputContext, inventory: CPInventoryManager): self.group = self.service.group def build_properties(self): - # Get the hosts for given service hosts = self.get_service_host(self.service, self.inventory) self.hosts = hosts @@ -46,10 +46,42 @@ def build_properties(self): return host_service_properties = self.get_property_mappings(self.input_context, self.service, hosts) - service_properties = host_service_properties.get(hosts[0]).get(DEFAULT_KEY) + + # Workers running on different hosts + if self.input_context.multi_threaded: + logger.debug(f'Running in multithreaded environment...') + threads = list() + for host in hosts: + t = threading.Thread(target=self.build_properties_threaded, args=(host, host_service_properties,)) + threads.append(t) + + for thread in threads: + thread.start() + thread.join() + else: + for host in hosts: + self.build_properties_threaded(host, host_service_properties) + + def build_properties_threaded(self, host, host_service_properties): + + # reset the mapped service properties + self.mapped_service_properties = set() + + service_properties = host_service_properties.get(host).get(DEFAULT_KEY) + + # check if group id exists + key = "group.id" + if key in service_properties: + self.group = service_properties.get(key) + self.inventory.add_group(self.group) + self.inventory.add_child(self.service.group, self.group) + + # Remove host from main group and add in child group + self.inventory.add_host(host, self.group) + self.update_inventory(self.inventory, (self.group, {'kafka_connect_group_id': self.group})) # Build service user group properties - self.__build_daemon_properties(self.input_context, self.service, hosts) + self.__build_daemon_properties(self.input_context, self.service, [host]) # Build service properties self.__build_service_properties(service_properties) @@ -58,13 +90,13 @@ def build_properties(self): self.__build_custom_properties(host_service_properties, self.mapped_service_properties) # Build Command line properties - self.__build_runtime_properties(hosts) + self.__build_runtime_properties([host]) def __build_daemon_properties(self, input_context: InputContext, service: ServiceData, hosts: list): # User group information response = self.get_service_user_group(input_context, service, hosts) - self.update_inventory(self.inventory, response) + self.update_inventory(self.inventory, (self.group, response[1])) def __build_service_properties(self, service_properties): for key, value in vars(class_name).items(): @@ -113,33 +145,65 @@ def _build_monitoring_interceptor_propperty(self, service_prop: dict) -> tuple: self.mapped_service_properties.add(key) return self.group, {"kafka_connect_monitoring_interceptors_enabled": key in 
service_prop} - def _build_connect_group_id(self, service_prop: dict) -> tuple: - key = "group.id" - self.mapped_service_properties.add(key) - return self.group, {"kafka_connect_group_id": service_prop.get(key)} - def _build_service_protocol_port(self, service_prop: dict) -> tuple: key = "listeners" self.mapped_service_properties.add(key) - from urllib.parse import urlparse listener = service_prop.get(key).split(',')[0] - parsed_uri = urlparse(listener) + parsed_uri = get_listener_details(listener) return self.group, { - "kafka_connect_http_protocol": parsed_uri.scheme, - "kafka_connect_rest_port": parsed_uri.port + "kafka_connect_http_protocol": parsed_uri['scheme'], + "kafka_connect_rest_port": parsed_uri['port'] } def _build_advertised_protocol_port(self, service_prop: dict) -> tuple: - key1 = "rest.advertised.listener" - self.mapped_service_properties.add(key1) - key2 = "rest.port" - self.mapped_service_properties.add(key2) + properties_dict = dict() - return self.group, { - "kafka_connect_http_protocol": service_prop.get(key1), - "kafka_connect_rest_port": int(service_prop.get(key2)) - } + key = "rest.advertised.listener" + self.mapped_service_properties.add(key) + if key in service_prop: + properties_dict['kafka_connect_http_protocol'] = service_prop.get(key) + + key = "rest.port" + self.mapped_service_properties.add(key) + if key in service_prop: + properties_dict['kafka_connect_rest_port'] = int(service_prop.get(key)) + + key = 'rest.advertised.port' + self.mapped_service_properties.add(key) + if key in service_prop: + properties_dict['kafka_connect_replicator_port'] = int(service_prop.get(key)) + + return self.group, properties_dict + + def _build_ssl_client_properties(self, service_properties: dict) -> tuple: + property_dict = dict() + key = 'ssl.truststore.location' + self.mapped_service_properties.add(key) + if key in service_properties: + property_dict['kafka_connect_truststore_path'] = service_properties.get(key) + + key = 'ssl.truststore.password' + self.mapped_service_properties.add(key) + if key in service_properties: + property_dict['kafka_connect_truststore_storepass'] = service_properties.get(key) + + key = 'ssl.keystore.location' + self.mapped_service_properties.add(key) + if key in service_properties: + property_dict['kafka_connect_keystore_path'] = service_properties.get(key) + + key = 'ssl.keystore.password' + self.mapped_service_properties.add(key) + if key in service_properties: + property_dict['kafka_connect_keystore_storepass'] = service_properties.get(key) + + key = 'ssl.key.password' + self.mapped_service_properties.add(key) + if key in service_properties: + property_dict['kafka_connect_keystore_keypass'] = service_properties.get(key) + + return self.group, property_dict def _build_ssl_properties(self, service_properties: dict) -> tuple: key = 'rest.advertised.listener' @@ -188,7 +252,7 @@ def _build_mtls_property(self, service_properties: dict) -> tuple: self.mapped_service_properties.add(key) value = service_properties.get(key) if value is not None and value == 'required': - return self.group, {'ssl_mutual_auth_enabled': True} + return self.group, {'kafka_connect_authentication_type': 'mtls'} return self.group, {} def _build_rbac_properties(self, service_prop: dict) -> tuple: diff --git a/discovery/service/kafka_rest.py b/discovery/service/kafka_rest.py index 3ed5b4d34..c1526163a 100644 --- a/discovery/service/kafka_rest.py +++ b/discovery/service/kafka_rest.py @@ -4,7 +4,7 @@ from discovery.utils.services import ConfluentServices, ServiceData from 
discovery.utils.constants import DEFAULT_KEY from discovery.utils.inventory import CPInventoryManager -from discovery.utils.utils import InputContext, Logger, FileUtils +from discovery.utils.utils import InputContext, Logger, FileUtils, get_listener_details logger = Logger.get_logger() @@ -95,12 +95,11 @@ def __build_runtime_properties(self, hosts: list): def _build_service_protocol_port(self, service_prop: dict) -> tuple: key = "listeners" self.mapped_service_properties.add(key) - from urllib.parse import urlparse listener = service_prop.get(key).split(',')[0] - parsed_uri = urlparse(listener) + parsed_uri = get_listener_details(listener) return self.group, { - "kafka_rest_http_protocol": parsed_uri.scheme, - "kafka_rest_port": parsed_uri.port + "kafka_rest_http_protocol": parsed_uri['scheme'], + "kafka_rest_port": parsed_uri['port'] } def _build_monitoring_interceptor_property(self, service_prop: dict) -> tuple: diff --git a/discovery/service/ksql.py b/discovery/service/ksql.py index 64506c413..11e62fd19 100644 --- a/discovery/service/ksql.py +++ b/discovery/service/ksql.py @@ -4,7 +4,7 @@ from discovery.utils.constants import DEFAULT_KEY from discovery.utils.inventory import CPInventoryManager from discovery.utils.services import ConfluentServices, ServiceData -from discovery.utils.utils import InputContext, Logger, FileUtils +from discovery.utils.utils import InputContext, Logger, FileUtils, get_listener_details logger = Logger.get_logger() @@ -101,13 +101,12 @@ def _build_service_id(self, service_prop: dict) -> tuple: def _build_service_protocol_port(self, service_prop: dict) -> tuple: key = "listeners" self.mapped_service_properties.add(key) - from urllib.parse import urlparse listener = service_prop.get(key).split(',')[0] - parsed_uri = urlparse(listener) + parsed_uri = get_listener_details(listener) return self.group, { - "ksql_http_protocol": parsed_uri.scheme, - "ksql_listener_port": parsed_uri.port + "ksql_http_protocol": parsed_uri['scheme'], + "ksql_listener_port": parsed_uri['port'] } def _build_ksql_internal_replication_property(self, service_prop: dict) -> tuple: diff --git a/discovery/service/service.py b/discovery/service/service.py index b7475fb7d..a6de85c85 100644 --- a/discovery/service/service.py +++ b/discovery/service/service.py @@ -1,11 +1,12 @@ import abc import re +import string from abc import ABC from discovery.manager.manager import ServicePropertyManager from discovery.utils.inventory import CPInventoryManager from discovery.utils.services import ConfluentServices, ServiceData -from discovery.utils.utils import InputContext, Logger +from discovery.utils.utils import InputContext, Logger, load_properties_to_dict, MultiOrderedDict logger = Logger.get_logger() @@ -62,13 +63,15 @@ def get_service_host(service: ServiceData, inventory: CPInventoryManager): @staticmethod def get_rocksdb_path(input_context: InputContext, service: ServiceData, hosts: list): - env_details = ServicePropertyManager.get_env_details(input_context, service, hosts) + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, hosts=hosts) return env_details.get("ROCKSDB_SHAREDLIB_DIR", "") @staticmethod def get_jvm_arguments(input_context: InputContext, service: ServiceData, hosts: list): # Build Java runtime overrides - env_details = ServicePropertyManager.get_env_details(input_context, service, hosts) + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, 
hosts=hosts) heap_ops = env_details.get('KAFKA_HEAP_OPTS', '') kafka_ops = env_details.get('KAFKA_OPTS', '') # Remove java agent configurations. These will be populated by other configs. @@ -81,7 +84,7 @@ def get_jvm_arguments(input_context: InputContext, service: ServiceData, hosts: if kafka_ops: jvm_str = f"{jvm_str} {kafka_ops}" - return jvm_str + return jvm_str.strip() @staticmethod def build_telemetry_properties(service_prop: dict) -> dict: @@ -110,14 +113,48 @@ def get_service_facts(input_context: InputContext, service: ServiceData, hosts: return response.get(hosts[0]).get("status") + @staticmethod + def _get_systemd_env_details(file_content) -> dict: + import configparser + env_dict = dict() + if not file_content: + return env_dict + + configParser = configparser.RawConfigParser(dict_type=MultiOrderedDict, strict=False) + configParser.read_string(file_content) + try: + env_details = configParser.get(section='Service', option='Environment') + for line in env_details.splitlines(): + key, value = line.split('=', maxsplit=1) + env_dict[key.strip('"')] = value.strip('"') + except Exception: + pass + return env_dict + + @staticmethod + def get_service_environment_variable(input_context: InputContext, service: ServiceData, hosts: list) -> dict: + # Large values in service environment are getting truncated. As an alternate method, we will get + # it from service and override variables + env_map = dict() + + service_facts = AbstractPropertyBuilder.get_service_facts(input_context, service, hosts) + service_file = service_facts.get('FragmentPath') + override_file = service_facts.get('DropInPaths') + + content = ServicePropertyManager.slurp_remote_file(input_context=input_context, hosts=hosts, file=service_file) + env_map.update(AbstractPropertyBuilder._get_systemd_env_details(content.get(hosts[0]))) + + content = ServicePropertyManager.slurp_remote_file(input_context=input_context, hosts=hosts, file=override_file) + env_map.update(AbstractPropertyBuilder._get_systemd_env_details(content.get(hosts[0]))) + return env_map + @staticmethod def get_service_user_group(input_context: InputContext, service: ServiceData, hosts: list) -> tuple: service_facts = AbstractPropertyBuilder.get_service_facts(input_context, service, hosts) user = service_facts.get("User", None) group = service_facts.get("Group", None) - environment = service_facts.get("Environment", None) - env_details = ServicePropertyManager.parse_environment_details(environment) + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context, service, hosts) # Useful information for future usages # service_file = service_facts.get("FragmentPath", None) @@ -143,6 +180,10 @@ def update_inventory(inventory: CPInventoryManager, data: tuple): mapped_properties = data[1] for key, value in mapped_properties.items(): + # Sanitise key value to ASCII values + if isinstance(value, str): + value = ''.join(filter(lambda x: x in string.printable, value)) + inventory.set_variable(group_name, key, value) @staticmethod @@ -192,32 +233,77 @@ def get_values_from_jaas_config(jaas_config: str) -> dict: return user_dict @staticmethod - def get_monitoring_details(input_context, service, hosts, key) -> dict: + def _get_jolokia_props(env_str: str, service: ServiceData) -> dict: monitoring_props = dict() - env_details = ServicePropertyManager.get_env_details(input_context, service, hosts) - ops_str = env_details.get(key, '') + if 'jolokia.jar' not in env_str: + return monitoring_props + + monitoring_props['jolokia_enabled'] = True + + # Parse 
the jolokia jar location + pattern = 'javaagent:(\S+jolokia.jar)' + match = re.search(pattern, env_str) + if match: + monitoring_props['jolokia_jar_path'] = match.group(1) + + # Parse the jolokia properties + pattern = 'javaagent:\S+jolokia.jar\S+config=(\S+)' + match = re.search(pattern, env_str) + if match: + jolokia_file_path = match.group(1) + monitoring_props[f"{service.group}_jolokia_config"] = jolokia_file_path + jolokia_props = load_properties_to_dict(jolokia_file_path) + if "port" in jolokia_props: + monitoring_props[f"{service.group}_jolokia_port"] = jolokia_props.get("port") - if 'jolokia.jar' in ops_str: - monitoring_props['jolokia_enabled'] = True - # jolokia properies will be managed by cp-ansible plays + return monitoring_props + + @staticmethod + def _get_prometheus_props(env_str: str, service: ServiceData) -> dict: + monitoring_props = dict() + if 'jmx_prometheus_javaagent.jar' not in env_str: + return monitoring_props + + monitoring_props['jmxexporter_enabled'] = True + + # Parse the jar location. Ansible supports a single path for all components + pattern = 'javaagent:(\S+prometheus\S+.jar)' + match = re.search(pattern, env_str) + if match: + monitoring_props['jmxexporter_jar_path'] = match.group(1) + + # Parse the port number + pattern = "\S+prometheus\S+.jar=([0-9]+):" + match = re.search(pattern, env_str) + if match: + monitoring_props[f'{service.group}_jmxexporter_port'] = int(match.group(1)) + + # Parse the properties file + pattern = '\S+prometheus\S+.jar=[0-9]+:(\S+)' + match = re.search(pattern, env_str) + if match: + jmx_file_path = match.group(1) + monitoring_props[f'{service.group}_jmxexporter_config_path'] = jmx_file_path + + return monitoring_props + + @staticmethod + def get_monitoring_details(input_context: InputContext, service: ServiceData, hosts: list, key: str) -> dict: + monitoring_props = dict() + env_details = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, hosts=hosts) + env_str = env_details.get(key, '') - if 'jmx_prometheus_javaagent.jar' in ops_str: - monitoring_props['jmxexporter_enabled'] = True - pattern = "jmx_prometheus_javaagent.jar=([0-9]+):" - match = re.search(pattern, ops_str) - if match: - monitoring_props['jmxexporter_port'] = int(match.group(1)) + monitoring_props.update(AbstractPropertyBuilder._get_jolokia_props(env_str, service)) + monitoring_props.update(AbstractPropertyBuilder._get_prometheus_props(env_str, service)) return monitoring_props @staticmethod def get_secret_protection_master_key(input_context: InputContext, service: ServiceData, hosts: list): - env_cmd = ServicePropertyManager._get_env_from_service(input_context=input_context, - service=service, - hosts=hosts) - pattern = f"CONFLUENT_SECURITY_MASTER_KEY=(\S*)" - match = re.search(pattern, env_cmd) - return match.group(1).rstrip() if match else None + env_dict = AbstractPropertyBuilder.get_service_environment_variable(input_context=input_context, + service=service, hosts=hosts) + return env_dict.get('CONFLUENT_SECURITY_MASTER_KEY', None) @staticmethod def get_audit_log_properties(input_context: InputContext, hosts: str, mds_user: str, mds_password: str) -> tuple: diff --git a/discovery/system/system.py b/discovery/system/system.py index c2e6e7317..7c242e9cf 100644 --- a/discovery/system/system.py +++ b/discovery/system/system.py @@ -36,6 +36,7 @@ def with_service_host_mappings(self): def with_ansible_variables(self): self.inventory.set_variable('all', 'ansible_user', self.input_context.ansible_user) + 
self.inventory.set_variable('all', 'ansible_password', self.input_context.ansible_password) self.inventory.set_variable('all', 'ansible_become', self.input_context.ansible_become) self.inventory.set_variable('all', 'ansible_connection', self.input_context.ansible_connection) self.inventory.set_variable('all', 'ansible_become_user', self.input_context.ansible_become_user) @@ -70,22 +71,25 @@ def with_archive_properties(self): confluent_services = ConfluentServices(self.input_context) # check if we have kafka broker hosts available. - host = None - if service_facts.get(confluent_services.KAFKA_BROKER().name): - host = service_facts.get(confluent_services.KAFKA_BROKER().name)[0] - else: - if service_facts.get(confluent_services.ZOOKEEPER().name): - host = service_facts.get(confluent_services.ZOOKEEPER().name)[0] + aHost = None + zk_service_name = confluent_services.ZOOKEEPER().name + bk_service_name = confluent_services.KAFKA_BROKER().name - if not host: + for host, data in service_facts.items(): + service_keys = service_facts.get(host).get('services').keys() + if zk_service_name in service_keys or bk_service_name in service_keys: + aHost = host + break + + if not aHost: logger.error(f"Cannot find any host with either Broker or Zookeeper service running.\n" f"Cannot proceed with service property mappings.") return service_details = SystemPropertyManager.get_service_details(self.input_context, confluent_services.KAFKA_BROKER(), - [host]) - exec_start = service_details.get(host).get('status', {}).get('ExecStart', '') + [aHost]) + exec_start = service_details.get(aHost).get('status', {}).get('ExecStart', '') pattern = '.*path=(.*?)[\w\-\d\.]*\/bin' match = re.search(pattern, exec_start) diff --git a/discovery/utils/inventory.py b/discovery/utils/inventory.py index f75edeb2c..0bb8dbb35 100644 --- a/discovery/utils/inventory.py +++ b/discovery/utils/inventory.py @@ -21,7 +21,9 @@ def __init__(self, input_context: InputContext = None): def generate_final_inventory(self): data = self.get_inventory_data() - InventorySanitizer.sanitize(data, self.input_context) + # There can be dynamic child groups of defined group. + # We need to revisit the sanitize logic. 
+ # InventorySanitizer.sanitize(data, self.input_context) self.put_inventory_data(data) def get_inventory_data(self) -> dict: diff --git a/discovery/utils/utils.py b/discovery/utils/utils.py index e62c27fbc..e732cd256 100644 --- a/discovery/utils/utils.py +++ b/discovery/utils/utils.py @@ -1,13 +1,15 @@ import argparse import logging import sys -from os.path import exists +from collections import OrderedDict from os.path import dirname +from os.path import exists from os.path import realpath import yaml from jproperties import Properties + def singleton(class_): instances = {} @@ -34,6 +36,14 @@ def load_properties_to_dict(content): return props +class MultiOrderedDict(OrderedDict): + def __setitem__(self, key, value): + if isinstance(value, list) and key in self: + self[key].extend(value) + else: + super().__setitem__(key, value) + + class Logger: """ Logging levels - https://docs.python.org/3/howto/logging.html @@ -83,8 +94,9 @@ class InputContext: ansible_connection = None ansible_become = False ansible_user = None + ansible_password = None ansible_hosts = None - ansible_become_user = None + ansible_become_user = 'root' ansible_become_method = 'sudo' ansible_ssh_private_key_file = None ansible_ssh_extra_args = None @@ -93,27 +105,39 @@ class InputContext: from_version = None verbosity = 0 service_overrides = dict() + skip_validation = False + ansible_become_password = None + ansible_common_remote_group = None + multi_threaded = True def __init__(self, ansible_hosts, ansible_connection, ansible_user, + ansible_password, ansible_become, ansible_become_user, ansible_become_method, + ansible_become_password, + ansible_common_remote_group, ansible_ssh_private_key_file, verbosity, ansible_ssh_extra_args, - ansible_python_interpreter=None, - from_version=None, - output_file=None, - service_overrides = {}): + ansible_python_interpreter, + from_version, + output_file, + service_overrides, + skip_validation, + multi_threaded): self.ansible_hosts = ansible_hosts self.ansible_connection = ansible_connection self.ansible_user = ansible_user + self.ansible_password = ansible_password self.ansible_become = ansible_become self.ansible_become_user = ansible_become_user self.ansible_become_method = ansible_become_method + self.ansible_become_password = ansible_become_password + self.ansible_common_remote_group = ansible_common_remote_group self.ansible_ssh_private_key_file = ansible_ssh_private_key_file self.from_version = from_version self.ansible_ssh_extra_args = ansible_ssh_extra_args @@ -121,9 +145,12 @@ def __init__(self, self.verbosity = verbosity self.output_file = output_file self.service_overrides = service_overrides + self.skip_validation = skip_validation + self.multi_threaded = multi_threaded + class Arguments: - input_context:InputContext = None + input_context: InputContext = None @staticmethod def parse_arguments(): @@ -135,8 +162,10 @@ def parse_arguments(): parser.add_argument("--input", type=str, required=True, help="Input Inventory file") parser.add_argument("--limit", type=str, nargs="*", help="Limit to list of hosts") parser.add_argument("--from_version", type=str, help="Target cp cluster version") - parser.add_argument("--verbosity", type=int, help="Log level") + parser.add_argument("--verbosity", type=int, default=1, help="Log level") parser.add_argument("--output_file", type=str, help="Generated output inventory file") + parser.add_argument("--skip_validation", type=bool, default=False, help="Skip validations") + 
parser.add_argument("--multi_threaded", type=bool, default=True, help="Use multi threaded environment") # Read arguments from command line return parser.parse_args() @@ -162,17 +191,24 @@ def get_input_context(cls, args) -> InputContext: vars = cls.get_vars(args) Arguments.input_context = InputContext(ansible_hosts=hosts, ansible_connection=vars.get("ansible_connection"), - ansible_become=vars.get("ansible_become"), + ansible_become=vars.get("ansible_become", False), ansible_become_user=vars.get("ansible_become_user"), - ansible_become_method=vars.get("ansible_become_method"), + ansible_become_method=vars.get("ansible_become_method", 'sudo'), + ansible_become_password=vars.get("ansible_become_password", None), + ansible_common_remote_group=vars.get("ansible_common_remote_group", + None), ansible_ssh_private_key_file=vars.get("ansible_ssh_private_key_file"), ansible_user=vars.get("ansible_user"), + ansible_password=vars.get("ansible_password"), ansible_ssh_extra_args=vars.get("ansible_ssh_extra_args"), - ansible_python_interpreter=vars.get("ansible_python_interpreter"), + ansible_python_interpreter=vars.get("ansible_python_interpreter", + 'auto'), output_file=vars.get("output_file"), verbosity=vars.get("verbosity", 3), from_version=vars.get("from_version"), - service_overrides = vars.get("service_overrides")) + service_overrides=vars.get("service_overrides", dict()), + skip_validation=vars.get('skip_validation'), + multi_threaded=vars.get('multi_threaded')) return Arguments.input_context @classmethod @@ -220,7 +256,7 @@ def get_hosts(cls, args): @classmethod def get_vars(cls, args) -> dict: inventory = cls.__parse_inventory_file(args) - vars = {} + vars = dict() # Check vars in the inventory file if inventory: @@ -238,11 +274,11 @@ def get_vars(cls, args) -> dict: if args.output_file: vars['output_file'] = args.output_file - # set default values of some variables explicitly - vars['ansible_python_interpreter'] = vars.get('ansible_python_interpreter', 'auto') - vars['ansible_become_method'] = vars.get('ansible_become_method', 'sudo') - vars['ansible_become'] = vars.get('ansible_become', False) - vars['service_overrides'] = vars.get('service_overrides', {}) + if args.skip_validation: + vars['skip_validation'] = bool(args.skip_validation) + + if args.skip_validation: + vars['multi_threaded'] = bool(args.multi_threaded) return vars @@ -307,7 +343,7 @@ def get_kafka_replicator_configs(name): return FileUtils.__read_service_configuration_file("kafka_replicator.yml").get(name, []) -def _host_group_declared_in_inventory(hosts:dict, input_context:InputContext) -> bool: +def _host_group_declared_in_inventory(hosts: dict, input_context: InputContext) -> bool: from discovery.utils.services import ConfluentServices from discovery.utils.constants import DEFAULT_GROUP_NAME @@ -325,3 +361,13 @@ def _host_group_declared_in_inventory(hosts:dict, input_context:InputContext) -> def terminate_script(message: str = None): logger.error(message) sys.exit(message) + +def get_listener_details(listener): + """ + Extract scheme, port and host from listener + """ + listener_details = listener.split(':') + if len(listener_details) != 3: + logger.warning("Can not get listener scheme, port and host") + + return {'scheme':listener_details[0].lower(), 'host':listener_details[1][2:], 'port': listener_details[2]} diff --git a/docs/DEVELOPMENT_GUIDE.md b/docs/DEVELOPMENT_GUIDE.md index b862970aa..86452f59e 100644 --- a/docs/DEVELOPMENT_GUIDE.md +++ b/docs/DEVELOPMENT_GUIDE.md @@ -26,7 +26,7 @@ Each Confluent component has its 
own role, with the name ``. Wit template: (2) src: override.conf.j2 dest: "{{ kafka_broker.systemd_override }}" (3) - mode: 0640 (4) + mode: '640' (4) owner: "{{kafka_broker_user}}" (5) group: "{{kafka_broker_group}}" notify: restart kafka (6) @@ -35,7 +35,7 @@ Each Confluent component has its own role, with the name ``. Wit 1. A name clearly defining what the task accomplishes, using capital letters 2. Uses an idempotent ansible module whenever possible 3. Make use of variables instead of hard coding paths -4. For file creation use 0640 permission, for directory creation use 0750 permission. There are some exceptions, but be sure to secure files. +4. For file creation use '640' permission, for directory creation use '750' permission. There are some exceptions, but be sure to secure files. 5. Proper ownership set 6. Trigger component restart handler when necessary @@ -168,7 +168,7 @@ Now the schema_registry_final_properties property set eventually gets written to owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" state: directory - mode: 0750 + mode: '750' with_items: "{{ kafka_broker_final_properties['log.dirs'].split(',') }}" ``` diff --git a/docs/MOLECULE_SCENARIOS.md b/docs/MOLECULE_SCENARIOS.md index fc6f00959..b5b836b7f 100644 --- a/docs/MOLECULE_SCENARIOS.md +++ b/docs/MOLECULE_SCENARIOS.md @@ -68,9 +68,9 @@ Validates that Java 17 is in Use *** -### molecule/archive-plain-rhel +### molecule/archive-plain-rhel-fips -#### Scenario archive-plain-rhel test's the following: +#### Scenario archive-plain-rhel-fips tests the following: Archive Installation of Confluent Platform on RHEL9. @@ -88,7 +88,7 @@ Custom log dirs for all components. Logredactor enabled for all components. -#### Scenario archive-plain-rhel verify test's the following: +#### Scenario archive-plain-rhel-fips verify tests the following: Validates that SASL SSL protocol is set across all components. @@ -98,6 +98,8 @@ Validates that FIPS security is enabled on the Brokers. Validates that logredactor is functioning properly for all components as per the rule file. +Validates that FIPS is in use in OpenSSL. + *** ### molecule/archive-plain-ubuntu @@ -310,9 +312,9 @@ Validates that client ID's are set correctly on Replicator. *** -### molecule/kafka-connect-replicator-plain-kerberos-rhel +### molecule/kafka-connect-replicator-plain-kerberos-rhel-fips -#### Scenario kafka-connect-replicator-plain-kerberos-rhel test's the following: +#### Scenario kafka-connect-replicator-plain-kerberos-rhel-fips tests the following: Installation of Confluent Platform on centos8 with two distinct clusters. @@ -328,7 +330,9 @@ Replicator Produces to Cluster2 using Kerberos with Custom Certs for TLS. Tests custom client IDs for Replicator. -#### Scenario kafka-connect-replicator-plain-kerberos-rhel verify test's the following: +FIPS enabled on both clusters. + +#### Scenario kafka-connect-replicator-plain-kerberos-rhel-fips verify tests the following: Validates that the Console Consumer can consume data from cluster2, proving that data has been replicated from cluster1 (MDS). @@ -338,6 +342,8 @@ Validates that Replicator is using SASL PLAIN with TLS to Consume from Cluster1 Validates that client ID's are set correctly on Replicator. +Validates that FIPS is in use in OpenSSL. 
+ *** ### molecule/kerberos-customcerts-rhel @@ -402,9 +408,9 @@ Validates that Control Center Can connect to each KSQL cluster *** -### molecule/mtls-custombundle-rhel +### molecule/mtls-custombundle-rhel-fips -#### Scenario mtls-custombundle-rhel test's the following: +#### Scenario mtls-custombundle-rhel-fips tests the following: Installation of Confluent Platform Edition on centos7. Tests custom filtering properties for Secrets Protection. TLS is disabled for Zookeeper. -#### Scenario mtls-custombundle-rhel verify test's the following: +FIPS enabled. + +#### Scenario mtls-custombundle-rhel-fips verify tests the following: Validates that Keystore is present. @@ -488,9 +496,9 @@ Validates that Java 11 is in use. *** -### molecule/mtls-java11-rhel +### molecule/mtls-java11-rhel-fips -#### Scenario mtls-java11-rhel test's the following: +#### Scenario mtls-java11-rhel-fips tests the following: Installation of Confluent Platform on RHEL9. MTLS enabled. Java 11. -#### Scenario mtls-java11-rhel verify test's the following: +FIPS enabled. + +#### Scenario mtls-java11-rhel-fips verify tests the following: Validates that Java 11 is in use. Validates that FIPS security is enabled on the Brokers. +Validates that FIPS is in use in OpenSSL. + *** ### molecule/mtls-java8-ubuntu @@ -554,6 +566,8 @@ Validates mapping rules for ACLs. Validates ACL users. +Validates ACL creation. + *** ### molecule/multi-ksql-connect-rhel @@ -584,9 +598,9 @@ Validates that Control Center Can connect to each KSQL cluster. *** -### molecule/plain-customcerts-rhel +### molecule/plain-customcerts-rhel-fips -#### Scenario plain-customcerts-rhel test's the following: +#### Scenario plain-customcerts-rhel-fips tests the following: Installation of Confluent Platform on centos8. SASL Plain enabled. Custom certificates on remote host -#### Scenario plain-customcerts-rhel verify test's the following: +FIPS enabled. + +#### Scenario plain-customcerts-rhel-fips verify tests the following: Validates that keystores are present on all components. Validates that SASL mechanism is set to PLAIN on all components. +Validates that FIPS is in use in OpenSSL. + *** ### molecule/plain-erp-tls-rhel @@ -868,6 +886,8 @@ Kafka Broker Customer Listener RBAC Additional System Admin. +SSO authentication using OIDC in Control Center using Azure IdP. + #### Scenario rbac-mds-kerberos-debian verify tests the following: Validates that GSSAPI protocol is set on Cluster2. @@ -878,6 +898,8 @@ Validates that all components on Cluster2 are pointing to the MDS on Cluster1. Validates that Java 17 is in Use +Validates the OIDC authenticate API for SSO in Control Center. + *** ### molecule/rbac-mds-kerberos-mtls-custom-rhel @@ -948,9 +970,9 @@ Validates that all components on Cluster2 are pointing to the MDS on Cluster1. *** -### molecule/rbac-mds-mtls-custom-rhel +### molecule/rbac-mds-mtls-custom-rhel-fips -#### Scenario rbac-mds-mtls-custom-rhel test's the following: +#### Scenario rbac-mds-mtls-custom-rhel-fips tests the following: Installs two Confluent Platform Clusters on centos8. @@ -962,13 +984,13 @@ Custom TLS certificates. MTLS enabled on both clusters. -FIPS enabled on Cluster2. +FIPS enabled on both clusters. Kafka Broker Customer Listener. RBAC Additional System Admin. -#### Scenario rbac-mds-mtls-custom-rhel verify test's the following: +#### Scenario rbac-mds-mtls-custom-rhel-fips verify tests the following: Validates that Audit logs are working on topic creation. 
@@ -978,6 +1000,8 @@ Validates that MDS is HTTP on Cluster1 (MDS). Validates that all components on Cluster2 are pointing to the MDS on Cluster1. +Validates that FIPS is in use on both clusters. + *** ### molecule/rbac-mds-mtls-existing-keystore-truststore-ubuntu @@ -1008,9 +1032,9 @@ Validates that TLS CN is being registered as super user. *** -### molecule/rbac-mds-plain-custom-rhel +### molecule/rbac-mds-plain-custom-rhel-fips -#### Scenario rbac-mds-plain-custom-rhel test's the following: +#### Scenario rbac-mds-plain-custom-rhel-fips test's the following: Installs two Confluent Platform Clusters on centos8. @@ -1026,7 +1050,11 @@ Kafka Broker Customer Listener. RBAC Additional System Admin. -#### Scenario rbac-mds-plain-custom-rhel verify test's the following: +SSO authentication using OIDC in Control center using KeyCloak IdP + +FIPS enabled on both clusters. + +#### Scenario rbac-mds-plain-custom-rhel-fips verify test's the following: Validates that protocol is sasl plain. @@ -1034,6 +1062,10 @@ Validates that MDS is HTTPs on Cluster1 (MDS). Validates that all components on Cluster2 are pointing to the MDS on Cluster1. +Validates OIDC authenticate api for SSO in Control Center + +Validates that FIPS is in use on both clusters. + *** ### molecule/rbac-mds-scram-custom-rhel @@ -1090,9 +1122,9 @@ Validates that TLS CN is being registered as super user. *** -### molecule/rbac-mtls-rhel +### molecule/rbac-mtls-rhel-fips -#### Scenario rbac-mtls-rhel test's the following: +#### Scenario rbac-mtls-rhel-fips test's the following: Installs Confluent Platform Cluster on centos8. @@ -1110,7 +1142,7 @@ RBAC Additional System Admin. Provided SSL Principal Mapping rule -#### Scenario rbac-mtls-rhel verify test's the following: +#### Scenario rbac-mtls-rhel-fips verify test's the following: Validates TLS version across all components. @@ -1124,6 +1156,8 @@ Validates Cluster Registry. Validates the filter resolve_principal with different ssl.mapping.rule +Validates that FIPS is in use in OpenSSL. + *** ### molecule/rbac-mtls-rhel8 @@ -1138,10 +1172,14 @@ MTLS enabled. Kafka Broker Customer Listener. +SSO authentication using OIDC in Control center using Okta IdP + #### Scenario rbac-mtls-rhel8 verify test's the following: Validates TLS keysizes across all components. +Validates OIDC authenticate api for SSO in Control Center + *** ### molecule/rbac-plain-provided-debian @@ -1178,9 +1216,9 @@ Validates LDAP authentication *** -### molecule/rbac-scram-custom-rhel +### molecule/rbac-scram-custom-rhel-fips -#### Scenario rbac-scram-custom-rhel test's the following: +#### Scenario rbac-scram-custom-rhel-fips test's the following: Installs Confluent Platform Cluster on centos8. @@ -1196,7 +1234,11 @@ Additional Scram Users added. Kafka Connect Custom arguments. -#### Scenario rbac-scram-custom-rhel verify test's the following: +SSO authentication using OIDC in Control center using Azure IdP + +FIPS enabled + +#### Scenario rbac-scram-custom-rhel-fips verify test's the following: Validates keystore is present across all components. @@ -1208,6 +1250,10 @@ Validates total number of clusters for user2. Validates truststore across all components. +Validates OIDC authenticate api for SSO in Control Center + +Validates that FIPS is in use in OpenSSL. + *** ### molecule/scram-rhel @@ -1224,6 +1270,34 @@ Validates that SCRAM is enabled on all components. 
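Most of the "Validates that ..." items above are property assertions against the rendered component configs, implemented with the check_property helper used throughout these verify playbooks. A sketch of what a SCRAM assertion might look like; the property name and expected value here are illustrative, and the scenario's own verify.yml is authoritative:

```yaml
- name: Verify - kafka_broker
  hosts: kafka_broker
  gather_facts: false
  tasks:
    - import_role:
        name: confluent.test
        tasks_from: check_property.yml
      vars:
        file_path: /etc/kafka/server.properties
        property: sasl.enabled.mechanisms
        expected_value: SCRAM-SHA-512   # illustrative; depends on sasl_protocol
```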
*** +### molecule/zookeeper-digest-mtls-secrets-rhel + +#### Scenario zookeeper-digest-mtls-secrets-rhel tests the following: + +Installs Confluent Platform on centos8 + +Enables SASL SCRAM Auth on Zookeeper. + +TLS enabled. + +Custom zookeeper root. + +Secrets Protection enabled. + +Jolokia has TLS disabled. + +#### Scenario zookeeper-digest-mtls-secrets-rhel verify tests the following: + +Validates that Confluent CLI is installed. + +Validates that Zookeeper is using SCRAM for auth. + +Validates that other components are using SCRAM for auth. + +Validates that Secrets protection is applied to the correct properties. + +*** + ### molecule/zookeeper-digest-rhel #### Scenario zookeeper-digest-rhel tests the following: @@ -1288,9 +1362,9 @@ Validates that Secrets protection is applied to the correct properties. *** -### molecule/zookeeper-mtls-secrets-rhel +### molecule/zookeeper-tls-rhel-fips -#### Scenario zookeeper-mtls-secrets-rhel tests the following: +#### Scenario zookeeper-tls-rhel-fips tests the following: Installs Confluent Platform on centos8 @@ -1300,41 +1374,17 @@ TLS enabled. Custom zookeeper root. -Secrets Protection enabled. - Jolokia has TLS disabled. -#### Scenario zookeeper-mtls-secrets-rhel verify tests the following: - -Validates that Confluent CLI is installed. - -Validates that Zookeeper is using SCRAM for auth. - -Validates that other components are using SCRAM for auth. - -Validates that Secrets protection is applied to the correct properties. - -*** - -### molecule/zookeeper-tls-rhel - -#### Scenario zookeeper-tls-rhel tests the following: - -Installs Confluent Platform on centos8 +FIPS enabled -Enables SASL SCRAM Auth on Zookeeper. - -TLS enabled. - -Custom zookeeper root. - -Jolokia has TLS disabled. - -#### Scenario zookeeper-tls-rhel verify tests the following: +#### Scenario zookeeper-tls-rhel-fips verify tests the following: Validates that Zookeeper is using TLS. Validates that other components are using SCRAM for auth. +Validates that FIPS is in use in OpenSSL. + *** diff --git a/docs/VARIABLES.md b/docs/VARIABLES.md index dd3a41c56..26a4719ab 100644 --- a/docs/VARIABLES.md +++ b/docs/VARIABLES.md @@ -8,7 +8,7 @@ Below are the supported variables for the role variables Version of Confluent Platform to install -Default: 7.4.0 +Default: 7.5.3 *** @@ -134,7 +134,7 @@ Default: false ### fips_enabled -Boolean to have cp-ansible configure components with FIPS security settings. Must have ssl_enabled: true and use Java 8 or 11. Only valid for self signed certs and ssl_custom_certs: true, not ssl_provided_keystore_and_truststore: true. +Boolean to have cp-ansible configure components with FIPS security settings. Must have ssl_enabled: true. Only valid for self signed certs and ssl_custom_certs: true, not ssl_provided_keystore_and_truststore: true. Refer to the CP-Ansible docs for prerequisites. Default: false @@ -456,7 +456,7 @@ Default: "/usr/local/bin/confluent" Confluent CLI version to download (e.g. "1.9.0"). Support matrix https://docs.confluent.io/platform/current/installation/versions-interoperability.html#confluent-cli -Default: 3.2.1 +Default: 3.30.1 *** @@ -760,7 +760,7 @@ Default: "{{sasl_protocol if sasl_protocol == 'kerberos' else 'none'}}" Authentication to put on ZK Server to Server connections. Available options: [mtls, digest, digest_over_tls].
-Default: "{{ 'mtls' if zookeeper_ssl_enabled and zookeeper_ssl_mutual_auth_enabled else zookeeper_sasl_protocol }}" +Default: "{% if zookeeper_ssl_enabled and zookeeper_ssl_mutual_auth_enabled %}mtls{% elif zookeeper_sasl_protocol == 'digest' %}digest{% else %}none{% endif %}" *** @@ -902,7 +902,7 @@ Default: 3888 ### zookeeper_copy_files -Use to copy files from control node to zookeeper hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to zookeeper hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -958,7 +958,7 @@ Default: "{{ssl_mutual_auth_enabled}}" ### kafka_controller_sasl_protocol -SASL Mechanism for controller Server to Server and Server to Client Authentication. Options are none, kerberos, digest. Server to server auth only working for digest-md5 +SASL Mechanism for controller Server to Server and Server to Client Authentication. Options are plain, kerberos, none Default: "{{sasl_protocol}}" @@ -1078,7 +1078,7 @@ Default: /opt/prometheus/kafka.yml ### kafka_controller_copy_files -Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -1118,7 +1118,7 @@ Default: {} ### kafka_controller_rest_proxy_enabled -Boolean to enable the embedded rest proxy within Kafka. NOTE- Embedded Rest Proxy must be enabled if RBAC is enabled and Confluent Server must be enabled +Boolean to enable the embedded rest proxy within Kraft Controller. Not yet supported. Default: false @@ -1302,7 +1302,7 @@ Default: /opt/prometheus/kafka.yml ### kafka_broker_copy_files -Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -1534,7 +1534,7 @@ Default: 8078 ### schema_registry_copy_files -Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). 
Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -1718,7 +1718,7 @@ Default: 8075 ### kafka_rest_copy_files -Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -1942,7 +1942,7 @@ Default: 8077 ### kafka_connect_copy_files -Use to copy files from control node to connect hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to connect hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -2166,7 +2166,7 @@ Default: 8076 ### ksql_copy_files -Use to copy files from control node to ksqlDB hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to ksqlDB hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -2294,7 +2294,7 @@ Default: "{{control_center_default_log_dir}}" ### control_center_copy_files -Use to copy files from control node to Control Center hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +Use to copy files from control node to Control Center hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. Default: [] @@ -2380,6 +2380,94 @@ Default: "{{mds_ssl_enabled}}" *** +### sso_mode + +SSO mode for C3. Possible values: oidc, not supported in ccs. 
If enabling oidc, you must set sso_groups_claim, sso_sub_claim, sso_jwks_uri, sso_authorize_uri, sso_token_uri, sso_issuer_url, sso_client_id, sso_client_password in MDS + +Default: none + +*** + +### sso_groups_claim + +Claim name for groups in the JWT + +Default: groups + +*** + +### sso_sub_claim + +Claim name for the subject (sub) in the JWT + +Default: sub + +*** + +### sso_issuer_url + +The issuer URL, which is typically the authorization server's URL. This value is compared to the issuer claim in the JWT token for verification + +Default: none + +*** + +### sso_jwks_uri + +JSON Web Key Set (JWKS) URI + +Default: none + +*** + +### sso_authorize_uri + +Endpoint for an OAuth authorization request + +Default: none + +*** + +### sso_token_uri + +IdP token endpoint, from which MDS requests a token + +Default: none + +*** + +### sso_client_id + +Client ID for authorize and token requests to the IdP + +Default: none + +*** + +### sso_client_password + +Client password for authorize and token requests to the IdP + +Default: none + +*** + +### sso_groups_scope + +Additional scope needed, if any, to include groups in the token; optional depending on the IdP. Possible values: groups, openid, offline_access, etc. + +Default: none + +*** + +### sso_refresh_token + +Configures whether the offline_access scope is requested in the authorization URI. Set this to false if offline tokens are not allowed for the user or client in the IdP + +Default: true + +*** + ### mds_super_user LDAP User which will be granted super user permissions to create role bindings in the MDS @@ -2396,6 +2484,14 @@ Default: password *** +### mds_retries + +Parameter to increase the number of retries for MDS API requests + +Default: 30 + +*** + ### kafka_broker_ldap_user LDAP User for Kafka's Embedded Rest Service to authenticate as @@ -2678,7 +2774,7 @@ Default: "{{rbac_component_additional_system_admins}}" ### secrets_protection_enabled -Boolean to enable secrets protection on all components except Zookeeper. Starting from CP 7.1.0, secrets protection will work only with RBAC +Boolean to enable secrets protection on all components except Zookeeper. Default: false diff --git a/docs/hosts_example.yml b/docs/hosts_example.yml index 6d2afed90..1c204fb3b 100644 --- a/docs/hosts_example.yml +++ b/docs/hosts_example.yml @@ -23,11 +23,14 @@ all: ## For SASL/GSSAPI uncomment this line and see Kerberos Configuration properties below # sasl_protocol: kerberos - #### Zookeeper SASL Authentication #### - ## Zookeeper can have Kerberos (GSSAPI) or Digest-MD5 SASL Authentication - ## By default when sasl_protocol = kerberos, zookeeper will also use sasl kerberos. It can be configured with: - ## When a mechanism is selected, zookeeper.set.acl=true is added to kafka's server.properties. Note: property not added when using mTLS, set manually with Custom Properties - # zookeeper_sasl_protocol: + #### Zookeeper Server - Server Authentication #### + ## Note: kerberos is not a supported option for Server to Server Authentication + # zookeeper_quorum_authentication_type: + + #### Zookeeper Client - Server Authentication #### + ## When a Client Authentication method is either digest or kerberos, zookeeper.set.acl=true is added to kafka's server.properties. Note: property not added when using mTLS, set manually with Custom Properties + ## By default when sasl_protocol = kerberos, zookeeper Client to Server Authentication will also use kerberos.
It can be configured with: + # zookeeper_client_authentication_type: #### Kafka Controller SASL Authentication #### ## Controller can have Kerberos, Plain or Oauth Authentication diff --git a/docs/sample_inventories/rbac_sso_c3.yml b/docs/sample_inventories/rbac_sso_c3.yml new file mode 100644 index 000000000..ad68d194c --- /dev/null +++ b/docs/sample_inventories/rbac_sso_c3.yml @@ -0,0 +1,108 @@ +--- +### RBAC SSL - SSO in Control Center +## +## The following is an example inventory file of the configuration required for setting up Confluent Platform with: +# RBAC enabled, SASL Plain protocol, Single Sign On in Confluent Control Center + +all: + vars: + ansible_connection: ssh + ansible_user: ec2-user + ansible_become: true + ansible_ssh_private_key_file: /home/ec2-user/guest.pem + + ## TLS Configuration - Custom Certificates + ssl_enabled: true + #### SASL Authentication Configuration - Choose the one that suit your requirements and configure according #### + ## By default there will be no SASL Authentication + ## For SASL/PLAIN uncomment this line: + sasl_protocol: plain + ## For SASL/SCRAM uncomment this line: + # sasl_protocol: scram + ## For SASL/GSSAPI uncomment this line and see Kerberos Configuration properties below + # sasl_protocol: kerberos + + ## RBAC Configuration + rbac_enabled: true + + ## LDAP CONFIGURATION + kafka_broker_custom_properties: + ldap.java.naming.factory.initial: com.sun.jndi.ldap.LdapCtxFactory + ldap.com.sun.jndi.ldap.read.timeout: 3000 + ldap.java.naming.provider.url: ldaps://ldap1:636 + ldap.java.naming.security.principal: uid=mds,OU=rbac,DC=example,DC=com + ldap.java.naming.security.credentials: password + ldap.java.naming.security.authentication: simple + ldap.user.search.base: OU=rbac,DC=example,DC=com + ldap.group.search.base: OU=rbac,DC=example,DC=com + ldap.user.name.attribute: uid + ldap.user.memberof.attribute.pattern: CN=(.*),OU=rbac,DC=example,DC=com + ldap.group.name.attribute: cn + ldap.group.member.attribute.pattern: CN=(.*),OU=rbac,DC=example,DC=com + ldap.user.object.class: account + + ## LDAP USERS + mds_super_user: mds + mds_super_user_password: password + kafka_broker_ldap_user: kafka_broker + kafka_broker_ldap_password: password + schema_registry_ldap_user: schema_registry + schema_registry_ldap_password: password + kafka_connect_ldap_user: connect_worker + kafka_connect_ldap_password: password + ksql_ldap_user: ksql + ksql_ldap_password: password + kafka_rest_ldap_user: rest_proxy + kafka_rest_ldap_password: password + control_center_ldap_user: control_center + control_center_ldap_password: password + + ## (OPTIONAL) SYSTEM ADMINS (SYSTEM ADMIN ROLEBINDINGS) + # A list of principals can be specified for all the componentes using 'rbac_component_additional_system_admins:' + # Or individual components [component]_additional_system_admins (i.e. 
schema_registry_additional_system_admins) + + ## Varibles to enable SSO in Control Center + sso_mode: oidc + # necessary configs in MDS server for sso in C3 + sso_groups_claim: groups # default + sso_sub_claim: sub # default + sso_groups_scope: groups # scope is optional, depending on the Idp + sso_issuer_url: + sso_jwks_uri: + sso_authorize_uri: + sso_token_uri: + sso_client_id: + sso_client_password: + sso_refresh_token: true # defaults to true + +zookeeper: + hosts: + demo-zk-0: + demo-zk-1: + demo-zk-2: + +kafka_broker: + hosts: + demo-broker-0: + demo-broker-1: + demo-broker-2: + +schema_registry: + hosts: + demo-sr-0: + +kafka_connect: + hosts: + demo-connect-0: + +kafka_rest: + hosts: + demo-rest-0: + +ksql: + hosts: + demo-ksql-0: + +control_center: + hosts: + demo-c3-0: diff --git a/galaxy.yml b/galaxy.yml index 3a5b44473..11d15e49f 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,7 +1,7 @@ --- namespace: confluent name: platform -version: 7.4.0 +version: 7.5.3 readme: README.md authors: - Confluent Ansible Community diff --git a/molecule/Dockerfile-rhel9-java11.j2 b/molecule/Dockerfile-rhel9-java11.j2 index c96945eee..6083c0c97 100644 --- a/molecule/Dockerfile-rhel9-java11.j2 +++ b/molecule/Dockerfile-rhel9-java11.j2 @@ -39,9 +39,6 @@ RUN yum -y install java-11-openjdk \ procps \ procps-ng -# Workaround to fix GPG signature validation issue, remove when it is fixed in CP 7.5 release -RUN update-crypto-policies --set DEFAULT:SHA1 - {% set DEFAULT_PACKAGE_VER = lookup('pipe', "awk '/confluent_package_version:/ {print $2}' $MOLECULE_PROJECT_DIRECTORY/roles/variables/defaults/main.yml" ) %} {% set PACKAGE_VER = lookup('env', 'VERSION') | default(DEFAULT_PACKAGE_VER, true) %} {% set REPO_VER = PACKAGE_VER | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') %} diff --git a/molecule/Dockerfile-rhel9-java17.j2 b/molecule/Dockerfile-rhel9-java17.j2 index b9d49aa91..7454c6d2f 100644 --- a/molecule/Dockerfile-rhel9-java17.j2 +++ b/molecule/Dockerfile-rhel9-java17.j2 @@ -33,9 +33,6 @@ RUN yum -y install java-17-openjdk \ procps-ng \ tar -# Workaround to fix GPG signature validation issue, remove when it is fixed in CP 7.5 release -RUN update-crypto-policies --set DEFAULT:SHA1 - {% set DEFAULT_PACKAGE_VER = lookup('pipe', "awk '/confluent_package_version:/ {print $2}' $MOLECULE_PROJECT_DIRECTORY/roles/variables/defaults/main.yml" ) %} {% set PACKAGE_VER = lookup('env', 'VERSION') | default(DEFAULT_PACKAGE_VER, true) %} {% set REPO_VER = PACKAGE_VER | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') %} diff --git a/molecule/Dockerfile-rhel9-java8.j2 b/molecule/Dockerfile-rhel9-java8.j2 new file mode 100644 index 000000000..e5d9b7d0e --- /dev/null +++ b/molecule/Dockerfile-rhel9-java8.j2 @@ -0,0 +1,49 @@ +FROM {{ item.image }} +LABEL maintainer="CP Ansible" +ENV container docker + +RUN microdnf -y --nodocs install yum +RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == \ +systemd-tmpfiles-setup.service ] || rm -f $i; done); \ +rm -f /lib/systemd/system/multi-user.target.wants/*;\ +rm -f /etc/systemd/system/*.wants/*;\ +rm -f /lib/systemd/system/local-fs.target.wants/*; \ +rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ +rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ +rm -f /lib/systemd/system/basic.target.wants/*;\ +rm -f /lib/systemd/system/anaconda.target.wants/*; + + +# Install requirements. +RUN yum -y install rpm \ + && yum -y update \ + && yum -y install sudo vim-enhanced \ + && yum clean all + +# Disable requiretty. 
+RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers + +VOLUME ["/sys/fs/cgroup"] +CMD ["/usr/lib/systemd/systemd"] + +RUN yum -y install java-1.8.0-openjdk \ + rsync \ + openssl \ + rsyslog \ + openldap \ + openldap-clients \ + openldap-devel \ + krb5-libs \ + krb5-workstation \ + unzip \ + procps \ + procps-ng + +# Workaround to fix GPG signature validation issue, remove when it is fixed in CP 7.5 release +RUN update-crypto-policies --set DEFAULT:SHA1 + +{% set DEFAULT_PACKAGE_VER = lookup('pipe', "awk '/confluent_package_version:/ {print $2}' $MOLECULE_PROJECT_DIRECTORY/roles/variables/defaults/main.yml" ) %} +{% set PACKAGE_VER = lookup('env', 'VERSION') | default(DEFAULT_PACKAGE_VER, true) %} +{% set REPO_VER = PACKAGE_VER | regex_replace('^([0-9])\\.([0-9]*).*', '\\1.\\2') %} +{% set COMMON_REPO_URL = lookup('env', 'COMMON_REPO_URL') | default('https://packages.confluent.io', true) %} +{% set CLIENT_REPO_URL = lookup('env', 'CLIENT_REPO_URL') | default('https://packages.confluent.io', true) %} diff --git a/molecule/archive-plain-debian/molecule.yml b/molecule/archive-plain-debian/molecule.yml index 855d5ea6e..87b610744 100644 --- a/molecule/archive-plain-debian/molecule.yml +++ b/molecule/archive-plain-debian/molecule.yml @@ -99,7 +99,7 @@ provisioner: group_vars: all: confluent_cli_download_enabled: true - confluent_cli_version: 3.2.1 + confluent_cli_version: 3.30.1 sasl_protocol: plain ssl_enabled: true installation_method: "archive" diff --git a/molecule/archive-plain-rhel/molecule.yml b/molecule/archive-plain-rhel-fips/molecule.yml similarity index 100% rename from molecule/archive-plain-rhel/molecule.yml rename to molecule/archive-plain-rhel-fips/molecule.yml diff --git a/molecule/archive-plain-rhel/rules.json b/molecule/archive-plain-rhel-fips/rules.json similarity index 100% rename from molecule/archive-plain-rhel/rules.json rename to molecule/archive-plain-rhel-fips/rules.json diff --git a/molecule/archive-plain-rhel/verify.yml b/molecule/archive-plain-rhel-fips/verify.yml similarity index 94% rename from molecule/archive-plain-rhel/verify.yml rename to molecule/archive-plain-rhel-fips/verify.yml index e929ae8e1..cd8889932 100644 --- a/molecule/archive-plain-rhel/verify.yml +++ b/molecule/archive-plain-rhel-fips/verify.yml @@ -3,6 +3,7 @@ ### Validates that custom log4j configuration is in place. ### Validates that FIPS security is enabled on the Brokers. ### Validates that logredactor is functioning properly for all components as per the rule file. +### Validates that FIPS is in use in OpenSSL. 
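The TLS check in the verify plays that follow drives `openssl s_client` against a broker listener and fails if the negotiated protocol is below TLSv1.2. Note the `</dev/null` redirection, which closes s_client's stdin so the handshake terminates instead of hanging; a standalone sketch (port 9091 is the broker listener in these scenarios):

```yaml
# Extract the negotiated protocol line from the s_client handshake report.
- name: Test TLS version used in certificate
  shell: openssl s_client -connect {{ inventory_hostname }}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //'
  register: tls_version

# Compare the captured stdout (e.g. "TLSv1.3") against the TLSv1.2 floor.
- fail:
    msg: "TLS version is {{ tls_version.stdout }}, it should be >=1.2"
  when: tls_version.stdout is version('TLSv1.2', '<')
```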
- name: Verify - kafka_controller hosts: kafka_controller @@ -61,6 +62,19 @@ property: abc expected_value: xyz + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - schema_registry hosts: schema_registry gather_facts: false diff --git a/molecule/certificates.yml b/molecule/certificates.yml index cb2fb0de7..ec1c40c58 100644 --- a/molecule/certificates.yml +++ b/molecule/certificates.yml @@ -20,17 +20,42 @@ state: present when: ansible_os_family == "Debian" + - name: Get Java Path + shell: dirname $(dirname $(readlink -f $(which java))) + register: java_path + when: + - fips_enabled|default(false) | bool + - ansible_os_family == "RedHat" and ansible_distribution_major_version in ['8', '9'] + + - name: Disable JVM level FIPS + lineinfile: path: "{{java_path.stdout}}/conf/security/java.security" search_string: 'security.useSystemPropertiesFile=true' line: security.useSystemPropertiesFile=false owner: root group: root mode: '0644' when: - fips_enabled|default(false) | bool - ansible_os_family == "RedHat" and ansible_distribution_major_version in ['8', '9'] + + - name: Configure crypto policies + shell: update-crypto-policies --set FIPS when: + - fips_enabled|default(false) | bool + - ansible_os_family == "RedHat" and ansible_distribution_major_version in ['8', '9'] + - name: Create SSL Certificate Generation Directory file: path: /var/ssl/private/generation state: directory - mode: 0755 + mode: '755' - name: Copy in cert generation files in copy: src: "{{item}}" dest: "/var/ssl/private/generation/{{item|basename}}" - mode: 0777 + mode: '777' loop: - certs-create.sh @@ -40,7 +65,7 @@ copy: src: "{{item}}" dest: "/var/ssl/private/generation/{{item|basename}}" - mode: 0777 + mode: '777' loop: - "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/certificate-hosts" rescue: @@ -109,6 +134,20 @@ loop: "{{groups['ldap_server']}}" when: groups['ldap_server'] is defined + - name: Install bind utils + yum: + name: + - bind-utils + state: present + when: ansible_os_family == "RedHat" + + - name: Install bind utils + apt: + name: + - dnsutils + state: present + when: ansible_os_family == "Debian" + - name: Run shell: | ./certs-create.sh {{molecule_certs_format|default('PKCS12')|lower}}{% if molecule_add_extra_cert|default(False)|bool %} true{% endif %} diff --git a/molecule/certs-create.sh b/molecule/certs-create.sh index f624ec7ed..30623ae1b 100644 --- a/molecule/certs-create.sh +++ b/molecule/certs-create.sh @@ -50,10 +50,12 @@ for line in `sed '/^$/d' $filename`; do service=${split_hostnames[0]} internal=${split_hostnames[1]} fqdn=$internal.confluent - + ip_add=$(dig +short $internal) + if [ "$ip_add" = "" ]; then # skip creating Zookeeper certs in Kraft mode and vice versa + continue + fi alias=$service.$internal KEYSTORE_FILENAME=$internal.keystore.jks - CSR_FILENAME=$internal.csr CRT_SIGNED_FILENAME=$internal-ca1-signed.crt KEY_FILENAME=$internal-key.pem @@ -118,6 +120,7 @@ subjectAltName = @alt_names [alt_names] DNS.1 = $internal DNS.2 = $fqdn +IP.1 = $ip_add EOF ) diff --git a/molecule/custom-user-plaintext-rhel/verify.yml index
7620396ec..8e6572bb4 100644 --- a/molecule/custom-user-plaintext-rhel/verify.yml +++ b/molecule/custom-user-plaintext-rhel/verify.yml @@ -79,7 +79,7 @@ recurse: true group: cp-test-group owner: "{{zookeeper_user}}" - mode: 0770 + mode: '770' - name: Restart Service systemd: diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/.gitignore b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/.gitignore similarity index 100% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/.gitignore rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/.gitignore diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/certificate-hosts b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/certificate-hosts similarity index 100% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/certificate-hosts rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/certificate-hosts diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/converge.yml b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/converge.yml similarity index 100% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/converge.yml rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/converge.yml diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/molecule.yml b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/molecule.yml similarity index 99% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/molecule.yml rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/molecule.yml index 90cc662a7..47890fa2c 100644 --- a/molecule/kafka-connect-replicator-plain-kerberos-rhel/molecule.yml +++ b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/molecule.yml @@ -6,6 +6,7 @@ ### Replicator consumes from Cluster1 (MDS) using SASL Plain with Custom Certs for TLS. ### Replicator Produces to Cluster2 using Kerberos with Custom Certs for TLS. ### Tests custom client IDs for Replicator. +### FIPS enabled on both clusters. driver: name: docker @@ -146,9 +147,9 @@ provisioner: inventory: group_vars: all: - scenario_name: kafka-connect-replicator-plain-kerberos-rhel + scenario_name: kafka-connect-replicator-plain-kerberos-rhel-fips kerberos_kafka_broker_primary: kafka - + fips_enabled: true kerberos: realm: realm.example.com kdc_hostname: mds-kerberos1 diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/prepare.yml b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/prepare.yml similarity index 100% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/prepare.yml rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/prepare.yml diff --git a/molecule/kafka-connect-replicator-plain-kerberos-rhel/verify.yml b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/verify.yml similarity index 83% rename from molecule/kafka-connect-replicator-plain-kerberos-rhel/verify.yml rename to molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/verify.yml index 2dd4619a2..e953387a6 100644 --- a/molecule/kafka-connect-replicator-plain-kerberos-rhel/verify.yml +++ b/molecule/kafka-connect-replicator-plain-kerberos-rhel-fips/verify.yml @@ -3,6 +3,7 @@ ### Validates that Replicator is using Kerberos and TLS to Produce data to Cluster2. ### Validates that Replicator is using SASL PLAIN with TLS to Consume from Cluster1 (MDS). ### Validates that client ID's are set correctly on Replicator. 
+### Validates that FIPS is in use in OpenSSL. - name: Verify - kafka_controller hosts: kafka_controller @@ -18,6 +19,23 @@ property: controller.quorum.voters expected_value: "{{ kafka_controller_quorum_voters }}" +- name: Validate FIPS + hosts: kafka-broker1 + gather_facts: false + tasks: + - name: Test TLS version used in certificate + shell: openssl s_client -connect kafka-broker1:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Confluent Replicator Validate Consumption and Production between clusters hosts: kafka-broker1 gather_facts: false diff --git a/molecule/ksql-scale-up/molecule.yml b/molecule/ksql-scale-up/molecule.yml index 881e8a829..c02915668 100644 --- a/molecule/ksql-scale-up/molecule.yml +++ b/molecule/ksql-scale-up/molecule.yml @@ -12,8 +12,8 @@ platforms: hostname: ${KRAFT_CONTROLLER:-zookeeper}1.confluent groups: - ${CONTROLLER_HOSTGROUP:-zookeeper} - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -24,8 +24,8 @@ platforms: hostname: kafka-broker1.confluent groups: - kafka_broker - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -36,8 +36,8 @@ platforms: hostname: kafka-broker2.confluent groups: - kafka_broker - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -48,8 +48,8 @@ platforms: hostname: kafka-broker3.confluent groups: - kafka_broker - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -60,8 +60,8 @@ platforms: hostname: kafka-connect1.confluent groups: - kafka_connect - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -72,8 +72,8 @@ platforms: hostname: ksql1.confluent groups: - ksql - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -84,8 +84,8 @@ platforms: hostname: ksql2.confluent groups: - ksql - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -94,8 +94,8 @@ platforms: - name: confluent - name: ksql3 hostname: ksql3.confluent - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -104,8 +104,8 @@ platforms: - name: confluent - name: ksql4 hostname: ksql4.confluent
- image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -116,8 +116,8 @@ platforms: hostname: control-center1.confluent groups: - control_center - image: geerlingguy/docker-centos7-ansible - dockerfile: ../Dockerfile-rhel7-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java8.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro diff --git a/molecule/mtls-custombundle-rhel/ca1-hosts b/molecule/mtls-custombundle-rhel-fips/ca1-hosts similarity index 100% rename from molecule/mtls-custombundle-rhel/ca1-hosts rename to molecule/mtls-custombundle-rhel-fips/ca1-hosts diff --git a/molecule/mtls-custombundle-rhel/ca2-hosts b/molecule/mtls-custombundle-rhel-fips/ca2-hosts similarity index 100% rename from molecule/mtls-custombundle-rhel/ca2-hosts rename to molecule/mtls-custombundle-rhel-fips/ca2-hosts diff --git a/molecule/mtls-custombundle-rhel/create_ca_bundle.sh b/molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh similarity index 100% rename from molecule/mtls-custombundle-rhel/create_ca_bundle.sh rename to molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh diff --git a/molecule/mtls-custombundle-rhel/molecule.yml b/molecule/mtls-custombundle-rhel-fips/molecule.yml similarity index 98% rename from molecule/mtls-custombundle-rhel/molecule.yml rename to molecule/mtls-custombundle-rhel-fips/molecule.yml index 47b49d3a2..5394beaed 100644 --- a/molecule/mtls-custombundle-rhel/molecule.yml +++ b/molecule/mtls-custombundle-rhel-fips/molecule.yml @@ -3,6 +3,7 @@ ### MTLS Enabled with custom certificates. ### Tests custom filtering properties for Secrets Protection. ### TLS is disabled for Zookeeper. 
+### FIPS enabled driver: name: docker @@ -123,7 +124,7 @@ provisioner: inventory: group_vars: all: - scenario_name: mtls-custombundle-rhel + scenario_name: mtls-custombundle-rhel-fips ssl_enabled: true fips_enabled: true redhat_java_package_name: java-11-openjdk diff --git a/molecule/mtls-custombundle-rhel/prepare.yml b/molecule/mtls-custombundle-rhel-fips/prepare.yml similarity index 96% rename from molecule/mtls-custombundle-rhel/prepare.yml rename to molecule/mtls-custombundle-rhel-fips/prepare.yml index 799678bd4..3b48b5b58 100644 --- a/molecule/mtls-custombundle-rhel/prepare.yml +++ b/molecule/mtls-custombundle-rhel-fips/prepare.yml @@ -24,13 +24,13 @@ file: path: /var/ssl/private/generation state: directory - mode: 0755 + mode: '755' - name: Copy in cert generation files in copy: src: "{{item}}" dest: "/var/ssl/private/generation/{{item|basename}}" - mode: 0777 + mode: '777' loop: - ca1-hosts - ca2-hosts diff --git a/molecule/mtls-custombundle-rhel/security.properties b/molecule/mtls-custombundle-rhel-fips/security.properties similarity index 100% rename from molecule/mtls-custombundle-rhel/security.properties rename to molecule/mtls-custombundle-rhel-fips/security.properties diff --git a/molecule/mtls-custombundle-rhel/verify.yml b/molecule/mtls-custombundle-rhel-fips/verify.yml similarity index 100% rename from molecule/mtls-custombundle-rhel/verify.yml rename to molecule/mtls-custombundle-rhel-fips/verify.yml diff --git a/molecule/mtls-java11-debian/molecule.yml b/molecule/mtls-java11-debian/molecule.yml index 56afb1b82..3fd1c28fd 100644 --- a/molecule/mtls-java11-debian/molecule.yml +++ b/molecule/mtls-java11-debian/molecule.yml @@ -124,5 +124,3 @@ provisioner: ssl_mutual_auth_enabled: true debian_java_package_name: openjdk-11-jdk - - fips_enabled: true diff --git a/molecule/mtls-java11-rhel/molecule.yml b/molecule/mtls-java11-rhel-fips/molecule.yml similarity index 99% rename from molecule/mtls-java11-rhel/molecule.yml rename to molecule/mtls-java11-rhel-fips/molecule.yml index 05c5ecfea..cd67fc337 100644 --- a/molecule/mtls-java11-rhel/molecule.yml +++ b/molecule/mtls-java11-rhel-fips/molecule.yml @@ -2,6 +2,7 @@ ### Installation of Confluent Platform on RHEL9. ### MTLS enabled. ### Java 11. +### FIPS enabled driver: name: docker diff --git a/molecule/mtls-java11-rhel/verify.yml b/molecule/mtls-java11-rhel-fips/verify.yml similarity index 76% rename from molecule/mtls-java11-rhel/verify.yml rename to molecule/mtls-java11-rhel-fips/verify.yml index 1a88b3bb5..f881633d5 100644 --- a/molecule/mtls-java11-rhel/verify.yml +++ b/molecule/mtls-java11-rhel-fips/verify.yml @@ -1,6 +1,7 @@ --- ### Validates that Java 11 is in use. ### Validates that FIPS security is enabled on the Brokers. +### Validates that FIPS is in use in OpenSSL. 
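As the molecule.yml changes in this block show, turning a scenario into its -fips variant is mostly a group_vars switch; a minimal sketch of the inventory shape, assuming the documented prerequisites (RHEL 8/9, ssl_enabled):

```yaml
all:
  vars:
    ssl_enabled: true          # prerequisite for fips_enabled
    fips_enabled: true         # drives the BC-FIPS providers and bcfks keystores
    redhat_java_package_name: java-11-openjdk   # scenario-specific choice
```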
- name: Verify hosts: all @@ -49,3 +50,16 @@ file_path: /etc/kafka/server.properties property: security.providers expected_value: io.confluent.kafka.security.fips.provider.BcFipsProviderCreator,io.confluent.kafka.security.fips.provider.BcFipsJsseProviderCreator + + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 diff --git a/molecule/mtls-java8-ubuntu/molecule.yml b/molecule/mtls-java8-ubuntu/molecule.yml index 42f38b808..35060166d 100644 --- a/molecule/mtls-java8-ubuntu/molecule.yml +++ b/molecule/mtls-java8-ubuntu/molecule.yml @@ -124,5 +124,3 @@ provisioner: ssl_mutual_auth_enabled: true ubuntu_java_package_name: openjdk-8-jdk - - fips_enabled: true diff --git a/molecule/mtls-ubuntu-acl/molecule.yml b/molecule/mtls-ubuntu-acl/molecule.yml index f1923d86a..e7d6ade59 100644 --- a/molecule/mtls-ubuntu-acl/molecule.yml +++ b/molecule/mtls-ubuntu-acl/molecule.yml @@ -91,4 +91,8 @@ provisioner: authorizer.class.name: "${ACL_AUTHORIZER:-kafka.security.authorizer.AclAuthorizer}" # be aware of the double $$ to escape it https://github.com/ansible-community/molecule/issues/993 ssl.principal.mapping.rules: "RULE:^.*[Cc][Nn]=([a-zA-Z0-9._-]*).*$$/$$1/L,DEFAULT" - super.users: "User:kafka_broker;User:schema_registry;User:kafka_connect;User:ksql;User:control_center" + allow.everyone.if.no.acl.found: true + + kafka_controller_custom_properties: + authorizer.class.name: org.apache.kafka.metadata.authorizer.StandardAuthorizer + allow.everyone.if.no.acl.found: true diff --git a/molecule/mtls-ubuntu-acl/verify.yml b/molecule/mtls-ubuntu-acl/verify.yml index cbd7aa06c..5b6906562 100644 --- a/molecule/mtls-ubuntu-acl/verify.yml +++ b/molecule/mtls-ubuntu-acl/verify.yml @@ -2,6 +2,7 @@ ### Validates that MTLS is enabled. ### Validates mapping rules for ACLs. ### Validates ACL users. +### Validates ACL creation.
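The new ACL verification in the next play creates a topic, adds a write-only ACL, and then asserts that reads come back empty. A hypothetical follow-up spot check, listing what was created with the same client config (`kafka-acls --list` is a standard flag of the CLI):

```yaml
# Read-only inspection of the ACLs created by the preceding tasks.
- name: List ACLs for test-topic
  shell: kafka-acls --bootstrap-server kafka-broker1:9091 --list --topic test-topic --command-config /etc/kafka/client.properties
  run_once: true
  register: acl_list
  changed_when: false
```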
- name: Verify - kafka_controller hosts: kafka_controller @@ -54,13 +55,6 @@ file_path: /etc/kafka/server.properties property: ssl.principal.mapping.rules expected_value: RULE:^.*[Cc][Nn]=([a-zA-Z0-9._-]*).*$/$1/L,DEFAULT - - import_role: - name: confluent.test - tasks_from: check_property.yml - vars: - file_path: /etc/kafka/server.properties - property: super.users - expected_value: User:kafka_broker;User:schema_registry;User:kafka_connect;User:ksql;User:control_center - import_role: name: confluent.test tasks_from: check_property.yml @@ -78,6 +72,56 @@ expected_value: org.apache.kafka.metadata.authorizer.StandardAuthorizer when: kraft_mode|bool + - name: Create Kafka topic + shell: kafka-topics --create --topic test-topic \ + --bootstrap-server kafka-broker1:9091 --command-config /etc/kafka/client.properties \ + --replication-factor 1 --partitions 6 + run_once: true + register: output + failed_when: + - "'Topic test-topic already exists' not in output.stdout" + - "'Created topic test-topic' not in output.stdout" + + - name: Create Topic Data + shell: | + seq 10 | kafka-console-producer --topic test-topic \ + --bootstrap-server kafka-broker1:9091 --producer.config /etc/kafka/client.properties + run_once: true + + - name: Read Topic Data + shell: kafka-console-consumer --topic test-topic \ + --bootstrap-server kafka-broker1:9091 --timeout-ms 10000 \ + --from-beginning --consumer.config /etc/kafka/client.properties + run_once: true + register: consumer_output + failed_when: + - "'1\n2\n3\n4\n5\n6\n7\n8\n9\n10' not in consumer_output.stdout" + + - name: Create ACL with write only permission + shell: kafka-acls --bootstrap-server kafka-broker1:9091 --add + --topic test-topic --allow-principal User:* --operation write + --command-config /etc/kafka/client.properties + run_once: true + register: acl_output + failed_when: + - "'Adding ACLs for resource' not in acl_output.stdout" + - "'Current ACLs for resource' not in acl_output.stdout" + + - name: Create Topic Data + shell: | + seq 5 | kafka-console-producer --topic test-topic \ + --bootstrap-server kafka-broker1:9091 --producer.config /etc/kafka/client.properties + run_once: true + + - name: Read Topic Data + shell: kafka-console-consumer --topic test-topic \ + --bootstrap-server kafka-broker1:9091 --timeout-ms 10000 \ + --from-beginning --consumer.config /etc/kafka/client.properties + run_once: true + register: consumer_output + failed_when: + - consumer_output.stdout != "" # not authorized to read data + - name: Verify - schema_registry hosts: schema_registry gather_facts: false diff --git a/molecule/plain-customcerts-rhel/molecule.yml b/molecule/plain-customcerts-rhel-fips/molecule.yml similarity index 97% rename from molecule/plain-customcerts-rhel/molecule.yml rename to molecule/plain-customcerts-rhel-fips/molecule.yml index ab47c87af..8ad265ff3 100644 --- a/molecule/plain-customcerts-rhel/molecule.yml +++ b/molecule/plain-customcerts-rhel-fips/molecule.yml @@ -3,6 +3,7 @@ ### TLS enabled. ### SASL Plain enabled. 
### Custom certificates on remote host +### FIPS enabled driver: name: docker @@ -123,8 +124,8 @@ provisioner: inventory: group_vars: all: - scenario_name: plain-customcerts-rhel - + scenario_name: plain-customcerts-rhel-fips + fips_enabled: true sasl_protocol: plain ssl_enabled: true diff --git a/molecule/plain-customcerts-rhel/verify.yml b/molecule/plain-customcerts-rhel-fips/verify.yml similarity index 88% rename from molecule/plain-customcerts-rhel/verify.yml rename to molecule/plain-customcerts-rhel-fips/verify.yml index 0514e1bad..510349534 100644 --- a/molecule/plain-customcerts-rhel/verify.yml +++ b/molecule/plain-customcerts-rhel-fips/verify.yml @@ -1,6 +1,7 @@ --- ### Validates that keystores are present on all components. ### Validates that SASL mechanism is set to PLAIN on all components. +### Validates that FIPS is in use in OpenSSL. - name: Verify - kafka_controller hosts: kafka_controller @@ -21,7 +22,7 @@ vars: file_path: /etc/controller/server.properties property: listener.name.controller.ssl.keystore.location - expected_value: /var/ssl/private/kafka_controller.keystore.jks + expected_value: /var/ssl/private/kafka_controller.keystore.bcfks - import_role: name: confluent.test @@ -41,7 +42,7 @@ vars: file_path: /etc/kafka/server.properties property: listener.name.internal.ssl.keystore.location - expected_value: /var/ssl/private/kafka_broker.keystore.jks + expected_value: /var/ssl/private/kafka_broker.keystore.bcfks - import_role: name: confluent.test @@ -51,6 +52,19 @@ property: sasl.enabled.mechanisms expected_value: PLAIN + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - schema_registry hosts: schema_registry gather_facts: false diff --git a/molecule/plain-rhel/molecule.yml b/molecule/plain-rhel/molecule.yml index 4d0fc0387..6cc315e15 100644 --- a/molecule/plain-rhel/molecule.yml +++ b/molecule/plain-rhel/molecule.yml @@ -104,6 +104,9 @@ provisioner: zookeeper_custom_properties: dataDir: /opt/zookeeper + kafka_broker_custom_client_properties: + default.api.timeout.ms: 40000 + kafka_connect_confluent_hub_plugins: - jcustenborder/kafka-connect-spooldir:2.0.43 diff --git a/molecule/plain-rhel/verify.yml b/molecule/plain-rhel/verify.yml index 81911c58e..1a990b904 100644 --- a/molecule/plain-rhel/verify.yml +++ b/molecule/plain-rhel/verify.yml @@ -123,6 +123,13 @@ property: log4j.rootLogger expected_value: "INFO, kafkaAppender" + - import_role: + name: confluent.test + tasks_from: check_property.yml + vars: + file_path: /etc/kafka/client.properties + property: default.api.timeout.ms + expected_value: "40000" - name: Verify - schema_registry hosts: schema_registry diff --git a/molecule/plaintext-rhel-customrepo/molecule.yml b/molecule/plaintext-rhel-customrepo/molecule.yml index 43b76fdb7..9a30ca63f 100644 --- a/molecule/plaintext-rhel-customrepo/molecule.yml +++ b/molecule/plaintext-rhel-customrepo/molecule.yml @@ -154,7 +154,7 @@ provisioner: destination_path: /tmp/molecule.yml - source_path: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/molecule.yml" destination_path: /tmp/molecule2.yml - file_mode: '0666' + file_mode: '666' kafka_broker_copy_files: "{{ksql_copy_files}}"
${CONTROLLER_HOSTGROUP:-zookeeper}_copy_files: "{{ksql_copy_files}}" diff --git a/molecule/rbac-mds-kerberos-debian/molecule.yml b/molecule/rbac-mds-kerberos-debian/molecule.yml index afd3e67cb..1af9a7007 100644 --- a/molecule/rbac-mds-kerberos-debian/molecule.yml +++ b/molecule/rbac-mds-kerberos-debian/molecule.yml @@ -5,6 +5,7 @@ ### Custom TLS certificates. ### Kafka Broker Customer Listener ### RBAC Additional System Admin. +### SSO authentication using OIDC in Control center using Azure IdP driver: name: docker @@ -208,6 +209,7 @@ provisioner: sasl_protocol: kerberos rbac_enabled: true + sso_mode: oidc create_mds_certs: false token_services_public_pem_file: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/public.pem" @@ -261,6 +263,17 @@ provisioner: custom_java_path: /opt/jdk17 # Use custom Java 17 mds: + + # necessary configs in MDS server for sso in C3 + sso_groups_claim: groups + sso_sub_claim: sub + sso_issuer_url: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/v2.0 + sso_jwks_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/discovery/v2.0/keys + sso_authorize_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/oauth2/v2.0/authorize + sso_token_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/oauth2/v2.0/token + sso_client_id: ${AZURE_CLIENT:-user} + sso_client_password: ${AZURE_PASSWORD:-pass} + kafka_broker_custom_properties: ldap.java.naming.factory.initial: com.sun.jndi.ldap.LdapCtxFactory ldap.com.sun.jndi.ldap.read.timeout: 3000 diff --git a/molecule/rbac-mds-kerberos-debian/verify.yml b/molecule/rbac-mds-kerberos-debian/verify.yml index 574137863..b182d6ebd 100644 --- a/molecule/rbac-mds-kerberos-debian/verify.yml +++ b/molecule/rbac-mds-kerberos-debian/verify.yml @@ -3,6 +3,7 @@ ### Validates that MDS is HTTP on Cluster1 (MDS). ### Validates that all components on Cluster2 are pointing to the MDS on Cluster1. ### Validates that Java 17 is in Use +### Validates OIDC authenticate api for SSO in Control Center - name: Verify Java hosts: all:!ldap_server:!kerberos_server @@ -102,3 +103,11 @@ check_mode: false changed_when: false failed_when: linecheck.rc != 0 + + - name: Check status of Authenticate api + uri: + url: "http://control-center1:9021/api/metadata/security/1.0/oidc/authenticate?caller=http://control-center1:9021/api/metadata" + validate_certs: false + follow_redirects: none + status_code: 302 + register: sso diff --git a/molecule/rbac-mds-mtls-custom-rhel/certificate-hosts b/molecule/rbac-mds-mtls-custom-rhel-fips/certificate-hosts similarity index 100% rename from molecule/rbac-mds-mtls-custom-rhel/certificate-hosts rename to molecule/rbac-mds-mtls-custom-rhel-fips/certificate-hosts diff --git a/molecule/rbac-mds-mtls-custom-rhel/molecule.yml b/molecule/rbac-mds-mtls-custom-rhel-fips/molecule.yml similarity index 99% rename from molecule/rbac-mds-mtls-custom-rhel/molecule.yml rename to molecule/rbac-mds-mtls-custom-rhel-fips/molecule.yml index 17b5116bb..730b7fe85 100644 --- a/molecule/rbac-mds-mtls-custom-rhel/molecule.yml +++ b/molecule/rbac-mds-mtls-custom-rhel-fips/molecule.yml @@ -4,7 +4,7 @@ ### Remote MDS from Cluster2 to Cluster1 (MDS). ### Custom TLS certificates. ### MTLS enabled on both clusters. -### FIPS enabled on Cluster2. +### FIPS enabled on both clusters. ### Kafka Broker Customer Listener. ### RBAC Additional System Admin. 
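The Azure wiring above has the same shape for any OIDC IdP: issuer, JWKS, authorize, and token endpoints, plus client credentials, all set under the mds vars. A trimmed sketch with placeholder endpoints (substitute your IdP's OIDC metadata; the ${VAR:-default} style mirrors the molecule files in this diff):

```yaml
mds:
  sso_groups_claim: groups
  sso_sub_claim: sub
  sso_issuer_url: https://idp.example.com/realms/demo            # placeholder
  sso_jwks_uri: https://idp.example.com/realms/demo/keys         # placeholder
  sso_authorize_uri: https://idp.example.com/realms/demo/auth    # placeholder
  sso_token_uri: https://idp.example.com/realms/demo/token       # placeholder
  sso_client_id: ${SSO_CLIENT:-user}
  sso_client_password: ${SSO_PASSWORD:-pass}
```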
@@ -189,7 +189,8 @@ provisioner: inventory: group_vars: all: - scenario_name: rbac-mds-mtls-custom-rhel + scenario_name: rbac-mds-mtls-custom-rhel-fips + fips_enabled: true redhat_java_package_name: java-11-openjdk ssl_enabled: true ssl_mutual_auth_enabled: true @@ -268,7 +269,6 @@ provisioner: cluster2: kafka_broker_cluster_name: audit_logs - fips_enabled: true external_mds_enabled: true kafka_broker_rest_ssl_enabled: true mds_broker_bootstrap_servers: mds-kafka-broker1:9093,mds-kafka-broker2:9093 diff --git a/molecule/rbac-mds-mtls-custom-rhel/prepare.yml b/molecule/rbac-mds-mtls-custom-rhel-fips/prepare.yml similarity index 100% rename from molecule/rbac-mds-mtls-custom-rhel/prepare.yml rename to molecule/rbac-mds-mtls-custom-rhel-fips/prepare.yml diff --git a/molecule/rbac-mds-mtls-custom-rhel/verify.yml b/molecule/rbac-mds-mtls-custom-rhel-fips/verify.yml similarity index 77% rename from molecule/rbac-mds-mtls-custom-rhel/verify.yml rename to molecule/rbac-mds-mtls-custom-rhel-fips/verify.yml index 054e4a200..bd2c4f738 100644 --- a/molecule/rbac-mds-mtls-custom-rhel/verify.yml +++ b/molecule/rbac-mds-mtls-custom-rhel-fips/verify.yml @@ -3,11 +3,42 @@ ### Validates that keystores are in place. ### Validates that MDS is HTTP on Cluster1 (MDS). ### Validates that all components on Cluster2 are pointing to the MDS on Cluster1. +### Validates that FIPS is in use on both clusters. + +- name: Validate FIPS on MDS cluster + hosts: kafka_broker + gather_facts: false + tasks: + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 - name: Verify - kafka_broker hosts: kafka_broker2 gather_facts: false tasks: + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version.stdout}}, it should be >=1.2" + when: tls_version.stdout is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Check 2 way tls line omitted in properties shell: | grep "confluent.metadata.server.ssl.truststore.location" /etc/kafka/server.properties diff --git a/molecule/rbac-mds-plain-custom-rhel/certificate-hosts b/molecule/rbac-mds-plain-custom-rhel-fips/certificate-hosts similarity index 100% rename from molecule/rbac-mds-plain-custom-rhel/certificate-hosts rename to molecule/rbac-mds-plain-custom-rhel-fips/certificate-hosts diff --git a/molecule/rbac-mds-plain-custom-rhel/molecule.yml b/molecule/rbac-mds-plain-custom-rhel-fips/molecule.yml similarity index 82% rename from molecule/rbac-mds-plain-custom-rhel/molecule.yml rename to molecule/rbac-mds-plain-custom-rhel-fips/molecule.yml index 1a9c02431..275ec739d 100644 --- a/molecule/rbac-mds-plain-custom-rhel/molecule.yml +++ b/molecule/rbac-mds-plain-custom-rhel-fips/molecule.yml @@ -6,6 +6,8 @@ ### SASL PLAIN enabled on both clusters. ### Kafka Broker Customer Listener. ### RBAC Additional System Admin. +### SSO authentication using OIDC in Control center using KeyCloak IdP +### FIPS enabled on both clusters.
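Because this scenario spans two clusters, verify.yml repeats the same FIPS tasks once per broker group, as shown above. A more compact, equivalent formulation would be a single play over a union host pattern; a sketch:

```yaml
- name: Validate FIPS on both clusters
  hosts: kafka_broker:kafka_broker2   # union of the MDS and cluster2 broker groups
  gather_facts: false
  tasks:
    - name: Check FIPS in OpenSSL
      shell: openssl md5 <<< "123"
      register: openssl_out
      failed_when: openssl_out.rc == 0   # MD5 must be refused under FIPS
```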
driver: name: docker @@ -28,8 +30,8 @@ platforms: groups: - ${CONTROLLER_HOSTGROUP:-zookeeper} - mds - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -41,8 +43,8 @@ platforms: groups: - kafka_broker - mds - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -54,8 +56,8 @@ platforms: groups: - kafka_broker - mds - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -68,8 +70,8 @@ platforms: groups: - ${CONTROLLER_HOSTGROUP:-zookeeper}2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -81,8 +83,8 @@ platforms: groups: - kafka_broker2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -94,8 +96,8 @@ platforms: groups: - kafka_broker2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -107,8 +109,8 @@ platforms: groups: - kafka_broker2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -120,8 +122,8 @@ platforms: groups: - schema_registry2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -133,8 +135,8 @@ platforms: groups: - kafka_rest2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -146,8 +148,8 @@ platforms: groups: - kafka_connect2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -159,8 +161,8 @@ platforms: groups: - ksql2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -172,8 +174,8 @@ platforms: groups: - control_center2 - cluster2 - image: geerlingguy/docker-centos8-ansible - dockerfile: ../Dockerfile-rhel-java17.j2 + image: redhat/ubi9-minimal + dockerfile: ../Dockerfile-rhel9-java17.j2 command: "" volumes: - /sys/fs/cgroup:/sys/fs/cgroup:ro @@ -188,11 +190,11 @@ provisioner: inventory: group_vars: all: - scenario_name: rbac-mds-plain-custom-rhel + scenario_name: 
rbac-mds-plain-custom-rhel-fips ssl_enabled: true sasl_protocol: plain - + fips_enabled: true ssl_custom_certs: true ssl_ca_cert_filepath: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/ca.crt" ssl_signed_cert_filepath: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/{{inventory_hostname}}-ca1-signed.crt" @@ -200,6 +202,7 @@ provisioner: ssl_key_password: keypass rbac_enabled: true + sso_mode: oidc kafka_broker_custom_log4j: false @@ -240,6 +243,17 @@ provisioner: - User:user1 mds: + + # necessary configs in MDS server for sso in C3 + sso_groups_claim: groups + sso_sub_claim: sub + sso_issuer_url: http://ec2-35-166-19-61.us-west-2.compute.amazonaws.com:8080/auth/realms/cpsso + sso_jwks_uri: http://ec2-35-166-19-61.us-west-2.compute.amazonaws.com:8080/auth/realms/cpsso/protocol/openid-connect/certs + sso_authorize_uri: http://ec2-35-166-19-61.us-west-2.compute.amazonaws.com:8080/auth/realms/cpsso/protocol/openid-connect/auth + sso_token_uri: http://ec2-35-166-19-61.us-west-2.compute.amazonaws.com:8080/auth/realms/cpsso/protocol/openid-connect/token + sso_client_id: ${KEYCLOAK_CLIENT:-user} + sso_client_password: ${KEYCLOAK_PASSWORD:-pass} + kafka_broker_custom_properties: ldap.java.naming.factory.initial: com.sun.jndi.ldap.LdapCtxFactory ldap.com.sun.jndi.ldap.read.timeout: 3000 diff --git a/molecule/rbac-mds-plain-custom-rhel/prepare.yml b/molecule/rbac-mds-plain-custom-rhel-fips/prepare.yml similarity index 100% rename from molecule/rbac-mds-plain-custom-rhel/prepare.yml rename to molecule/rbac-mds-plain-custom-rhel-fips/prepare.yml diff --git a/molecule/rbac-mds-plain-custom-rhel/verify.yml b/molecule/rbac-mds-plain-custom-rhel-fips/verify.yml similarity index 65% rename from molecule/rbac-mds-plain-custom-rhel/verify.yml rename to molecule/rbac-mds-plain-custom-rhel-fips/verify.yml index 728c9c417..93b0d1773 100644 --- a/molecule/rbac-mds-plain-custom-rhel/verify.yml +++ b/molecule/rbac-mds-plain-custom-rhel-fips/verify.yml @@ -2,6 +2,25 @@ ### Validates that protocol is sasl plain. ### Validates that MDS is HTTPs on Cluster1 (MDS). ### Validates that all components on Cluster2 are pointing to the MDS on Cluster1. +### Validates OIDC authenticate api for SSO in Control Center +### Validates that FIPS is in use on both clusters. 
+ +- name: Validate FIPS on MDS cluster + hosts: kafka_broker + gather_facts: false + tasks: + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version}}, it should be >=1.2" + when: tls_version is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 - name: Verify - kafka_broker hosts: kafka_broker2 @@ -15,6 +34,19 @@ property: confluent.metadata.sasl.mechanism expected_value: PLAIN + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version}}, it should be >=1.2" + when: tls_version is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - kafka_controller hosts: kafka_controller2 gather_facts: false @@ -87,3 +119,10 @@ file_path: /etc/confluent-control-center/control-center-production.properties property: confluent.metadata.bootstrap.server.urls expected_value: https://mds-kafka-broker1:8090,https://mds-kafka-broker2:8090 + - name: Check status of Authenticate api + uri: + url: "https://control-center1:9021/api/metadata/security/1.0/oidc/authenticate?caller=https://control-center1:9021/api/metadata" + validate_certs: false + follow_redirects: none + status_code: 302 + register: sso diff --git a/molecule/rbac-mtls-rhel/molecule.yml b/molecule/rbac-mtls-rhel-fips/molecule.yml similarity index 93% rename from molecule/rbac-mtls-rhel/molecule.yml rename to molecule/rbac-mtls-rhel-fips/molecule.yml index a5793bf0c..0a8b03b2c 100644 --- a/molecule/rbac-mtls-rhel/molecule.yml +++ b/molecule/rbac-mtls-rhel-fips/molecule.yml @@ -137,7 +137,7 @@ provisioner: inventory: group_vars: all: - scenario_name: rbac-mtls-rhel + scenario_name: rbac-mtls-rhel-fips redhat_java_package_name: java-11-openjdk ssl_enabled: true fips_enabled: true @@ -205,8 +205,12 @@ provisioner: ksql_cluster_name: Test-Ksql ldap_server: - ldaps_enabled: false - ldaps_custom_certs: false + ldaps_enabled: true + ldaps_custom_certs: true + ssl_custom_certs: true + ssl_ca_cert_filepath: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/ca.crt" + ssl_signed_cert_filepath: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/{{inventory_hostname}}-ca1-signed.crt" + ssl_key_filepath: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/{{inventory_hostname}}-key.pem" ldap_admin_password: ldppassword diff --git a/molecule/rbac-mtls-rhel/prepare.yml b/molecule/rbac-mtls-rhel-fips/prepare.yml similarity index 64% rename from molecule/rbac-mtls-rhel/prepare.yml rename to molecule/rbac-mtls-rhel-fips/prepare.yml index 285187972..9084617bc 100644 --- a/molecule/rbac-mtls-rhel/prepare.yml +++ b/molecule/rbac-mtls-rhel-fips/prepare.yml @@ -1,4 +1,7 @@ --- +- name: Create Custom Certs + import_playbook: ../certificates.yml + - name: Install and configure OpenLDAP hosts: ldap_server tasks: diff --git a/molecule/rbac-mtls-rhel/verify.yml b/molecule/rbac-mtls-rhel-fips/verify.yml similarity index 92% rename from molecule/rbac-mtls-rhel/verify.yml rename to molecule/rbac-mtls-rhel-fips/verify.yml index 67c4b74b2..2d20433f6 100644 ---
a/molecule/rbac-mtls-rhel/verify.yml +++ b/molecule/rbac-mtls-rhel-fips/verify.yml @@ -5,6 +5,7 @@ ### Validates Kafka Connect secrets registry. ### Validates Cluster Registry. ### Validates the filter resolve_principal with different ssl.mapping.rule +### Validates that FIPS is in use in OpenSSL. - name: Verify - Zookeeper hosts: zookeeper @@ -31,6 +32,22 @@ property: confluent.metadata.ssl.keystore.location expected_value: /var/ssl/private/kafka_controller.keystore.bcfks + - import_role: + name: confluent.test + tasks_from: check_property.yml + vars: + file_path: /etc/controller/server.properties + property: confluent.metadata.ssl.keystore.password + expected_value: ${securepass:/var/ssl/private/kafka_controller-security.properties:server.properties/confluent.metadata.ssl.keystore.password} + + - import_role: + name: confluent.test + tasks_from: check_property.yml + vars: + file_path: /etc/controller/server.properties + property: ssl.truststore.password + expected_value: ${securepass:/var/ssl/private/kafka_controller-security.properties:server.properties/ssl.truststore.password} + - import_role: name: confluent.test tasks_from: check_property.yml @@ -103,6 +120,19 @@ - 9092 - 9093 + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version}}, it should be >=1.2" + when: tls_version is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - schema_registry hosts: schema_registry gather_facts: false diff --git a/molecule/rbac-mtls-rhel8/molecule.yml b/molecule/rbac-mtls-rhel8/molecule.yml index 23e782518..9bf88c6e5 100644 --- a/molecule/rbac-mtls-rhel8/molecule.yml +++ b/molecule/rbac-mtls-rhel8/molecule.yml @@ -3,6 +3,7 @@ ### RBAC enabled. ### MTLS enabled. ### Kafka Broker Customer Listener. +### SSO authentication using OIDC in Control Center using Okta IdP driver: name: docker @@ -162,6 +163,18 @@ provisioner: rbac_enabled: true + sso_mode: oidc + # necessary configs in MDS server for sso in C3 + sso_groups_claim: groups + sso_sub_claim: sub + sso_groups_scope: groups # scope is optional, depending on the IdP + sso_issuer_url: https://dev-59009577.okta.com/oauth2/aus96p2og3u7Cpwu65d7 + sso_jwks_uri: https://dev-59009577.okta.com/oauth2/aus96p2og3u7Cpwu65d7/v1/keys + sso_authorize_uri: https://dev-59009577.okta.com/oauth2/aus96p2og3u7Cpwu65d7/v1/authorize + sso_token_uri: https://dev-59009577.okta.com/oauth2/aus96p2og3u7Cpwu65d7/v1/token + sso_client_id: ${OKTA_CLIENT:-user} + sso_client_password: ${OKTA_PASSWORD:-pass} + kafka_broker_custom_listeners: client_listener: name: CLIENT diff --git a/molecule/rbac-mtls-rhel8/verify.yml b/molecule/rbac-mtls-rhel8/verify.yml index 16ce46df8..1b6ea6bf3 100644 --- a/molecule/rbac-mtls-rhel8/verify.yml +++ b/molecule/rbac-mtls-rhel8/verify.yml @@ -1,5 +1,6 @@ --- ### Validates TLS keysizes across all components.
+### Validates OIDC authenticate api for SSO in Control Center - name: Verify - zookeeper hosts: zookeeper @@ -64,3 +65,15 @@ - shell: "yum list available |grep Confluent.clients" register: client_package_grep failed_when: "client_package_grep.rc > 0" + +- name: Verify SSO Authentication + hosts: control_center + gather_facts: false + tasks: + - name: Check status of Authenticate api + uri: + url: "https://control-center1:9021/api/metadata/security/1.0/oidc/authenticate?caller=https://control-center1:9021/api/metadata" + validate_certs: false + follow_redirects: none + status_code: 302 + register: sso diff --git a/molecule/rbac-scram-custom-rhel/molecule.yml b/molecule/rbac-scram-custom-rhel-fips/molecule.yml similarity index 92% rename from molecule/rbac-scram-custom-rhel/molecule.yml rename to molecule/rbac-scram-custom-rhel-fips/molecule.yml index e6b469155..349b3f58a 100644 --- a/molecule/rbac-scram-custom-rhel/molecule.yml +++ b/molecule/rbac-scram-custom-rhel-fips/molecule.yml @@ -6,6 +6,8 @@ ### Additional System Admins added. ### Additional Scram Users added. ### Kafka Connect Custom arguments. +### SSO authentication using OIDC in Control Center using Azure IdP +### FIPS enabled driver: name: docker @@ -162,8 +164,8 @@ provisioner: inventory: group_vars: all: - scenario_name: rbac-scram-custom-rhel - + scenario_name: rbac-scram-custom-rhel-fips + fips_enabled: true # Test additional scram user sasl_scram_users: client: @@ -179,6 +181,7 @@ provisioner: ssl_key_password: keypass rbac_enabled: true + sso_mode: oidc create_mds_certs: false token_services_public_pem_file: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/generated_ssl_files/public.pem" @@ -213,6 +216,16 @@ provisioner: kafka_broker_additional_system_admins: - User:user2 + # necessary configs in MDS server for sso in C3 + sso_groups_claim: groups + #sso_sub_claim: sub + sso_issuer_url: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/v2.0 + sso_jwks_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/discovery/v2.0/keys + sso_authorize_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/oauth2/v2.0/authorize + sso_token_uri: https://login.microsoftonline.com/0893715b-959b-4906-a185-2789e1ead045/oauth2/v2.0/token + sso_client_id: ${AZURE_CLIENT:-user} + sso_client_password: ${AZURE_PASSWORD:-pass} + kafka_broker_custom_properties: ldap.java.naming.factory.initial: com.sun.jndi.ldap.LdapCtxFactory ldap.com.sun.jndi.ldap.read.timeout: 3000 diff --git a/molecule/rbac-scram-custom-rhel/prepare.yml b/molecule/rbac-scram-custom-rhel-fips/prepare.yml similarity index 100% rename from molecule/rbac-scram-custom-rhel/prepare.yml rename to molecule/rbac-scram-custom-rhel-fips/prepare.yml diff --git a/molecule/rbac-scram-custom-rhel/verify.yml b/molecule/rbac-scram-custom-rhel-fips/verify.yml similarity index 81% rename from molecule/rbac-scram-custom-rhel/verify.yml rename to molecule/rbac-scram-custom-rhel-fips/verify.yml index babf67d05..7cec3f297 100644 --- a/molecule/rbac-scram-custom-rhel/verify.yml +++ b/molecule/rbac-scram-custom-rhel-fips/verify.yml @@ -4,6 +4,8 @@ ### Validates that Confluent Balancer is enabled. ### Validates total number of clusters for user2. ### Validates truststore across all components. +### Validates OIDC authenticate api for SSO in Control Center +### Validates that FIPS is in use in OpenSSL.
- name: Verify - kafka_broker hosts: kafka_broker @@ -15,7 +17,7 @@ vars: file_path: /etc/kafka/server.properties property: confluent.metadata.server.ssl.keystore.location - expected_value: /var/ssl/private/kafka_broker.keystore.jks + expected_value: /var/ssl/private/kafka_broker.keystore.bcfks - import_role: name: confluent.test @@ -75,6 +77,19 @@ fail_msg: "There should only be four clusters in {{clusters.json}}" quiet: true + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version}}, it should be >=1.2" + when: tls_version is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - schema_registry hosts: schema_registry gather_facts: false @@ -135,3 +150,10 @@ file_path: /etc/confluent-control-center/control-center-production.properties property: confluent.controlcenter.streams.ssl.truststore.location expected_value: /var/ssl/private/control_center.truststore.jks + - name: Check status of Authenticate api + uri: + url: "https://control-center1:9021/api/metadata/security/1.0/oidc/authenticate?caller=https://control-center1:9021/api/metadata" + validate_certs: false + follow_redirects: none + status_code: 302 + register: sso diff --git a/molecule/zookeeper-mtls-secrets-rhel/molecule.yml b/molecule/zookeeper-digest-mtls-secrets-rhel/molecule.yml similarity index 97% rename from molecule/zookeeper-mtls-secrets-rhel/molecule.yml rename to molecule/zookeeper-digest-mtls-secrets-rhel/molecule.yml index 614e06e21..d00166e17 100644 --- a/molecule/zookeeper-mtls-secrets-rhel/molecule.yml +++ b/molecule/zookeeper-digest-mtls-secrets-rhel/molecule.yml @@ -145,7 +145,7 @@ provisioner: inventory: group_vars: all: - scenario_name: zookeeper-mtls-secrets-rhel + scenario_name: zookeeper-digest-mtls-secrets-rhel ssl_enabled: true zookeeper_ssl_mutual_auth_enabled: true @@ -153,6 +153,7 @@ provisioner: kafka_broker_jolokia_ssl_enabled: false zookeeper_ssl_enabled: true + zookeeper_quorum_authentication_type: digest_over_tls sasl_protocol: scram # Secret protection will not work in case RBAC is not enabled, hence disabling it for correct expectation setting #secrets_protection_enabled: true diff --git a/molecule/zookeeper-mtls-secrets-rhel/verify.yml b/molecule/zookeeper-digest-mtls-secrets-rhel/verify.yml similarity index 100% rename from molecule/zookeeper-mtls-secrets-rhel/verify.yml rename to molecule/zookeeper-digest-mtls-secrets-rhel/verify.yml diff --git a/molecule/zookeeper-digest-rhel/molecule.yml b/molecule/zookeeper-digest-rhel/molecule.yml index 21ffd2594..58b050248 100644 --- a/molecule/zookeeper-digest-rhel/molecule.yml +++ b/molecule/zookeeper-digest-rhel/molecule.yml @@ -97,7 +97,8 @@ provisioner: all: scenario_name: zookeeper-digest-rhel - zookeeper_sasl_protocol: digest + zookeeper_quorum_authentication_type: digest + zookeeper_client_authentication_type: digest sasl_protocol: scram zookeeper_chroot: "/kafka" diff --git a/molecule/zookeeper-kerberos-rhel/molecule.yml b/molecule/zookeeper-kerberos-rhel/molecule.yml index f70cc052b..dde7c48af 100644 --- a/molecule/zookeeper-kerberos-rhel/molecule.yml +++ b/molecule/zookeeper-kerberos-rhel/molecule.yml @@ -110,7 +110,8 @@ provisioner: all: scenario_name: zookeeper-kerberos-rhel - zookeeper_sasl_protocol: kerberos + zookeeper_quorum_authentication_type: none #
Kerberos not supported for server-to-server authentication + zookeeper_client_authentication_type: kerberos sasl_protocol: scram kerberos_kafka_broker_primary: kafka diff --git a/molecule/zookeeper-tls-rhel/molecule.yml b/molecule/zookeeper-tls-rhel-fips/molecule.yml similarity index 97% rename from molecule/zookeeper-tls-rhel/molecule.yml rename to molecule/zookeeper-tls-rhel-fips/molecule.yml index bceb4e09a..af593c4a2 100644 --- a/molecule/zookeeper-tls-rhel/molecule.yml +++ b/molecule/zookeeper-tls-rhel-fips/molecule.yml @@ -4,6 +4,7 @@ ### TLS enabled. ### Custom zookeeper root. ### Jolokia has TLS disabled. +### FIPS enabled driver: name: docker @@ -148,10 +149,10 @@ provisioner: inventory: group_vars: all: - scenario_name: zookeeper-tls-rhel + scenario_name: zookeeper-tls-rhel-fips ssl_enabled: true - + fips_enabled: true kafka_broker_jolokia_ssl_enabled: false zookeeper_ssl_enabled: true diff --git a/molecule/zookeeper-tls-rhel/verify.yml b/molecule/zookeeper-tls-rhel-fips/verify.yml similarity index 72% rename from molecule/zookeeper-tls-rhel/verify.yml rename to molecule/zookeeper-tls-rhel-fips/verify.yml index 402431d0b..f0dc9fa78 100644 --- a/molecule/zookeeper-tls-rhel/verify.yml +++ b/molecule/zookeeper-tls-rhel-fips/verify.yml @@ -1,6 +1,7 @@ --- ### Validates that Zookeeper is using TLS. ### Validates that other components are using SCRAM for auth. +### Validates that FIPS is in use in OpenSSL. - name: Verify - zookeeper hosts: zookeeper @@ -34,6 +35,19 @@ property: zookeeper.connect expected_value: zookeeper1:2182,zookeeper2:2182,zookeeper3:2182/kafka + - name: Test TLS version used in certificate + shell: openssl s_client -connect {{inventory_hostname}}:9091 </dev/null | grep 'Protocol :' | sed 's/^.*:\ //' + register: tls_version + + - fail: + msg: "TLS version is {{tls_version}}, it should be >=1.2" + when: tls_version is version('TLSv1.2', '<') + + - name: Check FIPS in OpenSSL + shell: openssl md5 <<< "123" + register: openssl_out + failed_when: openssl_out.rc == 0 + - name: Verify - schema_registry hosts: schema_registry gather_facts: false diff --git a/playbooks/tasks/certificate_authority.yml b/playbooks/tasks/certificate_authority.yml index da9763bbd..c8363da8c 100644 --- a/playbooks/tasks/certificate_authority.yml +++ b/playbooks/tasks/certificate_authority.yml @@ -92,7 +92,7 @@ file: path: "{{ ssl_file_dir_final }}/generation" state: directory - mode: 0755 + mode: '755' - name: Create Certificate Authority tags: certificate_authority diff --git a/plugins/filter/filters.py b/plugins/filter/filters.py index dee74760b..db65aa705 100644 --- a/plugins/filter/filters.py +++ b/plugins/filter/filters.py @@ -178,7 +178,7 @@ def listener_properties(self, listeners_dict, default_ssl_enabled, final_dict['listener.name.' + listener_name + '.ssl.trustmanager.algorithm'] = 'PKIX' final_dict['listener.name.' + listener_name + '.ssl.keystore.type'] = 'BCFKS' final_dict['listener.name.' + listener_name + '.ssl.truststore.type'] = 'BCFKS' - final_dict['listener.name.' + listener_name + '.ssl.enabled.protocols'] = 'TLSv1.2' + final_dict['listener.name.' + listener_name + '.ssl.enabled.protocols'] = 'TLSv1.2,TLSv1.3' if listeners_dict[listener].get('ssl_mutual_auth_enabled', default_ssl_mutual_auth_enabled): final_dict['listener.name.' + listener_name + '.ssl.client.auth'] = 'required' @@ -214,6 +214,8 @@ def listener_properties(self, listeners_dict, default_ssl_enabled, 'io.confluent.kafka.server.plugins.auth.token.TokenBearerServerLoginCallbackHandler' final_dict['listener.name.'
+ listener_name + '.oauthbearer.sasl.jaas.config'] =\ 'org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required publicKeyPath=\"' + oauth_pem_path + '\";' + final_dict['listener.name.' + listener_name + '.principal.builder.class'] =\ + 'io.confluent.kafka.security.authenticator.OAuthKafkaPrincipalBuilder' return final_dict diff --git a/roles/common/tasks/config_validations.yml b/roles/common/tasks/config_validations.yml index 4aa6feeac..1f6c67301 100644 --- a/roles/common/tasks/config_validations.yml +++ b/roles/common/tasks/config_validations.yml @@ -1,7 +1,7 @@ --- - name: Retrieve SSL public key hash from private key on Local Host shell: - cmd: openssl pkey -pubout | openssl md5 + cmd: openssl pkey -pubout | openssl sha256 stdin: "{{ lookup('file', ssl_key_filepath) }}" register: key_hash_local delegate_to: localhost @@ -28,7 +28,7 @@ - name: Retrieve SSL public key Hash from private key on Remote Host shell: - cmd: openssl pkey -pubout | openssl md5 + cmd: openssl pkey -pubout | openssl sha256 stdin: "{{ remote_key['results'][group_idx].content | b64decode }}" register: key_hash_remote changed_when: false @@ -44,7 +44,7 @@ - name: Retrieve SSL public key hash from X509 certificate on Local Host shell: - cmd: openssl x509 -noout -pubkey | openssl md5 + cmd: openssl x509 -noout -pubkey | openssl sha256 stdin: "{{ lookup('file', ssl_signed_cert_filepath) }}" register: cert_hash_local delegate_to: localhost @@ -71,7 +71,7 @@ - name: Retrieve SSL public key hash from X509 certificate on Remote Host shell: - cmd: openssl x509 -noout -pubkey | openssl md5 + cmd: openssl x509 -noout -pubkey | openssl sha256 stdin: "{{ remote_cert['results'][group_idx].content | b64decode }}" register: cert_hash_remote changed_when: false @@ -103,7 +103,7 @@ assert: that: key_hash['results'][group_idx].stdout == cert_hash['results'][group_idx].stdout and not key_hash['results'][group_idx].stderr|length > 0 fail_msg: >- - "The MD5 value of the custom ssl key does not match the MD5 value of the custom certificate, indicating that the keys do no match + "The sha256 value of the custom ssl key does not match the sha256 value of the custom certificate, indicating that the keys do not match and are incompatible. Please review your keys and certs and confirm they are from the same source." when: - ssl_custom_certs|bool @@ -114,3 +114,47 @@ loop: "{{group_names}}" loop_control: index_var: group_idx + +- name: Check the OS when using FIPS mode + fail: + msg: "FIPS mode is only supported on RedHat based OS" + when: + - fips_enabled | bool + - ansible_os_family != "RedHat" + tags: validate + +- name: Check if FIPS is enabled on Local Host + shell: sysctl crypto.fips_enabled + delegate_to: localhost + register: fips_output_localhost + when: + - fips_enabled | bool + - ansible_os_family == "RedHat" + tags: validate + +- assert: + that: + - fips_output_localhost.stdout == "crypto.fips_enabled = 1" + fail_msg: "FIPS is not enabled on your localhost, please enable fips on your Local Host." + delegate_to: localhost + when: + - fips_enabled | bool + - ansible_os_family == "RedHat" + tags: validate + +- name: Check if FIPS is enabled on Remote Host + shell: sysctl crypto.fips_enabled + register: fips_output_remotehost + when: + - fips_enabled | bool + - ansible_os_family == "RedHat" + tags: validate + +- assert: + that: + - fips_output_remotehost.stdout == "crypto.fips_enabled = 1" + fail_msg: "FIPS is not enabled on your remote host, please enable fips on your Remote Host."
+ when: + - fips_enabled | bool + - ansible_os_family == "RedHat" + tags: validate diff --git a/roles/common/tasks/confluent_cli.yml b/roles/common/tasks/confluent_cli.yml index 6abdde5b0..3f4e031b9 100644 --- a/roles/common/tasks/confluent_cli.yml +++ b/roles/common/tasks/confluent_cli.yml @@ -19,7 +19,7 @@ file: path: "{{confluent_cli_base_path}}/{{confluent_cli_dir}}" state: directory - mode: '0755' + mode: '755' recurse: true - name: Expand remote Confluent CLI archive @@ -29,7 +29,7 @@ dest: "{{confluent_cli_base_path}}/{{confluent_cli_dir}}" group: "{{ omit if archive_group == '' else archive_group }}" owner: "{{ omit if archive_owner == '' else archive_owner }}" - mode: 0755 + mode: '755' extra_opts: [--strip-components=1] creates: "{{confluent_cli_base_path}}/{{confluent_cli_dir}}/{{confluent_cli_binary}}" when: confluent_cli_custom_download_url is not defined @@ -38,7 +38,7 @@ get_url: url: "{{ confluent_cli_custom_download_url }}" dest: "{{confluent_cli_base_path}}/{{confluent_cli_dir}}/{{confluent_cli_binary}}" - mode: 0755 + mode: '755' register: cli_download_result until: cli_download_result is success retries: 5 diff --git a/roles/common/tasks/copy_files.yml b/roles/common/tasks/copy_files.yml index d4cae9163..cf917f56b 100644 --- a/roles/common/tasks/copy_files.yml +++ b/roles/common/tasks/copy_files.yml @@ -3,7 +3,7 @@ file: path: "{{ item.destination_path | dirname }}" state: directory - mode: "{{ item.directory_mode | default('0750') }}" + mode: "{{ item.directory_mode | default('750') }}" owner: "{{user}}" group: "{{group}}" loop: "{{ copy_files }}" @@ -12,7 +12,7 @@ copy: src: "{{ item.source_path }}" dest: "{{ item.destination_path }}" - mode: "{{ item.file_mode | default('0640') }}" + mode: "{{ item.file_mode | default('640') }}" owner: "{{user}}" group: "{{group}}" loop: "{{ copy_files }}" diff --git a/roles/common/tasks/debian.yml b/roles/common/tasks/debian.yml index 3085b5015..0078618d0 100644 --- a/roles/common/tasks/debian.yml +++ b/roles/common/tasks/debian.yml @@ -112,7 +112,7 @@ copy: content: 'Acquire::Check-Valid-Until "0";' dest: /etc/apt/apt.conf.d/skip-check - mode: 0644 + mode: '644' notify: - debian apt-get update when: diff --git a/roles/common/tasks/fetch_logs.yml b/roles/common/tasks/fetch_logs.yml index 67a8b4727..52f1704bb 100644 --- a/roles/common/tasks/fetch_logs.yml +++ b/roles/common/tasks/fetch_logs.yml @@ -3,7 +3,7 @@ file: state: directory path: "troubleshooting" - mode: 0755 + mode: '755' delegate_to: localhost vars: ansible_connection: local @@ -24,7 +24,7 @@ file: path: "{{fetch_logs_path}}/troubleshooting/{{inventory_hostname}}/" state: directory - mode: 0750 + mode: '750' owner: "{{user}}" group: "{{group}}" @@ -33,7 +33,7 @@ dest: "{{fetch_logs_path}}/troubleshooting/{{inventory_hostname}}/" src: "{{ item }}" remote_src: true - mode: 0750 + mode: '750' owner: "{{user}}" group: "{{group}}" loop: "{{ find_output.files | map(attribute='path') | list + [config_file] }}" @@ -45,7 +45,7 @@ remove: true format: gz force_archive: true - mode: 0750 + mode: '750' owner: "{{user}}" group: "{{group}}" diff --git a/roles/common/tasks/fips-redhat.yml b/roles/common/tasks/fips-redhat.yml new file mode 100644 index 000000000..ec8ce47fe --- /dev/null +++ b/roles/common/tasks/fips-redhat.yml @@ -0,0 +1,17 @@ +--- +- name: Get Java Path + shell: dirname $(dirname $(readlink -f $(which java))) + register: java_path + +- name: Disable JVM level FIPS + lineinfile: + path: "{{java_path.stdout}}/conf/security/java.security" + search_string: 
'security.useSystemPropertiesFile=true' + line: security.useSystemPropertiesFile=false + owner: root + group: root + mode: '644' + +- name: Configure crypto policies + shell: update-crypto-policies --set FIPS + become: true diff --git a/roles/common/tasks/main.yml b/roles/common/tasks/main.yml index 1727110dc..af6a8476f 100644 --- a/roles/common/tasks/main.yml +++ b/roles/common/tasks/main.yml @@ -70,7 +70,7 @@ group: "{{ omit if archive_group == '' else archive_group }}" owner: "{{ omit if archive_owner == '' else archive_owner }}" state: directory - mode: 0755 + mode: '755' when: installation_method == "archive" # If the target directory (i.e. creates) doesn't exist then download and expand the remote archive into target @@ -81,7 +81,7 @@ dest: "{{archive_destination_path}}" group: "{{ omit if archive_group == '' else archive_group }}" owner: "{{ omit if archive_owner == '' else archive_owner }}" - mode: 0755 + mode: '755' creates: "{{binary_base_path}}" when: installation_method == "archive" @@ -89,14 +89,14 @@ file: path: "{{ jolokia_jar_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: jolokia_enabled|bool - name: Copy Jolokia Jar copy: src: "{{ jolokia_jar_url }}" dest: "{{ jolokia_jar_path }}" - mode: 0755 + mode: '755' when: - jolokia_enabled|bool - not jolokia_url_remote|bool @@ -106,7 +106,7 @@ url: "{{ jolokia_jar_url }}" dest: "{{ jolokia_jar_path }}" force: "{{ jolokia_jar_url_force }}" - mode: 0755 + mode: '755' register: jolokia_download_result until: jolokia_download_result is success retries: 5 @@ -120,14 +120,14 @@ file: path: "{{ jmxexporter_jar_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: jmxexporter_enabled|bool - name: Copy Prometheus Jar copy: src: "{{ jmxexporter_jar_url }}" dest: "{{ jmxexporter_jar_path }}" - mode: 0755 + mode: '755' when: - jmxexporter_enabled|bool - not jmxexporter_url_remote|bool @@ -137,7 +137,7 @@ url: "{{ jmxexporter_jar_url }}" dest: "{{ jmxexporter_jar_path }}" force: "{{ jmxexporter_jar_url_force }}" - mode: 0755 + mode: '755' register: prometheus_download_result until: prometheus_download_result is success retries: 5 diff --git a/roles/common/tasks/masterkey.yml b/roles/common/tasks/masterkey.yml index 2670fcd27..e67db6bd1 100644 --- a/roles/common/tasks/masterkey.yml +++ b/roles/common/tasks/masterkey.yml @@ -20,7 +20,7 @@ copy: content: "{{ masterkey.stdout }}" dest: /tmp/masterkey - mode: 0640 + mode: '640' diff: "{{ not mask_sensitive_diff|bool }}" - name: Copy Security File Back to Ansible Host diff --git a/roles/common/tasks/rbac_setup.yml b/roles/common/tasks/rbac_setup.yml index f0a552319..318f46c43 100644 --- a/roles/common/tasks/rbac_setup.yml +++ b/roles/common/tasks/rbac_setup.yml @@ -12,7 +12,7 @@ force_basic_auth: true register: cluster_id_query until: cluster_id_query.status == 200 - retries: 20 + retries: "{{ mds_retries }}" delay: 10 when: cluster_id_source | default('erp') == 'erp' @@ -43,7 +43,7 @@ file: path: "{{ ssl_file_dir_final }}" state: directory - mode: 0755 + mode: '755' tags: - privileged - filesystem @@ -66,7 +66,7 @@ copy: src: "{{token_services_public_pem_file}}" dest: "{{rbac_enabled_public_pem_path}}" - mode: 0640 + mode: '640' owner: "{{user}}" group: "{{group}}" when: diff --git a/roles/common/tasks/redhat.yml b/roles/common/tasks/redhat.yml index fd8aa9b53..2b5f7af17 100644 --- a/roles/common/tasks/redhat.yml +++ b/roles/common/tasks/redhat.yml @@ -11,7 +11,7 @@ template: src: confluent.repo.j2 dest: /etc/yum.repos.d/confluent.repo - mode: 0644 + mode: '644' 
register: confluent_repo_result until: confluent_repo_result is success retries: 5 @@ -31,7 +31,7 @@ copy: src: "{{custom_yum_repofile_filepath}}" dest: /etc/yum.repos.d/custom-confluent.repo - mode: 0644 + mode: '644' register: custom_repo_result until: custom_repo_result is success retries: 5 @@ -98,3 +98,9 @@ ansible.builtin.pip: name: "{{pip_packages}}" tags: package + +- name: Enable FIPS on rhel 8+ + include_tasks: fips-redhat.yml + when: + - ansible_os_family == "RedHat" and ansible_distribution_major_version in ['8', '9'] + - fips_enabled|bool diff --git a/roles/common/tasks/secrets_protection.yml b/roles/common/tasks/secrets_protection.yml index 95710746d..e27b3905a 100644 --- a/roles/common/tasks/secrets_protection.yml +++ b/roles/common/tasks/secrets_protection.yml @@ -32,7 +32,7 @@ src: "{{ config_path }}" dest: "{{ config_path }}-backup" remote_src: true - mode: 0640 + mode: '640' owner: "{{ secrets_file_owner }}" group: "{{ secrets_file_group }}" when: config_stat.stat.exists @@ -41,7 +41,7 @@ template: src: properties.j2 dest: "{{ config_path }}" - mode: 0640 + mode: '640' owner: "{{ secrets_file_owner }}" group: "{{ secrets_file_group }}" vars: @@ -53,7 +53,7 @@ file: path: "{{ ssl_file_dir_final }}" state: directory - mode: 0755 + mode: '755' tags: - filesystem - privileged @@ -64,7 +64,7 @@ dest: "{{ secrets_file }}" owner: "{{ secrets_file_owner }}" group: "{{ secrets_file_group }}" - mode: 0640 + mode: '640' diff: "{{ not mask_sensitive_diff|bool }}" - name: Load masterkey @@ -116,7 +116,7 @@ src: "{{ config_path }}" dest: "{{ config_path }}-backup" remote_src: true - mode: 0640 + mode: '640' owner: "{{ secrets_file_owner }}" group: "{{ secrets_file_group }}" notify: "{{handler}}" diff --git a/roles/common/tasks/ubuntu.yml b/roles/common/tasks/ubuntu.yml index 5035b9dd4..d994f8b33 100644 --- a/roles/common/tasks/ubuntu.yml +++ b/roles/common/tasks/ubuntu.yml @@ -104,7 +104,7 @@ file: path: /usr/share/man/man1 state: directory - mode: 0755 + mode: '755' - name: Custom Java Install include_tasks: custom_java_install.yml diff --git a/roles/control_center/tasks/main.yml b/roles/control_center/tasks/main.yml index 597ce5ba5..d3e37d283 100644 --- a/roles/control_center/tasks/main.yml +++ b/roles/control_center/tasks/main.yml @@ -88,7 +88,7 @@ owner: "{{control_center_user}}" group: "{{control_center_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem @@ -108,7 +108,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{control_center.systemd_file|basename}}" remote_src: true dest: "{{control_center.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -171,7 +171,7 @@ file: path: "{{ control_center.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{control_center_user}}" group: "{{control_center_group}}" tags: @@ -182,7 +182,7 @@ template: src: control-center.properties.j2 dest: "{{control_center.config_file}}" - mode: 0640 + mode: '640' owner: "{{control_center_user}}" group: "{{control_center_group}}" notify: restart control center @@ -216,7 +216,7 @@ state: directory group: "{{control_center_group}}" owner: "{{control_center_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -240,7 +240,7 @@ path: "{{control_center.log4j_file}}" group: "{{control_center_group}}" owner: "{{control_center_user}}" - mode: 0640 + mode: '640' tags: - filesystem - log @@ -249,7 +249,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: 
(control_center_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -258,7 +258,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (control_center_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -289,7 +289,7 @@ path: "{{control_center_rocksdb_path}}" group: "{{control_center_group}}" owner: "{{control_center_user}}" - mode: 0750 + mode: '750' state: directory when: control_center_rocksdb_path != "" tags: @@ -309,7 +309,7 @@ template: src: jaas.conf.j2 dest: "{{control_center.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{control_center_user}}" group: "{{control_center_group}}" notify: restart control center @@ -319,7 +319,7 @@ template: src: password.properties.j2 dest: "{{control_center.password_file}}" - mode: 0640 + mode: '640' owner: "{{control_center_user}}" group: "{{control_center_group}}" notify: restart control center @@ -331,7 +331,7 @@ state: directory owner: "{{control_center_user}}" group: "{{control_center_group}}" - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -340,7 +340,7 @@ template: src: override.conf.j2 dest: "{{control_center.systemd_override}}" - mode: 0640 + mode: '640' owner: root group: root notify: restart control center diff --git a/roles/control_center/tasks/rbac.yml b/roles/control_center/tasks/rbac.yml index 5d9a664a5..6fc4231f9 100644 --- a/roles/control_center/tasks/rbac.yml +++ b/roles/control_center/tasks/rbac.yml @@ -27,7 +27,7 @@ status_code: 204 register: c3_mds_result until: c3_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: "{{control_center_additional_system_admins}}" when: not ansible_check_mode @@ -52,6 +52,6 @@ status_code: 204 register: c3_mds_result until: c3_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode diff --git a/roles/kafka_broker/defaults/main.yml b/roles/kafka_broker/defaults/main.yml index 2b44f2b4c..661244d74 100644 --- a/roles/kafka_broker/defaults/main.yml +++ b/roles/kafka_broker/defaults/main.yml @@ -24,7 +24,6 @@ kafka_broker_java_args: - "{% if kafka_broker_jolokia_enabled|bool %}-javaagent:{{jolokia_jar_path}}=config={{kafka_broker_jolokia_config}}{% endif %}" - "{% if kafka_broker_jmxexporter_enabled|bool %}-javaagent:{{jmxexporter_jar_path}}={{kafka_broker_jmxexporter_port}}:{{kafka_broker_jmxexporter_config_path}}{% endif %}" - "{% if zookeeper_client_authentication_type == 'kerberos' and zookeeper_kerberos_primary != 'zookeeper' %}-Dzookeeper.sasl.client.username={{zookeeper_kerberos_primary}}{% endif %}" - - "{% if fips_enabled %}-Djdk.tls.namedGroups='secp256r1,secp384r1,ffdhe2048,ffdhe3072'{% endif %}" - "{% if kerberos_client_config_file_dest != '/etc/krb5.conf' %}-Djava.security.krb5.conf={{kerberos_client_config_file_dest}}{% endif %}" # Strip primary from the zookeeper principal on first zk host. Adds defaults for if there is not even a zookeeper group @@ -45,7 +44,7 @@ kafka_broker_service_overrides: ### Environment Variables to be added to the Kafka Service. This variable is a dictionary. 
kafka_broker_service_environment_overrides: - KAFKA_HEAP_OPTS: "-Xms6g -Xmx6g -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80" + KAFKA_HEAP_OPTS: "-Xms1g -Xmx6g -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80" KAFKA_OPTS: "{{ kafka_broker_final_java_args | confluent.platform.java_arg_build_out }}" # Remove trailing slash if there is one KAFKA_LOG4J_OPTS: "{% if kafka_broker_custom_log4j|bool %}-Dlog4j.configuration=file:{{ kafka_broker.log4j_file }}{% endif %}" diff --git a/roles/kafka_broker/tasks/dynamic_groups.yml b/roles/kafka_broker/tasks/dynamic_groups.yml index fcf3d415e..da40c276c 100644 --- a/roles/kafka_broker/tasks/dynamic_groups.yml +++ b/roles/kafka_broker/tasks/dynamic_groups.yml @@ -18,7 +18,7 @@ template: src: zookeeper-tls-client.properties.j2 dest: "{{ kafka_broker.zookeeper_tls_client_config_file }}" - mode: 0640 + mode: '640' owner: "{{ kafka_broker_user }}" group: "{{ kafka_broker_group }}" when: zookeeper_ssl_enabled|bool diff --git a/roles/kafka_broker/tasks/health_check.yml b/roles/kafka_broker/tasks/health_check.yml index a764ee57d..374e98a48 100644 --- a/roles/kafka_broker/tasks/health_check.yml +++ b/roles/kafka_broker/tasks/health_check.yml @@ -45,7 +45,7 @@ force_basic_auth: true register: mds_result until: mds_result.status == 200 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 ignore_errors: true when: @@ -64,7 +64,7 @@ force_basic_auth: true register: erp_result until: erp_result.status == 200 - retries: 25 + retries: "{{ mds_retries }}" delay: 5 ignore_errors: true when: diff --git a/roles/kafka_broker/tasks/main.yml b/roles/kafka_broker/tasks/main.yml index 084b9b7e7..d3d53650d 100644 --- a/roles/kafka_broker/tasks/main.yml +++ b/roles/kafka_broker/tasks/main.yml @@ -109,7 +109,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{kafka_broker.systemd_file|basename}}" remote_src: true dest: "{{kafka_broker.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -176,7 +176,7 @@ owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" state: directory - mode: 0750 + mode: '750' tags: - privileged - filesystem @@ -187,7 +187,7 @@ owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" state: directory - mode: 0750 + mode: '750' with_items: "{{ kafka_broker_final_properties['log.dirs'].split(',') }}" tags: - filesystem @@ -206,7 +206,7 @@ file: path: "{{ kafka_broker.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" tags: @@ -216,7 +216,7 @@ template: src: server.properties.j2 dest: "{{kafka_broker.config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" notify: restart kafka @@ -228,7 +228,7 @@ template: src: client.properties.j2 dest: "{{kafka_broker.client_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" diff: "{{ not mask_sensitive_diff|bool }}" @@ -244,7 +244,7 @@ template: src: zookeeper-tls-client.properties.j2 dest: "{{kafka_broker.zookeeper_tls_client_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: zookeeper_ssl_enabled|bool and not kraft_enabled|bool @@ -257,7 +257,7 @@ state: 
directory group: "{{kafka_broker_group}}" owner: "{{kafka_broker_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -282,7 +282,7 @@ path: "{{kafka_broker.log4j_file}}" group: "{{kafka_broker_group}}" owner: "{{kafka_broker_user}}" - mode: 0640 + mode: '640' tags: - filesystem - log @@ -291,7 +291,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (kafka_broker_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -300,7 +300,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (kafka_broker_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -330,7 +330,7 @@ template: src: kafka_jolokia.properties.j2 dest: "{{kafka_broker_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: kafka_broker_jolokia_enabled|bool @@ -343,11 +343,10 @@ template: src: kafka_server_jaas.conf.j2 dest: "{{kafka_broker.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: "'GSSAPI' in kafka_broker_sasl_enabled_mechanisms or - zookeeper_sasl_protocol in ['kerberos', 'digest'] or zookeeper_client_authentication_type in ['kerberos', 'digest'] or (kafka_broker_rest_proxy_enabled and (not rbac_enabled or (rbac_enabled and external_mds_enabled)) and kafka_broker_rest_proxy_authentication_type == 'basic')" notify: restart kafka @@ -358,7 +357,7 @@ template: src: password.properties.j2 dest: "{{kafka_broker.rest_proxy_password_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: kafka_broker_rest_proxy_enabled and (not rbac_enabled or (rbac_enabled and external_mds_enabled)) and kafka_broker_rest_proxy_authentication_type == 'basic' @@ -412,7 +411,7 @@ template: src: "{{kafka_broker_jmxexporter_config_source_path}}" dest: "{{kafka_broker_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: kafka_broker_jmxexporter_enabled|bool @@ -425,7 +424,7 @@ owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -434,7 +433,7 @@ template: src: override.conf.j2 dest: "{{ kafka_broker.systemd_override }}" - mode: 0640 + mode: '640' owner: root group: root notify: restart kafka @@ -447,7 +446,7 @@ file: path: /usr/lib/sysctl.d state: directory - mode: 0755 + mode: '755' when: ansible_distribution == "Debian" tags: - sysctl @@ -492,8 +491,8 @@ delegate_to: "{{item}}" when: - kafka_controller_secrets_protection_enabled|bool or kafka_controller_client_secrets_protection_enabled|bool - - rbac_enabled|bool and not external_mds_enabled|bool - - confluent_cli_version is version('3.0.0', '<') + - rbac_enabled|bool + - kafka_controller_ssl_mutual_auth_enabled|bool or ( not external_mds_enabled|bool and confluent_cli_version is version('3.0.0', '<')) - kraft_enabled|bool run_once: true diff --git a/roles/kafka_broker/tasks/rbac.yml b/roles/kafka_broker/tasks/rbac.yml index 90e7dbff9..2962e23d6 100644 --- a/roles/kafka_broker/tasks/rbac.yml +++ b/roles/kafka_broker/tasks/rbac.yml @@ -12,6 +12,13 @@ delegate_facts: true run_once: true +- name: Fail if No Authentication is set + fail: + msg: "Please configure Authentication on the Kafka cluster." 
+ loop: "{{ groups['kafka_broker'] }}" + when: hostvars[item]['kafka_broker_principal'] is undefined + run_once: true + - name: Show principals debug: msg: "Principal for {{item}} is {{ hostvars[item]['kafka_broker_principal'] }}" @@ -63,7 +70,7 @@ file: path: /var/ssl/private state: directory - mode: 0755 + mode: '755' tags: - privileged - filesystem @@ -86,7 +93,7 @@ copy: src: "{{token_services_public_pem_file}}" dest: "{{rbac_enabled_public_pem_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: @@ -112,7 +119,7 @@ copy: src: "{{token_services_private_pem_file}}" dest: "{{rbac_enabled_private_pem_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_broker_user}}" group: "{{kafka_broker_group}}" when: diff --git a/roles/kafka_broker/tasks/rbac_rolebindings.yml b/roles/kafka_broker/tasks/rbac_rolebindings.yml index 39b56a827..58c759cb2 100644 --- a/roles/kafka_broker/tasks/rbac_rolebindings.yml +++ b/roles/kafka_broker/tasks/rbac_rolebindings.yml @@ -27,7 +27,7 @@ status_code: 204 register: kb_mds_result until: kb_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: "{{kafka_broker_additional_system_admins}}" when: not ansible_check_mode @@ -58,7 +58,7 @@ status_code: 204 register: kb_mds_result until: kb_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: audit_logs_destination_enabled|bool and not ansible_check_mode @@ -88,7 +88,7 @@ status_code: 204 register: kb_mds_result until: kb_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: audit_logs_destination_enabled|bool and not ansible_check_mode run_once: true diff --git a/roles/kafka_broker/tasks/register_cluster.yml b/roles/kafka_broker/tasks/register_cluster.yml index d51c5945f..493488b2f 100644 --- a/roles/kafka_broker/tasks/register_cluster.yml +++ b/roles/kafka_broker/tasks/register_cluster.yml @@ -36,5 +36,5 @@ run_once: true register: output until: output.status == 204 - retries: 20 + retries: "{{ mds_retries }}" delay: 10 diff --git a/roles/kafka_broker/tasks/secrets_protection.yml b/roles/kafka_broker/tasks/secrets_protection.yml index fd08ed89b..b8e85955e 100644 --- a/roles/kafka_broker/tasks/secrets_protection.yml +++ b/roles/kafka_broker/tasks/secrets_protection.yml @@ -37,7 +37,7 @@ template: src: override.conf.j2 dest: "{{ kafka_broker.systemd_override }}" - mode: 0640 + mode: '640' owner: root group: root diff: "{{ not mask_sensitive_diff|bool }}" diff --git a/roles/kafka_broker/tasks/set_controller_principal.yml b/roles/kafka_broker/tasks/set_controller_principal.yml index c5363fcf8..3881e7da2 100644 --- a/roles/kafka_broker/tasks/set_controller_principal.yml +++ b/roles/kafka_broker/tasks/set_controller_principal.yml @@ -1,9 +1,24 @@ --- +- name: Initialize Broker principals list in Controller + set_fact: + broker_principals: [] + +- name: Add Each Broker's Principal to Controller's List + set_fact: + broker_principals: "{{ broker_principals + [ hostvars[broker_item]['kafka_broker_principal'] ] }}" + loop: "{{ groups['kafka_broker'] }}" + loop_control: + loop_var: broker_item + +- name: Remove Duplicates and Convert to String + set_fact: + broker_principals: "{{ broker_principals | unique | join(';') }}" + - name: Add Super Users list to Kafka Controller Properties set_fact: kafka_controller_final_properties: "{{ hostvars[controller_host]['kafka_controller_final_properties'] | combine( { - 'super.users': hostvars[controller_host]['super_users'] + ';' + 
hostvars[groups.kafka_broker[0]]['kafka_broker_principal'] + 'super.users': hostvars[controller_host]['super_users'] + ';' + broker_principals } ) }}" diff --git a/roles/kafka_broker/templates/client.properties.j2 b/roles/kafka_broker/templates/client.properties.j2 index 25e89ca5e..5ecfa5f8c 100644 --- a/roles/kafka_broker/templates/client.properties.j2 +++ b/roles/kafka_broker/templates/client.properties.j2 @@ -4,5 +4,3 @@ {% for key, value in kafka_broker_client_properties|dictsort%} {{key}}={{value}} {% endfor %} -default.api.timeout.ms=20000 -request.timeout.ms=20000 diff --git a/roles/kafka_connect/tasks/confluent_hub.yml b/roles/kafka_connect/tasks/confluent_hub.yml index 66c9af0ca..8c4685e22 100644 --- a/roles/kafka_connect/tasks/confluent_hub.yml +++ b/roles/kafka_connect/tasks/confluent_hub.yml @@ -30,7 +30,7 @@ owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" state: directory - mode: 0750 + mode: '750' loop: "{{ kafka_connect_confluent_hub_plugins }}" when: kafka_connect_confluent_hub_plugins|length > 0 diff --git a/roles/kafka_connect/tasks/connect_plugins.yml b/roles/kafka_connect/tasks/connect_plugins.yml index ac7e66843..d7b9e5176 100644 --- a/roles/kafka_connect/tasks/connect_plugins.yml +++ b/roles/kafka_connect/tasks/connect_plugins.yml @@ -5,7 +5,7 @@ state: directory group: "{{kafka_connect_group}}" owner: "{{kafka_connect_user}}" - mode: 0755 + mode: '755' when: item != '/usr/share/java' with_items: "{{ kafka_connect_final_properties['plugin.path'].split(',') }}" @@ -41,7 +41,7 @@ file: path: /tmp/remote_plugins state: directory - mode: 0755 + mode: '755' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" @@ -49,7 +49,7 @@ get_url: url: "{{item}}" dest: /tmp/remote_plugins - mode: 0755 + mode: '755' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" register: download_remote_plugin_result diff --git a/roles/kafka_connect/tasks/main.yml b/roles/kafka_connect/tasks/main.yml index 43fcf1a45..00757cb49 100644 --- a/roles/kafka_connect/tasks/main.yml +++ b/roles/kafka_connect/tasks/main.yml @@ -89,7 +89,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{kafka_connect_default_service_name}}.service" remote_src: true dest: "{{kafka_connect.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -102,7 +102,7 @@ src: "{{systemd_base_dir}}/{{kafka_connect_default_service_name}}.service" remote_src: true dest: "{{kafka_connect.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method != "archive" and kafka_connect_default_service_name != kafka_connect_service_name tags: @@ -163,7 +163,7 @@ file: path: "{{ kafka_connect.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" tags: @@ -173,7 +173,7 @@ template: src: connect-distributed.properties.j2 dest: "{{kafka_connect.config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" notify: restart connect distributed @@ -210,7 +210,7 @@ state: directory group: "{{kafka_connect_group}}" owner: "{{kafka_connect_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -235,7 +235,7 @@ path: "{{kafka_connect.log4j_file}}" group: "{{kafka_connect_group}}" owner: "{{kafka_connect_user}}" - mode: 0640 + mode: '640' tags: - filesystem - log @@ -244,7 +244,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (kafka_connect_custom_log4j|bool) and 
(logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -253,7 +253,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (kafka_connect_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -283,7 +283,7 @@ template: src: kafka_connect_jolokia.properties.j2 dest: "{{kafka_connect_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" when: kafka_connect_jolokia_enabled|bool @@ -296,7 +296,7 @@ copy: src: "{{kafka_connect_jmxexporter_config_source_path}}" dest: "{{kafka_connect_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" when: kafka_connect_jmxexporter_enabled|bool @@ -307,7 +307,7 @@ template: src: jaas.conf.j2 dest: "{{kafka_connect.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" notify: restart connect distributed @@ -317,7 +317,7 @@ template: src: password.properties.j2 dest: "{{kafka_connect.password_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" notify: restart connect distributed @@ -329,7 +329,7 @@ owner: "{{kafka_connect_user}}" group: "{{kafka_connect_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -338,7 +338,7 @@ template: src: override.conf.j2 dest: "{{ kafka_connect.systemd_override }}" - mode: 0640 + mode: '640' owner: root group: root notify: restart connect distributed diff --git a/roles/kafka_connect/tasks/rbac.yml b/roles/kafka_connect/tasks/rbac.yml index 73c317592..3a8d23370 100644 --- a/roles/kafka_connect/tasks/rbac.yml +++ b/roles/kafka_connect/tasks/rbac.yml @@ -28,7 +28,7 @@ status_code: 204 register: connect_mds_result until: connect_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: "{{kafka_connect_additional_system_admins}}" when: not ansible_check_mode @@ -54,7 +54,7 @@ status_code: 204 register: connect_mds_result until: connect_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -107,7 +107,7 @@ status_code: 204 register: connect_mds_result until: connect_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -145,7 +145,7 @@ status_code: 204 register: connect_mds_result until: connect_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: kafka_connect_secret_registry_enabled|bool and not ansible_check_mode @@ -176,6 +176,6 @@ status_code: 204 register: connect_mds_result until: connect_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: kafka_connect_monitoring_interceptors_enabled|bool and not ansible_check_mode diff --git a/roles/kafka_connect/tasks/register_cluster.yml b/roles/kafka_connect/tasks/register_cluster.yml index ff3c0dcfc..2f8d651a3 100644 --- a/roles/kafka_connect/tasks/register_cluster.yml +++ b/roles/kafka_connect/tasks/register_cluster.yml @@ -44,7 +44,7 @@ status_code: 204 register: output until: output.status == 204 - retries: 20 + retries: "{{ mds_retries }}" delay: 10 when: - hostvars[item].get("rbac_enabled", false)|bool diff --git a/roles/kafka_connect_replicator/tasks/main.yml b/roles/kafka_connect_replicator/tasks/main.yml index 974a0e429..64191f3b6 100644 --- a/roles/kafka_connect_replicator/tasks/main.yml +++ 
b/roles/kafka_connect_replicator/tasks/main.yml @@ -264,7 +264,7 @@ file: path: "{{ kafka_connect_replicator.replication_config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" tags: @@ -276,7 +276,7 @@ state: directory group: "{{kafka_connect_replicator_group}}" owner: "{{kafka_connect_replicator_user}}" - mode: 0770 + mode: '770' tags: - filesystem @@ -284,7 +284,7 @@ template: src: kafka-connect-replicator-log4j.properties.j2 dest: "{{kafka_connect_replicator.log4j_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" when: kafka_connect_replicator_custom_log4j|bool @@ -296,7 +296,7 @@ template: src: kafka-connect-replicator-jolokia.properties.j2 dest: "{{kafka_connect_replicator_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" when: kafka_connect_replicator_jolokia_enabled|bool @@ -308,7 +308,7 @@ template: src: kafka-connect-replicator.properties.j2 dest: "{{kafka_connect_replicator.replication_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" notify: Restart Kafka Connect Replicator @@ -319,7 +319,7 @@ template: src: kafka-connect-replicator-consumer.properties.j2 dest: "{{kafka_connect_replicator.consumer_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" notify: Restart Kafka Connect Replicator @@ -330,7 +330,7 @@ template: src: kafka-connect-replicator-producer.properties.j2 dest: "{{kafka_connect_replicator.producer_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" notify: Restart Kafka Connect Replicator @@ -341,7 +341,7 @@ template: src: kafka-connect-replicator-interceptors.properties.j2 dest: "{{kafka_connect_replicator.interceptors_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" notify: Restart Kafka Connect Replicator @@ -352,7 +352,7 @@ template: src: kafka-connect-replicator.service.j2 dest: "{{kafka_connect_replicator.systemd_file}}" - mode: 0640 + mode: '640' owner: root group: root notify: Restart Kafka Connect Replicator @@ -365,7 +365,7 @@ owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd @@ -373,7 +373,7 @@ template: src: override.conf.j2 dest: "{{ kafka_connect_replicator.systemd_override }}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" notify: Restart Kafka Connect Replicator diff --git a/roles/kafka_connect_replicator/tasks/rbac_replicator.yml b/roles/kafka_connect_replicator/tasks/rbac_replicator.yml index 29f9b5687..c1e0aa35b 100644 --- a/roles/kafka_connect_replicator/tasks/rbac_replicator.yml +++ b/roles/kafka_connect_replicator/tasks/rbac_replicator.yml @@ -3,7 +3,7 @@ file: path: /var/ssl/private/kafka_connect_replicator state: directory - mode: 0755 + mode: '755' tags: - privileged @@ -25,7 +25,7 @@ copy: src: "{{ kafka_connect_replicator_erp_pem_file }}" dest: "{{ kafka_connect_replicator_rbac_enabled_public_pem_path }}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" 
diff: "{{ not mask_sensitive_diff|bool }}" diff --git a/roles/kafka_connect_replicator/tasks/rbac_replicator_consumer.yml b/roles/kafka_connect_replicator/tasks/rbac_replicator_consumer.yml index bd540f3ab..2b54d9fab 100644 --- a/roles/kafka_connect_replicator/tasks/rbac_replicator_consumer.yml +++ b/roles/kafka_connect_replicator/tasks/rbac_replicator_consumer.yml @@ -3,7 +3,7 @@ file: path: /var/ssl/private/kafka_connect_replicator_consumer state: directory - mode: 0755 + mode: '755' tags: - privileged @@ -25,7 +25,7 @@ copy: src: "{{ kafka_connect_replicator_consumer_erp_pem_file }}" dest: "{{ kafka_connect_replicator_consumer_rbac_enabled_public_pem_path }}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" when: replicator_pem_file.stat.exists|bool diff --git a/roles/kafka_connect_replicator/tasks/rbac_replicator_monitoring.yml b/roles/kafka_connect_replicator/tasks/rbac_replicator_monitoring.yml index 1cf1af95d..d25d4641c 100644 --- a/roles/kafka_connect_replicator/tasks/rbac_replicator_monitoring.yml +++ b/roles/kafka_connect_replicator/tasks/rbac_replicator_monitoring.yml @@ -3,7 +3,7 @@ file: path: /var/ssl/private/kafka_connect_replicator_monitoring_interceptor state: directory - mode: 0755 + mode: '755' tags: - privileged @@ -25,7 +25,7 @@ copy: src: "{{ kafka_connect_replicator_monitoring_interceptor_erp_pem_file }}" dest: "{{ kafka_connect_replicator_monitoring_interceptor_rbac_enabled_public_pem_path }}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" when: replicator_pem_file.stat.exists|bool diff --git a/roles/kafka_connect_replicator/tasks/rbac_replicator_producer.yml b/roles/kafka_connect_replicator/tasks/rbac_replicator_producer.yml index de95a002f..c9509f96b 100644 --- a/roles/kafka_connect_replicator/tasks/rbac_replicator_producer.yml +++ b/roles/kafka_connect_replicator/tasks/rbac_replicator_producer.yml @@ -3,7 +3,7 @@ file: path: /var/ssl/private/kafka_connect_replicator_producer state: directory - mode: 0755 + mode: '755' tags: - privileged @@ -25,7 +25,7 @@ copy: src: "{{ kafka_connect_replicator_producer_erp_pem_file }}" dest: "{{ kafka_connect_replicator_producer_rbac_enabled_public_pem_path }}" - mode: 0640 + mode: '640' owner: "{{kafka_connect_replicator_user}}" group: "{{kafka_connect_replicator_group}}" when: replicator_pem_file.stat.exists|bool diff --git a/roles/kafka_controller/defaults/main.yml b/roles/kafka_controller/defaults/main.yml index dd9e3410f..43ee0424e 100644 --- a/roles/kafka_controller/defaults/main.yml +++ b/roles/kafka_controller/defaults/main.yml @@ -22,7 +22,6 @@ kafka_controller_java_args: - "{% if 'GSSAPI' in kafka_controller_sasl_enabled_mechanisms %}-Djava.security.auth.login.config={{kafka_controller.jaas_file}}{% endif %}" - "{% if kafka_controller_jolokia_enabled|bool %}-javaagent:{{jolokia_jar_path}}=config={{kafka_controller_jolokia_config}}{% endif %}" - "{% if kafka_controller_jmxexporter_enabled|bool %}-javaagent:{{jmxexporter_jar_path}}={{kafka_controller_jmxexporter_port}}:{{kafka_controller_jmxexporter_config_path}}{% endif %}" - - "{% if fips_enabled %}-Djdk.tls.namedGroups='secp256r1,secp384r1,ffdhe2048,ffdhe3072'{% endif %}" - "{% if kerberos_client_config_file_dest != '/etc/krb5.conf' %}-Djava.security.krb5.conf={{kerberos_client_config_file_dest}}{% endif %}" ### Custom Java Args to add to the Kafka Process diff --git a/roles/kafka_controller/tasks/create_config.yml 
b/roles/kafka_controller/tasks/create_config.yml index 1f9a859cb..f65357e40 100644 --- a/roles/kafka_controller/tasks/create_config.yml +++ b/roles/kafka_controller/tasks/create_config.yml @@ -3,7 +3,7 @@ template: src: server.properties.j2 dest: "{{kafka_controller.config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" diff: "{{ not mask_sensitive_diff|bool }}" diff --git a/roles/kafka_controller/tasks/main.yml b/roles/kafka_controller/tasks/main.yml index 83fe38a55..6be894963 100644 --- a/roles/kafka_controller/tasks/main.yml +++ b/roles/kafka_controller/tasks/main.yml @@ -103,7 +103,7 @@ src: "{{ ((binary_base_path, 'lib/systemd/system') | path_join if installation_method=='archive' else systemd_base_dir, kafka_broker.systemd_file|basename) | path_join }}" remote_src: true dest: "{{kafka_controller.systemd_file}}" - mode: 0644 + mode: '644' force: true tags: - systemd @@ -167,7 +167,7 @@ owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" state: directory - mode: 0750 + mode: '750' tags: - privileged - filesystem @@ -178,7 +178,7 @@ owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem - privileged @@ -196,7 +196,7 @@ file: path: "{{ kafka_controller.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" tags: @@ -208,7 +208,7 @@ template: src: server.properties.j2 dest: "{{kafka_controller.config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" notify: restart Kafka Controller @@ -221,7 +221,7 @@ template: src: client.properties.j2 dest: "{{kafka_controller.client_config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" diff: "{{ not mask_sensitive_diff|bool }}" @@ -238,7 +238,7 @@ state: directory group: "{{kafka_controller_group}}" owner: "{{kafka_controller_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -264,7 +264,7 @@ path: "{{kafka_controller.log4j_file}}" group: "{{kafka_controller_group}}" owner: "{{kafka_controller_user}}" - mode: 0640 + mode: '640' tags: - filesystem - log @@ -274,7 +274,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (kafka_controller_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -284,7 +284,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (kafka_controller_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -315,7 +315,7 @@ template: src: kafka_jolokia.properties.j2 dest: "{{kafka_controller_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" when: kafka_controller_jolokia_enabled|bool @@ -329,7 +329,7 @@ template: src: kafka_server_jaas.conf.j2 dest: "{{kafka_controller.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" when: @@ -343,7 +343,7 @@ template: src: "{{kafka_controller_jmxexporter_config_source_path}}" dest: "{{kafka_controller_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" when: kafka_controller_jmxexporter_enabled|bool @@ -357,7 +357,7 @@ owner: 
"{{kafka_controller_user}}" group: "{{kafka_controller_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -366,7 +366,7 @@ template: src: override.conf.j2 dest: "{{ kafka_controller.systemd_override }}" - mode: 0640 + mode: '640' owner: root group: root notify: restart Kafka Controller @@ -379,7 +379,7 @@ file: path: /usr/lib/sysctl.d state: directory - mode: 0755 + mode: '755' when: ansible_distribution == "Debian" tags: - sysctl diff --git a/roles/kafka_controller/tasks/rbac.yml b/roles/kafka_controller/tasks/rbac.yml index 138074292..a748506ed 100644 --- a/roles/kafka_controller/tasks/rbac.yml +++ b/roles/kafka_controller/tasks/rbac.yml @@ -12,6 +12,13 @@ delegate_facts: true run_once: true +- name: Fail if No Authentication is set + fail: + msg: "Please configure Authentication on the Kraft cluster." + loop: "{{ groups['kafka_controller'] }}" + when: hostvars[item]['kafka_controller_principal'] is undefined + run_once: true + - name: Show principals debug: msg: "Principal for {{item}} is {{ hostvars[item]['kafka_controller_principal'] }}" @@ -45,7 +52,7 @@ file: path: /var/ssl/private state: directory - mode: 0755 + mode: '755' tags: - privileged - filesystem @@ -68,7 +75,7 @@ copy: src: "{{token_services_public_pem_file}}" dest: "{{rbac_enabled_public_pem_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" when: @@ -94,7 +101,7 @@ copy: src: "{{token_services_private_pem_file}}" dest: "{{rbac_enabled_private_pem_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_controller_user}}" group: "{{kafka_controller_group}}" when: diff --git a/roles/kafka_controller/tasks/secrets_protection.yml b/roles/kafka_controller/tasks/secrets_protection.yml index 42765bcd5..1b40971ad 100644 --- a/roles/kafka_controller/tasks/secrets_protection.yml +++ b/roles/kafka_controller/tasks/secrets_protection.yml @@ -30,7 +30,7 @@ template: src: override.conf.j2 dest: "{{ kafka_controller.systemd_override }}" - mode: 0640 + mode: '640' owner: root group: root diff: "{{ not mask_sensitive_diff|bool }}" diff --git a/roles/kafka_controller/templates/client.properties.j2 b/roles/kafka_controller/templates/client.properties.j2 index 82d733dc5..a5e132320 100644 --- a/roles/kafka_controller/templates/client.properties.j2 +++ b/roles/kafka_controller/templates/client.properties.j2 @@ -4,6 +4,4 @@ {% for key, value in kafka_controller_client_properties|dictsort%} {{key}}={{value}} {% endfor %} -default.api.timeout.ms=20000 -request.timeout.ms=20000 confluent.use.controller.listener=true diff --git a/roles/kafka_rest/tasks/main.yml b/roles/kafka_rest/tasks/main.yml index 0f458a823..865c61e02 100644 --- a/roles/kafka_rest/tasks/main.yml +++ b/roles/kafka_rest/tasks/main.yml @@ -89,7 +89,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{kafka_rest.systemd_file|basename}}" remote_src: true dest: "{{kafka_rest.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -150,7 +150,7 @@ file: path: /var/ssl/private state: directory - mode: 0755 + mode: '755' when: rbac_enabled|bool tags: - filesystem @@ -189,7 +189,7 @@ file: path: "{{ kafka_rest.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" tags: @@ -199,7 +199,7 @@ template: src: kafka-rest.properties.j2 dest: "{{kafka_rest.config_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" notify: restart kafka-rest @@ -233,7 
+233,7 @@ state: directory owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -257,7 +257,7 @@ path: "{{kafka_rest.log4j_file}}" group: "{{kafka_rest_group}}" owner: "{{kafka_rest_user}}" - mode: 0640 + mode: '640' tags: - configuration - log @@ -266,7 +266,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (kafka_rest_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -275,7 +275,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (kafka_rest_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -305,7 +305,7 @@ template: src: kafka_rest_jolokia.properties.j2 dest: "{{kafka_rest_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" when: kafka_rest_jolokia_enabled|bool @@ -318,7 +318,7 @@ copy: src: "{{kafka_rest_jmxexporter_config_source_path}}" dest: "{{kafka_rest_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" when: kafka_rest_jmxexporter_enabled|bool @@ -329,7 +329,7 @@ template: src: jaas.conf.j2 dest: "{{kafka_rest.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" notify: restart kafka-rest @@ -339,7 +339,7 @@ template: src: password.properties.j2 dest: "{{kafka_rest.password_file}}" - mode: 0640 + mode: '640' owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" notify: restart kafka-rest @@ -351,7 +351,7 @@ owner: "{{kafka_rest_user}}" group: "{{kafka_rest_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -360,7 +360,7 @@ template: src: override.conf.j2 dest: "{{kafka_rest.systemd_override}}" - mode: 0640 + mode: '640' owner: root group: root notify: restart kafka-rest diff --git a/roles/kafka_rest/tasks/rbac.yml b/roles/kafka_rest/tasks/rbac.yml index f4dc8fedf..3c1407be2 100644 --- a/roles/kafka_rest/tasks/rbac.yml +++ b/roles/kafka_rest/tasks/rbac.yml @@ -36,7 +36,7 @@ status_code: 204 register: kr_mds_result until: kr_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -69,6 +69,6 @@ status_code: 204 register: kr_mds_result until: kr_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: kafka_rest_monitoring_interceptors_enabled|bool and not ansible_check_mode diff --git a/roles/kerberos/tasks/main.yml b/roles/kerberos/tasks/main.yml index d8f992e04..7b3e2bc78 100644 --- a/roles/kerberos/tasks/main.yml +++ b/roles/kerberos/tasks/main.yml @@ -3,7 +3,7 @@ ansible.builtin.file: path: "{{ kerberos_client_config_file_dest | dirname }}" state: directory - mode: '0755' + mode: '755' - name: Copy the client configuration file template: @@ -16,7 +16,7 @@ path: "{{ kerberos_keytab_destination_path | dirname }}" state: directory group: "{{kerberos_group}}" - mode: 0750 + mode: '750' tags: - privileged @@ -26,7 +26,7 @@ dest: "{{kerberos_keytab_destination_path}}" owner: "{{kerberos_user}}" group: "{{kerberos_group}}" - mode: 0640 + mode: '640' notify: "{{kerberos_handler}}" tags: - privileged diff --git a/roles/ksql/tasks/ccloud_certs.yml b/roles/ksql/tasks/ccloud_certs.yml index 251dee94f..2e272b226 100644 --- a/roles/ksql/tasks/ccloud_certs.yml +++ b/roles/ksql/tasks/ccloud_certs.yml @@ -3,7 +3,7 @@ get_url: url: 
"https://letsencrypt.org/certs/{{item}}" dest: /tmp/{{item}} - mode: 0755 + mode: '755' loop: - isrgrootx1.der - isrg-root-x2.der diff --git a/roles/ksql/tasks/main.yml b/roles/ksql/tasks/main.yml index 8ae2bb765..becc8cb45 100644 --- a/roles/ksql/tasks/main.yml +++ b/roles/ksql/tasks/main.yml @@ -88,7 +88,7 @@ owner: "{{ksql_user}}" group: "{{ksql_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem @@ -99,7 +99,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{ksql.systemd_file|basename}}" remote_src: true dest: "{{ksql.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -169,7 +169,7 @@ file: path: "{{ ksql.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{ksql_user}}" group: "{{ksql_group}}" tags: @@ -179,7 +179,7 @@ template: src: ksql-server.properties.j2 dest: "{{ksql.config_file}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" notify: restart ksql @@ -213,7 +213,7 @@ state: directory group: "{{ksql_group}}" owner: "{{ksql_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -224,7 +224,7 @@ owner: "{{ksql_user}}" group: "{{ksql_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem - log @@ -233,7 +233,7 @@ template: src: ksql-server_log4j.properties.j2 dest: "{{ksql.log4j_file}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" when: ksql_custom_log4j|bool @@ -246,7 +246,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (ksql_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -255,7 +255,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (ksql_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -285,7 +285,7 @@ template: src: ksql_jolokia.properties.j2 dest: "{{ksql_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" when: ksql_jolokia_enabled|bool @@ -299,7 +299,7 @@ path: "{{ksql_rocksdb_path}}" group: "{{ksql_group}}" owner: "{{ksql_user}}" - mode: 0750 + mode: '750' state: directory when: ksql_rocksdb_path != "" tags: @@ -319,7 +319,7 @@ copy: src: "{{ksql_jmxexporter_config_source_path}}" dest: "{{ksql_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" when: ksql_jmxexporter_enabled|bool @@ -330,7 +330,7 @@ template: src: jaas.conf.j2 dest: "{{ksql.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" notify: restart ksql @@ -340,7 +340,7 @@ template: src: password.properties.j2 dest: "{{ksql.password_file}}" - mode: 0640 + mode: '640' owner: "{{ksql_user}}" group: "{{ksql_group}}" notify: restart ksql @@ -352,7 +352,7 @@ owner: "{{ksql_user}}" group: "{{ksql_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -361,7 +361,7 @@ template: src: override.conf.j2 dest: "{{ksql.systemd_override}}" - mode: 0640 + mode: '640' owner: root group: root notify: restart ksql diff --git a/roles/ksql/tasks/rbac.yml b/roles/ksql/tasks/rbac.yml index 376821e5b..1ed274aee 100644 --- a/roles/ksql/tasks/rbac.yml +++ b/roles/ksql/tasks/rbac.yml @@ -32,7 +32,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: "{{ksql_additional_system_admins}}" when: not 
ansible_check_mode @@ -64,7 +64,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -102,7 +102,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -133,7 +133,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -165,7 +165,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: ("'schema_registry' in groups") and not ansible_check_mode @@ -196,7 +196,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: ksql_monitoring_interceptors_enabled|bool and not ansible_check_mode @@ -229,7 +229,7 @@ status_code: 204 register: ksql_mds_result until: ksql_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: - "{{ksql_ldap_user}}" diff --git a/roles/ksql/tasks/register_cluster.yml b/roles/ksql/tasks/register_cluster.yml index 460bc44d0..9b7fa022e 100644 --- a/roles/ksql/tasks/register_cluster.yml +++ b/roles/ksql/tasks/register_cluster.yml @@ -45,7 +45,7 @@ status_code: 204 register: output until: output.status == 204 - retries: 20 + retries: "{{ mds_retries }}" delay: 10 when: - hostvars[item].get("rbac_enabled", false)|bool diff --git a/roles/schema_registry/tasks/main.yml b/roles/schema_registry/tasks/main.yml index 6816f964b..213fe5442 100644 --- a/roles/schema_registry/tasks/main.yml +++ b/roles/schema_registry/tasks/main.yml @@ -90,7 +90,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{schema_registry.systemd_file|basename}}" remote_src: true dest: "{{schema_registry.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -150,7 +150,7 @@ file: path: "{{ schema_registry.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" tags: @@ -160,7 +160,7 @@ template: src: schema-registry.properties.j2 dest: "{{schema_registry.config_file}}" - mode: 0640 + mode: '640' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" notify: restart schema-registry @@ -195,7 +195,7 @@ state: directory group: "{{schema_registry_group}}" owner: "{{schema_registry_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -219,7 +219,7 @@ path: "{{schema_registry.log4j_file}}" group: "{{schema_registry_group}}" owner: "{{schema_registry_user}}" - mode: 0640 + mode: '640' tags: - configuration - log @@ -228,7 +228,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (schema_registry_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -237,7 +237,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (schema_registry_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -267,7 +267,7 @@ template: src: schema_registry_jolokia.properties.j2 dest: "{{schema_registry_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" when: 
schema_registry_jolokia_enabled|bool @@ -280,7 +280,7 @@ copy: src: "{{schema_registry_jmxexporter_config_source_path}}" dest: "{{schema_registry_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" when: schema_registry_jmxexporter_enabled|bool @@ -291,7 +291,7 @@ template: src: jaas.conf.j2 dest: "{{schema_registry.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" notify: restart schema-registry @@ -301,7 +301,7 @@ template: src: password.properties.j2 dest: "{{schema_registry.password_file}}" - mode: 0640 + mode: '640' owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" notify: restart schema-registry @@ -313,7 +313,7 @@ owner: "{{schema_registry_user}}" group: "{{schema_registry_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -322,7 +322,7 @@ template: src: override.conf.j2 dest: "{{schema_registry.systemd_override}}" - mode: 0640 + mode: '640' owner: root group: root notify: restart schema-registry diff --git a/roles/schema_registry/tasks/rbac.yml b/roles/schema_registry/tasks/rbac.yml index bb7287b0e..3b74a2141 100644 --- a/roles/schema_registry/tasks/rbac.yml +++ b/roles/schema_registry/tasks/rbac.yml @@ -28,7 +28,7 @@ status_code: 204 register: sr_mds_result until: sr_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 loop: "{{schema_registry_additional_system_admins}}" when: not ansible_check_mode @@ -54,7 +54,7 @@ status_code: 204 register: sr_mds_result until: sr_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode @@ -97,6 +97,6 @@ status_code: 204 register: sr_mds_result until: sr_mds_result.status == 204 - retries: 30 + retries: "{{ mds_retries }}" delay: 5 when: not ansible_check_mode diff --git a/roles/schema_registry/tasks/register_cluster.yml b/roles/schema_registry/tasks/register_cluster.yml index 524416560..b8ca889a6 100644 --- a/roles/schema_registry/tasks/register_cluster.yml +++ b/roles/schema_registry/tasks/register_cluster.yml @@ -36,5 +36,5 @@ run_once: true register: output until: output.status == 204 - retries: 20 + retries: "{{ mds_retries }}" delay: 10 diff --git a/roles/ssl/tasks/import_ca_chain.yml b/roles/ssl/tasks/import_ca_chain.yml index 7873b83d2..28d0d3a63 100644 --- a/roles/ssl/tasks/import_ca_chain.yml +++ b/roles/ssl/tasks/import_ca_chain.yml @@ -3,7 +3,7 @@ file: path: "{{ ssl_file_dir_final }}/generation/trustCAs" state: directory - mode: 0755 + mode: '755' - name: Split CA Certificate Bundle into Cert Files shell: | diff --git a/roles/ssl/tasks/main.yml b/roles/ssl/tasks/main.yml index 135fc8404..2e6e791b3 100644 --- a/roles/ssl/tasks/main.yml +++ b/roles/ssl/tasks/main.yml @@ -28,7 +28,7 @@ path: "{{item}}" owner: "{{user}}" group: "{{group}}" - mode: 0640 + mode: '640' loop: - "{{keystore_path}}" - "{{truststore_path}}" @@ -39,7 +39,7 @@ path: "{{item}}" owner: "{{user}}" group: "{{group}}" - mode: 0640 + mode: '640' loop: - "{{ca_cert_path}}" - "{{cert_path}}" @@ -51,7 +51,7 @@ path: "{{item}}" owner: "{{user}}" group: "{{group}}" - mode: 0640 + mode: '640' loop: - "{{bcfks_keystore_path}}" - "{{bcfks_truststore_path}}" diff --git a/roles/ssl/tasks/manage_keystore_and_truststore.yml b/roles/ssl/tasks/manage_keystore_and_truststore.yml index 5c0b8fc8e..da33c8c67 100644 --- a/roles/ssl/tasks/manage_keystore_and_truststore.yml +++ b/roles/ssl/tasks/manage_keystore_and_truststore.yml @@ 
-32,7 +32,7 @@ file: path: "{{ ssl_file_dir_final }}/generation" state: directory - mode: 0755 + mode: '755' - name: Create Keystore and Truststore with Self Signed Certs include_tasks: self_signed_certs.yml diff --git a/roles/ssl/tasks/provided_keystore_and_truststore.yml b/roles/ssl/tasks/provided_keystore_and_truststore.yml index d50bc223a..d40e55b3d 100644 --- a/roles/ssl/tasks/provided_keystore_and_truststore.yml +++ b/roles/ssl/tasks/provided_keystore_and_truststore.yml @@ -5,7 +5,7 @@ dest: "{{truststore_path}}" owner: "{{user}}" group: "{{group}}" - mode: 0640 + mode: '640' when: not ( ssl_provided_keystore_and_truststore_remote_src|bool ) - name: Copy Host Keystore to Host if on control node @@ -14,7 +14,7 @@ dest: "{{keystore_path}}" owner: "{{user}}" group: "{{group}}" - mode: 0640 + mode: '640' when: not ( ssl_provided_keystore_and_truststore_remote_src|bool ) - name: Get stats and permissions of keystore diff --git a/roles/variables/defaults/main.yml b/roles/variables/defaults/main.yml index 237775c44..05e71c2bc 100644 --- a/roles/variables/defaults/main.yml +++ b/roles/variables/defaults/main.yml @@ -2,7 +2,7 @@ # Custom filters used in this file are defined in plugins/filter/filters.py ### Version of Confluent Platform to install -confluent_package_version: 7.4.0 +confluent_package_version: 7.5.3 confluent_full_package_version: "{{ confluent_package_version + '-1' }}" confluent_package_redhat_suffix: "{{ '-' + confluent_full_package_version if confluent_full_package_version != '' else ''}}" @@ -58,7 +58,7 @@ jmxexporter_jar_path: /opt/prometheus/jmx_prometheus_javaagent.jar ### Boolean to force update of Prometheus Exporter Agent Jar (must be set to true if jmxexporter_jar_path already exists) jmxexporter_jar_url_force: false -### Boolean to have cp-ansible configure components with FIPS security settings. Must have ssl_enabled: true and use Java 8 or 11. Only valid for self signed certs and ssl_custom_certs: true, not ssl_provided_keystore_and_truststore: true. +### Boolean to have cp-ansible configure components with FIPS security settings. Must have ssl_enabled: true. Only valid for self signed certs and ssl_custom_certs: true, not ssl_provided_keystore_and_truststore: true. Refer to the CP-Ansible docs for prerequisites. fips_enabled: false ### Boolean to configure ZK, Kafka Broker, Kafka Connect, and ksqlDB's logging with the RollingFileAppender and log cleanup functionality. Not necessary for other components. @@ -195,7 +195,7 @@ confluent_cli_base_path: /opt/confluent-cli confluent_cli_path: "/usr/local/bin/confluent" ### Confluent CLI version to download (e.g. "1.9.0"). Support matrix https://docs.confluent.io/platform/current/installation/versions-interoperability.html#confluent-cli -confluent_cli_version: 3.2.1 +confluent_cli_version: 3.30.1 ### Recommended replication factor, defaults to 3. When splitting your cluster across 2 DCs with 4 or more Brokers, this should be increased to 4 to balance topic replicas. default_internal_replication_factor: 3 @@ -316,7 +316,7 @@ zookeeper_ssl_mutual_auth_enabled: "{{ssl_mutual_auth_enabled}}" zookeeper_sasl_protocol: "{{sasl_protocol if sasl_protocol == 'kerberos' else 'none'}}" ### Authentication to put on ZK Server to Server connections. Available options: [mtls, digest, digest_over_tls].
-zookeeper_quorum_authentication_type: "{{ 'mtls' if zookeeper_ssl_enabled and zookeeper_ssl_mutual_auth_enabled else zookeeper_sasl_protocol }}" +zookeeper_quorum_authentication_type: "{% if zookeeper_ssl_enabled and zookeeper_ssl_mutual_auth_enabled %}mtls{% elif zookeeper_sasl_protocol == 'digest' %}digest{% else %}none{% endif %}" ### Authentication to put on ZK Client to Server connections. This is Kafka's connection to ZK. Available options: [mtls, digest, kerberos]. zookeeper_client_authentication_type: "{{ 'mtls' if zookeeper_ssl_enabled and zookeeper_ssl_mutual_auth_enabled else zookeeper_sasl_protocol }}" @@ -404,7 +404,7 @@ zookeeper_peer_port: 2888 ### Zookeeper leader port zookeeper_leader_port: 3888 -### Use to copy files from control node to zookeeper hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to zookeeper hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. zookeeper_copy_files: [] # User provided properties, merged into the final properties dictionary with precedence @@ -438,7 +438,7 @@ kafka_controller_ssl_enabled: "{{ssl_enabled}}" ### Boolean to enable mTLS Authentication on controller (Server to Server and Client to Server). Configures kafka to authenticate with mTLS. kafka_controller_ssl_mutual_auth_enabled: "{{ssl_mutual_auth_enabled}}" -### SASL Mechanism for controller Server to Server and Server to Client Authentication. Options are none, kerberos, digest. Server to server auth only working for digest-md5 +### SASL Mechanism for controller Server to Server and Server to Client Authentication. Options are plain, kerberos, none kafka_controller_sasl_protocol: "{{sasl_protocol}}" # Uses custom filter to create a list of all sasl_protocols, removes ['none'], and reduces to unique items @@ -519,7 +519,7 @@ kafka_controller_jmxexporter_config_source_path: kafka.yml.j2 ### Destination path for Kafka controller jmx config file kafka_controller_jmxexporter_config_path: /opt/prometheus/kafka.yml -### Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. kafka_controller_copy_files: [] ### Replication Factor for internal topics. Defaults to the minimum of the number of controllers and can be overridden via default replication factor (see default_internal_replication_factor). @@ -534,7 +534,7 @@ kafka_controller_custom_properties: {} ### Use to add custom properties to variable kafka_controller_client_properties. This variable is a dictionary. Put values true/false in quotation marks to perserve case. 
kafka_controller_custom_client_properties: {} -### Boolean to enable the embedded rest proxy within Kafka. NOTE- Embedded Rest Proxy must be enabled if RBAC is enabled and Confluent Server must be enabled +### Boolean to enable the embedded rest proxy within Kraft Controller. Not yet supported. kafka_controller_rest_proxy_enabled: false ### Use to register and identify your Kafka cluster in the MDS. @@ -670,7 +670,7 @@ kafka_broker_jmxexporter_config_source_path: kafka.yml.j2 ### Destination path for Kafka Broker jmx config file kafka_broker_jmxexporter_config_path: /opt/prometheus/kafka.yml -### Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to kafka hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. kafka_broker_copy_files: [] ### Replication Factor for internal topics. Defaults to the minimum of the number of brokers and can be overridden via default replication factor (see default_internal_replication_factor). @@ -785,7 +785,7 @@ schema_registry_jmxexporter_config_path: /opt/prometheus/schema_registry.yml ### Port to expose prometheus metrics. Beware of port collisions if colocating components on same host schema_registry_jmxexporter_port: 8078 -### Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. schema_registry_copy_files: [] ### Use to set custom schema registry properties. This variable is a dictionary. Put values true/false in quotation marks to perserve case. NOTE- kafka_broker.properties is deprecated. @@ -882,7 +882,7 @@ kafka_rest_jmxexporter_config_path: /opt/prometheus/kafka_rest.yml ### Port to expose prometheus metrics. Beware of port collisions if colocating components on same host kafka_rest_jmxexporter_port: 8075 -### Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to schema registry hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. kafka_rest_copy_files: [] ### Use to set custom Rest Proxy properties. This variable is a dictionary. 
Put values true/false in quotation marks to perserve case. NOTE- kafka_rest.properties is deprecated. @@ -997,7 +997,7 @@ kafka_connect_jmxexporter_config_path: /opt/prometheus/kafka_connect.yml ### Port to expose connect prometheus metrics. Beware of port collisions if colocating components on same host kafka_connect_jmxexporter_port: 8077 -### Use to copy files from control node to connect hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to connect hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. kafka_connect_copy_files: [] ### Connect Service Group Id. Customize when configuring multiple connect clusters in same inventory @@ -1125,7 +1125,7 @@ ksql_jmxexporter_config_path: /opt/prometheus/ksql.yml ### Port to expose ksqlDB prometheus metrics. Beware of port collisions if colocating components on same host ksql_jmxexporter_port: 8076 -### Use to copy files from control node to ksqlDB hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to ksqlDB hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. ksql_copy_files: [] ### Replication Factor for ksqlDB internal topics. Defaults to the minimum of the number of brokers and can be overridden via default replication factor (see default_internal_replication_factor). @@ -1204,7 +1204,7 @@ control_center_export_certs: "{{ssl_mutual_auth_enabled}}" control_center_keytab_path: /etc/security/keytabs/control_center.keytab control_center_kafka_listener_name: internal -### Use to copy files from control node to Control Center hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '0750') and file_mode (default: '0640') to set directory and file permissions. +### Use to copy files from control node to Control Center hosts. Set to list of dictionaries with keys: source_path (full path of file on control node) and destination_path (full path to copy file to). Optionally specify directory_mode (default: '750') and file_mode (default: '640') to set directory and file permissions. control_center_copy_files: [] ### Replication Factor for Control Center internal topics. Defaults to the minimum of the number of brokers and can be overridden via default replication factor (see default_internal_replication_factor). 
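As an aside for reviewers: the `*_copy_files` comments updated throughout this file all describe the same shape, a list of dictionaries with `source_path` and `destination_path` keys plus optional `directory_mode` and `file_mode`. A minimal sketch of one such entry follows; only the key names and the quoted mode defaults come from this diff, and the paths are hypothetical placeholders:

```yaml
# Hypothetical inventory snippet; both paths are placeholders.
control_center_copy_files:
  - source_path: /home/user/files/login.properties          # full path of the file on the control node
    destination_path: /etc/controlcenter/login.properties   # full path to copy the file to on the host
    directory_mode: '750'   # optional; documented default
    file_mode: '640'        # optional; documented default
```

Note that the string modes (`'750'`, `'640'`) match the octal-to-string conversion applied across the task files above.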
@@ -1407,12 +1407,48 @@ mds_acls_enabled: "{{rbac_enabled}}" rbac_enabled_public_pem_path: "{% if ssl_provided_keystore_and_truststore_remote_src %}{{token_services_public_pem_file}}{% else %}{{ ssl_file_dir_final }}/public.pem{% endif %}" rbac_enabled_private_pem_path: "{% if ssl_provided_keystore_and_truststore_remote_src %}{{token_services_private_pem_file}}{% else %}{{ ssl_file_dir_final }}/tokenKeypair.pem{% endif %}" +### SSO mode for C3. Possible values: oidc (not supported in CCS). If enabling oidc, you must set sso_groups_claim, sso_sub_claim, sso_jwks_uri, sso_authorize_uri, sso_token_uri, sso_issuer_url, sso_client_id, and sso_client_password in MDS +sso_mode: none + +### Name of the groups claim in the JWT +sso_groups_claim: groups + +### Name of the sub claim in the JWT +sso_sub_claim: sub + +### The issuer URL, typically the authorization server's URL. This value is compared to the issuer claim in the JWT for verification +sso_issuer_url: none + +### JSON Web Key Set (JWKS) URI +sso_jwks_uri: none + +### Endpoint for an OAuth authorization request +sso_authorize_uri: none + +### IdP token endpoint from which MDS requests a token +sso_token_uri: none + +### Client ID for authorize and token requests to the IdP +sso_client_id: none + +### Client password for authorize and token requests to the IdP +sso_client_password: none + +### Any additional scope needed to include groups in the token; this config is optional, depending on the IdP. Possible values: groups, openid, offline_access, etc. +sso_groups_scope: none + +### Configures whether the offline_access scope is requested in the authorization URI. Set this to false if offline tokens are not allowed for the user or client in the IdP +sso_refresh_token: true + ### LDAP User which will be granted super user permissions to create role bindings in the MDS mds_super_user: mds ### Password to mds_super_user LDAP User mds_super_user_password: password +### Number of retries for MDS API requests +mds_retries: 30 + ### LDAP User for Kafkas Embedded Rest Service to authenticate as kafka_broker_ldap_user: "{{mds_super_user}}" @@ -1528,7 +1564,7 @@ control_center_additional_system_admins: "{{rbac_component_additional_system_adm # Secrets Protection Variables -### Boolean to enable secrets protection on all components except Zookeeper. Starting from CP 7.1.0, secrets protection will work only with RBAC +### Boolean to enable secrets protection on all components except Zookeeper. secrets_protection_enabled: false ### Boolean to Recreate Secrets File and Masterkey. Only set to false AFTER first cp-ansible run.
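Taken together, the variables added above wire Control Center SSO through MDS and make the MDS retry count tunable (it replaces the hard-coded `retries: 30`/`retries: 20` values in the rbac and register_cluster tasks earlier in this diff). A hedged sketch of how they might be set in an inventory, assuming an OIDC IdP; the variable names come from this diff, but every value below is an illustrative placeholder, not a default:

```yaml
# Illustrative only; endpoints, client ID, and secret are placeholders for your IdP.
all:
  vars:
    sso_mode: oidc
    sso_groups_claim: groups
    sso_sub_claim: sub
    sso_issuer_url: https://idp.example.com
    sso_jwks_uri: https://idp.example.com/oauth2/v1/keys
    sso_authorize_uri: https://idp.example.com/oauth2/v1/authorize
    sso_token_uri: https://idp.example.com/oauth2/v1/token
    sso_client_id: c3-sso          # placeholder client ID registered with the IdP
    sso_client_password: change-me # placeholder client secret
    mds_retries: 60                # raise from the default of 30 for slow MDS startups
```

Per the comments above, `sso_groups_scope` can stay at `none` unless the IdP requires an extra scope to emit groups, and `sso_refresh_token` should be set to `false` only where offline tokens are disallowed.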
diff --git a/roles/variables/vars/main.yml b/roles/variables/vars/main.yml index fb244a4f4..8438f675b 100644 --- a/roles/variables/vars/main.yml +++ b/roles/variables/vars/main.yml @@ -1,5 +1,5 @@ --- -confluent_ansible_branch: 7.4.0-post +confluent_ansible_branch: 7.5.3-post systemd_base_dir: "{{'/lib/systemd/system' if ansible_os_family == 'Debian' else '/usr/lib/systemd/system'}}" @@ -188,7 +188,7 @@ kafka_controller_properties: ssl.keystore.location: "{{kafka_controller_keystore_path}}" ssl.keystore.password: "{{kafka_controller_keystore_storepass}}" ssl.key.password: "{{kafka_controller_keystore_storepass}}" - ssl.enabled.protocols: TLSv1.2 + ssl.enabled.protocols: TLSv1.2,TLSv1.3 sasl_enabled: enabled: "{{ kafka_controller_sasl_enabled_mechanisms|length > 0 }}" properties: @@ -410,7 +410,7 @@ kafka_broker_properties: ssl.keystore.location: "{{kafka_broker_keystore_path}}" ssl.keystore.password: "{{kafka_broker_keystore_storepass}}" ssl.key.password: "{{kafka_broker_keystore_storepass}}" - ssl.enabled.protocols: TLSv1.2 + ssl.enabled.protocols: TLSv1.2,TLSv1.3 inter_broker_sasl: enabled: "{{ kafka_broker_listeners[kafka_broker_inter_broker_listener_name]['sasl_protocol'] | default(sasl_protocol) | confluent.platform.normalize_sasl_protocol != 'none' }}" properties: @@ -504,6 +504,25 @@ kafka_broker_properties: confluent.metadata.server.ssl.keystore.type: BCFKS confluent.metadata.server.ssl.truststore.type: BCFKS confluent.metadata.server.http2.enabled: false + rbac_mds_sso: + enabled: "{{ rbac_enabled and not external_mds_enabled and sso_mode != 'none' }}" + properties: + confluent.metadata.server.sso.mode: "{{ sso_mode }}" + confluent.oidc.idp.groups.claim.name: "{{ sso_groups_claim }}" + confluent.oidc.idp.sub.claim.name: "{{ sso_sub_claim }}" + confluent.oidc.idp.issuer: "{{ sso_issuer_url }}" + confluent.oidc.idp.jwks.endpoint.uri: "{{ sso_jwks_uri }}" + confluent.oidc.idp.authorize.base.endpoint.uri: "{{ sso_authorize_uri }}" + confluent.oidc.idp.token.base.endpoint.uri: "{{ sso_token_uri }}" + confluent.oidc.idp.client.id: "{{ sso_client_id }}" + confluent.oidc.idp.client.secret: "{{ sso_client_password }}" + confluent.oidc.idp.refresh.token.enabled: "{{ sso_refresh_token }}" + confluent.oidc.session.token.expiry.ms: 900000 + confluent.oidc.session.max.timeout.ms: 21600000 + rbac_mds_sso_scope: + enabled: "{{ rbac_enabled and not external_mds_enabled and sso_mode != 'none' and sso_groups_scope != 'none' }}" + properties: + confluent.oidc.idp.groups.claim.scope: "{{ sso_groups_scope }}" rbac_external_mds: enabled: "{{rbac_enabled and external_mds_enabled}}" properties: @@ -1064,7 +1083,7 @@ ksql_properties: sasl.jaas.config: |- org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required username="{{ksql_ldap_user | default('ksql')}}" password="{{ksql_ldap_password | default('pass')}}" metadataServerUrls="{{mds_bootstrap_server_urls}}"; sr: - enabled: "{{ 'schema_registry' in groups }}" + enabled: "{{ 'schema_registry' in groups or ccloud_schema_registry_enabled|bool }}" properties: ksql.schema.registry.url: "{{schema_registry_url}}" sr_ssl: @@ -1218,7 +1237,7 @@ kafka_rest_properties: client.config.providers: securepass client.config.providers.securepass.class: io.confluent.kafka.security.config.provider.SecurePassConfigProvider sr: - enabled: "{{ 'schema_registry' in groups }}" + enabled: "{{ 'schema_registry' in groups or ccloud_schema_registry_enabled|bool }}" properties: schema.registry.url: "{{schema_registry_url}}" sr_ssl: @@ -1318,7 +1337,7 @@ 
control_center_properties: defaults: enabled: true properties: - confluent.controlcenter.streams.num.stream.threads: 8 + confluent.controlcenter.streams.num.stream.threads: 12 confluent.controlcenter.data.dir: /var/lib/confluent/control-center confluent.controlcenter.internal.topics.replication: "{{control_center_default_internal_replication_factor}}" confluent.metrics.topic.replication: "{{control_center_default_internal_replication_factor}}" @@ -1362,7 +1381,7 @@ control_center_properties: kerberos_kafka_broker_primary, control_center_keytab_path, control_center_kerberos_principal|default('c3'), false, control_center_ldap_user, control_center_ldap_password, mds_bootstrap_server_urls) }}" sr: - enabled: "{{ 'schema_registry' in groups }}" + enabled: "{{ 'schema_registry' in groups or ccloud_schema_registry_enabled|bool }}" properties: confluent.controlcenter.schema.registry.url: "{{schema_registry_url}}" sr_ssl: @@ -1401,6 +1420,10 @@ control_center_properties: properties: confluent.controlcenter.streams.ssl.truststore.location: "{{control_center_truststore_path}}" confluent.controlcenter.streams.ssl.truststore.password: "{{control_center_truststore_storepass}}" + rbac_mds_sso: + enabled: "{{ rbac_enabled and sso_mode != 'none' }}" + properties: + confluent.controlcenter.auth.sso.mode: "{{ sso_mode }}" mds_client: enabled: "{{rbac_enabled and mds_tls_enabled }}" properties: diff --git a/roles/zookeeper/defaults/main.yml b/roles/zookeeper/defaults/main.yml index a9d8c81c1..f259ac525 100644 --- a/roles/zookeeper/defaults/main.yml +++ b/roles/zookeeper/defaults/main.yml @@ -19,7 +19,7 @@ zookeeper_logredactor_logger_specs_list: zookeeper_java_args: - "{% if zookeeper_ssl_enabled|bool %}-Djdk.tls.ephemeralDHKeySize=2048{% endif %}" - - "{% if zookeeper_client_authentication_type in ['kerberos', 'digest'] or zookeeper_quorum_authentication_type in ['kerberos', 'digest', 'digest_over_tls'] %}-Djava.security.auth.login.config={{zookeeper.jaas_file}}{% endif %}" + - "{% if zookeeper_client_authentication_type in ['kerberos', 'digest'] or zookeeper_quorum_authentication_type in ['digest', 'digest_over_tls'] %}-Djava.security.auth.login.config={{zookeeper.jaas_file}}{% endif %}" - "{% if zookeeper_jolokia_enabled|bool %}-javaagent:{{jolokia_jar_path}}=config={{zookeeper_jolokia_config}}{% endif %}" - "{% if zookeeper_jmxexporter_enabled|bool %}-javaagent:{{jmxexporter_jar_path}}={{zookeeper_jmxexporter_port}}:{{zookeeper_jmxexporter_config_path}}{% endif %}" - "{% if kerberos_client_config_file_dest != '/etc/krb5.conf' %}-Djava.security.krb5.conf={{kerberos_client_config_file_dest}}{% endif %}" diff --git a/roles/zookeeper/tasks/main.yml b/roles/zookeeper/tasks/main.yml index adcc09845..13862363b 100644 --- a/roles/zookeeper/tasks/main.yml +++ b/roles/zookeeper/tasks/main.yml @@ -90,7 +90,7 @@ src: "{{binary_base_path}}/lib/systemd/system/{{zookeeper.systemd_file|basename}}" remote_src: true dest: "{{zookeeper.systemd_file}}" - mode: 0644 + mode: '644' force: true when: installation_method == "archive" tags: @@ -144,7 +144,7 @@ owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem @@ -163,7 +163,7 @@ owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" state: directory - mode: 0750 + mode: '750' when: zookeeper_final_properties.dataLogDir is defined tags: - filesystem @@ -182,7 +182,7 @@ template: src: myid.j2 dest: "{{zookeeper_final_properties.dataDir}}/myid" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: 
"{{zookeeper_group}}" tags: @@ -192,7 +192,7 @@ file: path: "{{ zookeeper.config_file | dirname }}" state: directory - mode: 0750 + mode: '750' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" tags: @@ -202,7 +202,7 @@ template: src: zookeeper.properties.j2 dest: "{{zookeeper.config_file}}" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" register: zookeeper_config @@ -217,7 +217,7 @@ state: directory group: "{{zookeeper_group}}" owner: "{{zookeeper_user}}" - mode: 0770 + mode: '770' tags: - filesystem - log @@ -228,7 +228,7 @@ owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" state: directory - mode: 0750 + mode: '750' tags: - filesystem - log @@ -237,7 +237,7 @@ template: src: zookeeper_log4j.properties.j2 dest: "{{zookeeper.log4j_file}}" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" when: zookeeper_custom_log4j|bool @@ -250,7 +250,7 @@ file: path: "{{ logredactor_rule_path | dirname }}" state: directory - mode: 0755 + mode: '755' when: (zookeeper_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -259,7 +259,7 @@ copy: src: "{{ logredactor_rule_path_local }}" dest: "{{ logredactor_rule_path }}" - mode: 0644 + mode: '644' when: (zookeeper_custom_log4j|bool) and (logredactor_enabled|bool) and (logredactor_rule_url == '') tags: - log @@ -289,7 +289,7 @@ template: src: zookeeper_jolokia.properties.j2 dest: "{{zookeeper_jolokia_config}}" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" when: zookeeper_jolokia_enabled|bool @@ -302,10 +302,10 @@ template: src: zookeeper_jaas.conf.j2 dest: "{{zookeeper.jaas_file}}" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" - when: zookeeper_client_authentication_type in ['kerberos', 'digest'] or zookeeper_quorum_authentication_type in ['kerberos', 'digest', 'digest_over_tls'] + when: zookeeper_client_authentication_type in ['kerberos', 'digest'] or zookeeper_quorum_authentication_type in ['digest', 'digest_over_tls'] notify: restart zookeeper diff: "{{ not mask_sensitive_diff|bool }}" tags: @@ -315,7 +315,7 @@ copy: src: "{{zookeeper_jmxexporter_config_source_path}}" dest: "{{zookeeper_jmxexporter_config_path}}" - mode: 0640 + mode: '640' owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" when: zookeeper_jmxexporter_enabled|bool @@ -328,7 +328,7 @@ owner: "{{zookeeper_user}}" group: "{{zookeeper_group}}" state: directory - mode: 0640 + mode: '640' tags: - systemd - privileged @@ -337,7 +337,7 @@ template: src: override.conf.j2 dest: "{{zookeeper.systemd_override}}" - mode: 0640 + mode: '640' owner: root group: root notify: restart zookeeper diff --git a/test_roles/confluent.test.kerberos/tasks/main.yml b/test_roles/confluent.test.kerberos/tasks/main.yml index 6d4223d68..d8a36d5c7 100644 --- a/test_roles/confluent.test.kerberos/tasks/main.yml +++ b/test_roles/confluent.test.kerberos/tasks/main.yml @@ -9,7 +9,7 @@ file: path: /tmp/keytabs/ state: directory - mode: 0755 + mode: '755' - name: "Add Principal: {{item.principal}}" shell: "kadmin.local -q 'addprinc -randkey {{item.principal}}'" diff --git a/test_roles/confluent.test.ldap/tasks/tls.yml b/test_roles/confluent.test.ldap/tasks/tls.yml index ad8ac8ca3..e4b20b7f3 100644 --- a/test_roles/confluent.test.ldap/tasks/tls.yml +++ b/test_roles/confluent.test.ldap/tasks/tls.yml @@ -3,7 +3,7 @@ file: path: /var/ssl/private/ldaps/ state: directory - mode: 0755 + mode: '755' when: - name: Generate Self 
Signed Certificates diff --git a/test_roles/confluent.test.ldap/tasks/tls_custom_certs.yml b/test_roles/confluent.test.ldap/tasks/tls_custom_certs.yml index 41110781f..81bb8559b 100644 --- a/test_roles/confluent.test.ldap/tasks/tls_custom_certs.yml +++ b/test_roles/confluent.test.ldap/tasks/tls_custom_certs.yml @@ -3,16 +3,16 @@ copy: src: "{{ssl_ca_cert_filepath}}" dest: "/var/ssl/private/ldaps/{{ssl_ca_cert_filepath|basename}}" - mode: 0666 + mode: '666' - name: Copy Signed Cert to Host copy: src: "{{ssl_signed_cert_filepath}}" dest: "/var/ssl/private/ldaps/{{ssl_signed_cert_filepath|basename}}" - mode: 0666 + mode: '666' - name: Copy Key to Host copy: src: "{{ssl_key_filepath}}" dest: "/var/ssl/private/ldaps/{{ssl_key_filepath|basename}}" - mode: 0666 + mode: '666' diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt index 79a181d7e..fdb060b38 100644 --- a/tests/sanity/ignore-2.11.txt +++ b/tests/sanity/ignore-2.11.txt @@ -3,8 +3,8 @@ plugins/modules/kafka_connectors.py pylint:ansible-format-automatic-specificatio plugins/modules/kafka_connectors.py validate-modules:missing-gplv3-license molecule/certs-create.sh shebang molecule/certs-create.sh shellcheck!skip -molecule/mtls-custombundle-rhel/create_ca_bundle.sh shebang -molecule/mtls-custombundle-rhel/create_ca_bundle.sh shellcheck!skip +molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shebang +molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shellcheck!skip test_roles/confluent.test.kerberos/files/create_admin.sh shebang test_roles/confluent.test.kerberos/files/create_db.sh shebang discovery/utils/utils.py pylint!skip @@ -15,6 +15,8 @@ discovery/service/schema_registry.py pylint!skip discovery/service/zookeeper.py pylint!skip discovery/system/system.py pylint!skip discovery/service/service.py pylint!skip +discovery/service/control_center.py pylint!skip +discovery/service/kafka_connect.py pylint!skip discovery/utils/utils.py pep8!skip discovery/manager/manager.py pep8!skip discovery/utils/inventory.py pep8!skip @@ -23,3 +25,4 @@ discovery/service/schema_registry.py pep8!skip discovery/service/zookeeper.py pep8!skip discovery/system/system.py pep8!skip discovery/service/service.py pep8!skip +discovery/service/control_center.py pep8!skip diff --git a/tests/sanity/ignore-2.12.txt b/tests/sanity/ignore-2.12.txt index 79a181d7e..fdb060b38 100644 --- a/tests/sanity/ignore-2.12.txt +++ b/tests/sanity/ignore-2.12.txt @@ -3,8 +3,8 @@ plugins/modules/kafka_connectors.py pylint:ansible-format-automatic-specificatio plugins/modules/kafka_connectors.py validate-modules:missing-gplv3-license molecule/certs-create.sh shebang molecule/certs-create.sh shellcheck!skip -molecule/mtls-custombundle-rhel/create_ca_bundle.sh shebang -molecule/mtls-custombundle-rhel/create_ca_bundle.sh shellcheck!skip +molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shebang +molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shellcheck!skip test_roles/confluent.test.kerberos/files/create_admin.sh shebang test_roles/confluent.test.kerberos/files/create_db.sh shebang discovery/utils/utils.py pylint!skip @@ -15,6 +15,8 @@ discovery/service/schema_registry.py pylint!skip discovery/service/zookeeper.py pylint!skip discovery/system/system.py pylint!skip discovery/service/service.py pylint!skip +discovery/service/control_center.py pylint!skip +discovery/service/kafka_connect.py pylint!skip discovery/utils/utils.py pep8!skip discovery/manager/manager.py pep8!skip discovery/utils/inventory.py pep8!skip @@ -23,3 +25,4 @@ 
diff --git a/tests/sanity/ignore-2.13.txt b/tests/sanity/ignore-2.13.txt
index 53f7c828a..b60e2487d 100644
--- a/tests/sanity/ignore-2.13.txt
+++ b/tests/sanity/ignore-2.13.txt
@@ -3,8 +3,8 @@ plugins/modules/kafka_connectors.py pylint:ansible-format-automatic-specification
 plugins/modules/kafka_connectors.py validate-modules:missing-gplv3-license
 molecule/certs-create.sh shebang
 molecule/certs-create.sh shellcheck!skip
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shebang
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shellcheck!skip
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shebang
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shellcheck!skip
 test_roles/confluent.test.kerberos/files/create_admin.sh shebang
 test_roles/confluent.test.kerberos/files/create_db.sh shebang
 discovery/utils/utils.py pylint!skip
@@ -15,6 +15,8 @@ discovery/service/schema_registry.py pylint!skip
 discovery/service/zookeeper.py pylint!skip
 discovery/system/system.py pylint!skip
 discovery/service/service.py pylint!skip
+discovery/service/control_center.py pylint!skip
+discovery/service/kafka_connect.py pylint!skip
 discovery/utils/utils.py pep8!skip
 discovery/manager/manager.py pep8!skip
 discovery/utils/inventory.py pep8!skip
@@ -23,4 +25,4 @@ discovery/service/schema_registry.py pep8!skip
 discovery/service/zookeeper.py pep8!skip
 discovery/system/system.py pep8!skip
 discovery/service/service.py pep8!skip
-
+discovery/service/control_center.py pep8!skip
diff --git a/tests/sanity/ignore-2.14.txt b/tests/sanity/ignore-2.14.txt
index 12bfa1461..b60e2487d 100644
--- a/tests/sanity/ignore-2.14.txt
+++ b/tests/sanity/ignore-2.14.txt
@@ -3,8 +3,8 @@ plugins/modules/kafka_connectors.py pylint:ansible-format-automatic-specification
 plugins/modules/kafka_connectors.py validate-modules:missing-gplv3-license
 molecule/certs-create.sh shebang
 molecule/certs-create.sh shellcheck!skip
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shebang
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shellcheck!skip
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shebang
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shellcheck!skip
 test_roles/confluent.test.kerberos/files/create_admin.sh shebang
 test_roles/confluent.test.kerberos/files/create_db.sh shebang
 discovery/utils/utils.py pylint!skip
@@ -15,6 +15,8 @@ discovery/service/schema_registry.py pylint!skip
 discovery/service/zookeeper.py pylint!skip
 discovery/system/system.py pylint!skip
 discovery/service/service.py pylint!skip
+discovery/service/control_center.py pylint!skip
+discovery/service/kafka_connect.py pylint!skip
 discovery/utils/utils.py pep8!skip
 discovery/manager/manager.py pep8!skip
 discovery/utils/inventory.py pep8!skip
@@ -23,3 +25,4 @@ discovery/service/schema_registry.py pep8!skip
 discovery/service/zookeeper.py pep8!skip
 discovery/system/system.py pep8!skip
 discovery/service/service.py pep8!skip
+discovery/service/control_center.py pep8!skip
diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt
index 79a181d7e..fdb060b38 100644
--- a/tests/sanity/ignore-2.9.txt
+++ b/tests/sanity/ignore-2.9.txt
@@ -3,8 +3,8 @@ plugins/modules/kafka_connectors.py pylint:ansible-format-automatic-specification
 plugins/modules/kafka_connectors.py validate-modules:missing-gplv3-license
 molecule/certs-create.sh shebang
 molecule/certs-create.sh shellcheck!skip
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shebang
-molecule/mtls-custombundle-rhel/create_ca_bundle.sh shellcheck!skip
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shebang
+molecule/mtls-custombundle-rhel-fips/create_ca_bundle.sh shellcheck!skip
 test_roles/confluent.test.kerberos/files/create_admin.sh shebang
 test_roles/confluent.test.kerberos/files/create_db.sh shebang
 discovery/utils/utils.py pylint!skip
@@ -15,6 +15,8 @@ discovery/service/schema_registry.py pylint!skip
 discovery/service/zookeeper.py pylint!skip
 discovery/system/system.py pylint!skip
 discovery/service/service.py pylint!skip
+discovery/service/control_center.py pylint!skip
+discovery/service/kafka_connect.py pylint!skip
 discovery/utils/utils.py pep8!skip
 discovery/manager/manager.py pep8!skip
 discovery/utils/inventory.py pep8!skip
@@ -23,3 +25,4 @@ discovery/service/schema_registry.py pep8!skip
 discovery/service/zookeeper.py pep8!skip
 discovery/system/system.py pep8!skip
 discovery/service/service.py pep8!skip
+discovery/service/control_center.py pep8!skip
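To confirm the suppressions line up with the code, the affected checks can be run locally. A sketch, assuming `ansible-test` from a matching ansible-core release is on `PATH` and the commands are run from the collection root:

```shell
# Run only the pep8 and pylint sanity checks against the new discovery modules.
ansible-test sanity --test pep8 discovery/service/control_center.py
ansible-test sanity --test pylint discovery/service/kafka_connect.py
```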