---
###########################
## cluster configuration ##
###########################
cluster_name: 'mytestcluster'
ambari_version: '2.7.4.0' # must be the 4-part full version number
hdp_version: '3.1.4.0' # must be the 4-part full version number
hdp_build_number: 'auto' # the HDP build number from docs.hortonworks.com (if set to 'auto', Ansible will try to get it from the repository)
hdf_version: '3.4.1.1' # must be the 4-part full version number
hdf_build_number: 'auto' # the HDF build number from docs.hortonworks.com (if set to 'auto', Ansible will try to get it from the repository)
hdpsearch_version: '4.0.0' # must be the full version number
hdpsearch_build_number: '400' # the HDP Search build number from docs.hortonworks.com
repo_base_url: 'http://public-repo-1.hortonworks.com' # change this if using a Local Repository
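# Example (hypothetical mirror host): when using a Local Repository, point this at
# the internal web server that mirrors the HDP/HDF repositories, e.g.:
# repo_base_url: 'http://repo.internal.example.com'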
###########################
## general configuration ##
###########################
external_dns: yes # set to yes to use the existing DNS (when no, it will update the /etc/hosts file - must be set to 'no' when using Azure)
disable_firewall: no # set to yes to disable the existing local firewall service (iptables, firewalld, ufw)
timezone: UTC
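# Example sketch (values are only an illustration): a deployment that lets Ansible
# manage /etc/hosts and disables the local firewall might use:
# external_dns: no
# disable_firewall: yes
# timezone: 'Europe/London'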
########################
## java configuration ##
########################
java: 'embedded' # can be set to 'embedded', 'openjdk' or 'oraclejdk'
oraclejdk_options: # only used when java is set to 'oraclejdk'
  base_folder: '/usr/java' # the folder where the Java package should be unpacked to
  tarball_location: '/tmp/jdk-8u181-linux-x64.tar.gz' # the location of the tarball on the remote system or on the Ansible controller
  jce_location: '/tmp/jce_policy-8.zip' # the location of the JCE package on the remote system or on the Ansible controller
  remote_files: no # set to yes to indicate the files are already on the remote systems, otherwise they will be copied by Ansible from the Ansible controller
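# Example sketch (assumes the tarball and JCE zip from the paths above were already
# downloaded to /tmp on the Ansible controller): to use the Oracle JDK instead of
# the embedded one:
# java: 'oraclejdk'
# oraclejdk_options:
#   base_folder: '/usr/java'
#   tarball_location: '/tmp/jdk-8u181-linux-x64.tar.gz'
#   jce_location: '/tmp/jce_policy-8.zip'
#   remote_files: no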
############################
## database configuration ##
############################
database: 'embedded' # can be set to 'embedded', 'postgres', 'mysql' or 'mariadb'
database_options:
  add_repo: yes # if set to 'no', Ansible will not add a repo file (containing the public URL)
  external_hostname: '' # if this is empty, Ansible will install and prepare the databases on the ambari-server node
  ambari_db_name: 'ambari'
  ambari_db_username: 'ambari'
  ambari_db_password: 'bigdata'
  hive_db_name: 'hive'
  hive_db_username: 'hive'
  hive_db_password: 'hive'
  oozie_db_name: 'oozie'
  oozie_db_username: 'oozie'
  oozie_db_password: 'oozie'
  druid_db_name: 'druid'
  druid_db_username: 'druid'
  druid_db_password: 'druid'
  superset_db_name: 'superset'
  superset_db_username: 'superset'
  superset_db_password: 'superset'
  rangeradmin_db_name: 'ranger'
  rangeradmin_db_username: 'ranger'
  rangeradmin_db_password: 'ranger'
  rangerkms_db_name: 'rangerkms'
  rangerkms_db_username: 'rangerkms'
  rangerkms_db_password: 'rangerkms'
  registry_db_name: 'registry'
  registry_db_username: 'registry'
  registry_db_password: 'registry'
  streamline_db_name: 'streamline'
  streamline_db_username: 'streamline'
  streamline_db_password: 'streamline'
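# Example sketch (hypothetical hostname): using an existing external PostgreSQL
# server instead of the embedded database; the *_db_* values above stay as-is:
# database: 'postgres'
# database_options:
#   add_repo: no
#   external_hostname: 'db.internal.example.com' # must be reachable from the nodes that use it (Ambari, Hive, Ranger, etc.)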
#####################################
## kerberos security configuration ## # useful if blueprint is dynamic, but can also be used to deploy the MIT KDC
#####################################
security: 'none' # can be set to 'none', 'mit-kdc', 'ipa' or 'active-directory'
security_options:
  external_hostname: '' # if this is empty, Ansible will install and prepare the MIT KDC on the Ambari node
  realm: 'EXAMPLE.COM'
  admin_principal: 'admin' # the Kerberos principal that has the permissions to create new users (don't append the realm)
  admin_password: "{{ default_password }}"
  kdc_master_key: "{{ default_password }}" # only used when security is set to 'mit-kdc'
  ldap_url: 'ldaps://ad.example.com:636' # only used when security is set to 'active-directory'
  container_dn: 'OU=hadoop,DC=example,DC=com' # only used when security is set to 'active-directory'
  http_authentication: yes # set to yes to enable HTTP authentication (SPNEGO)
  manage_krb5_conf: yes # set to no if using FreeIPA/IdM
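# Example sketch (assumes an existing Active Directory; hostnames and DNs are illustrative):
# security: 'active-directory'
# security_options:
#   external_hostname: 'ad.example.com'
#   realm: 'EXAMPLE.COM'
#   admin_principal: 'admin' # an AD account allowed to create the service principals
#   admin_password: "{{ default_password }}"
#   ldap_url: 'ldaps://ad.example.com:636'
#   container_dn: 'OU=hadoop,DC=example,DC=com'
#   http_authentication: yes
#   manage_krb5_conf: yes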
##########################
## ranger configuration ## # only useful if blueprint is dynamic
##########################
ranger_options: # only used if RANGER_ADMIN is part of the blueprint stack
  enable_plugins: yes # set to 'yes' if the plugins should be enabled for all of the installed services
ranger_security_options: # only used if RANGER_ADMIN is part of the blueprint stack
  ranger_admin_password: "{{ default_password }}" # the password for the Ranger admin users (both admin and amb_ranger_admin)
  ranger_keyadmin_password: "{{ default_password }}" # the password for the Ranger keyadmin user (will only be set in HDP3, in HDP2 it will remain the default keyadmin)
  kms_master_key_password: "{{ default_password }}" # password used for encrypting the Master Key
##################################
## other security configuration ## # only useful if blueprint is dynamic
##################################
ambari_admin_password: 'admin' # the password for the Ambari admin user
default_password: 'AsdQwe123456' # a default password for all required passwords which are not specified in the blueprint,
                                 # dependency: NiFi password needs to be at least 12 characters
atlas_security_options:
  admin_password: "{{ default_password }}" # the password for the Atlas admin user
knox_security_options:
  master_secret: "{{ default_password }}" # Knox Master Secret
nifi_security_options:
  encrypt_password: "{{ default_password }}" # the password used to encrypt raw configuration values
  sensitive_props_key: "{{ default_password }}" # the password used to encrypt any sensitive property values that are configured in processors
superset_security_options:
  secret_key: "{{ default_password }}"
  admin_password: "{{ default_password }}" # the password for the Superset admin user
smartsense_security_options:
  admin_password: "{{ default_password }}" # password for the Activity Explorer's Zeppelin admin user
logsearch_security_options:
  admin_password: "{{ default_password }}" # the password for the Logsearch admin user
accumulo_security_options:
  root_password: "{{ default_password }}" # the password for the Accumulo root user
  instance_secret: "{{ default_password }}" # a secret unique to a given instance
  trace_user: "trace" # user that the tracer process uses to write tracing data to Accumulo
  trace_password: "{{ default_password }}" # password for the trace user
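# Example sketch (hypothetical values): a dedicated NiFi password can be used instead
# of default_password, but it still needs to be at least 12 characters:
# nifi_security_options:
#   encrypt_password: 'NiFiEncrypt123456'
#   sensitive_props_key: 'NiFiSensitive123456'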
##########################
## ambari configuration ##
##########################
ambari_admin_user: 'admin'
ambari_admin_default_password: 'admin' # no need to change this (unless the Ambari default changes)
config_recommendation_strategy: 'NEVER_APPLY' # choose between 'NEVER_APPLY', 'ONLY_STACK_DEFAULTS_APPLY', 'ALWAYS_APPLY', 'ALWAYS_APPLY_DONT_OVERRIDE_CUSTOM_VALUES'
smartsense: # Hortonworks subscription details (can be left empty if there is no subscription)
  id: ''
  account_name: ''
  customer_email: ''
wait: true # wait for the cluster to finish installing
wait_timeout: 3600 # 60 minutes
accept_gpl: yes # set to yes to allow Ambari to install GPL licensed libraries
cluster_template_file: 'cluster_template.j2' # the cluster creation template file
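# Example: for larger clusters the installation can take longer than an hour,
# in which case the timeout (in seconds) can simply be raised:
# wait_timeout: 7200 # 120 minutes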
###########################
## folders configuration ##
###########################
base_log_dir: '/var/log'
base_tmp_dir: '/tmp'
#############################
## blueprint configuration ##
#############################
blueprint_name: '{{ cluster_name }}_blueprint' # the name of the blueprint as it will be stored in Ambari
blueprint_file: 'blueprint_dynamic.j2' # the blueprint JSON file - 'blueprint_dynamic.j2' is a Jinja2 template that generates the required JSON
blueprint_dynamic: # properties for the dynamic blueprint - these are only used by the 'blueprint_dynamic.j2' template to generate the JSON
  - host_group: "hdp-master"
    clients: ['ZOOKEEPER_CLIENT', 'HDFS_CLIENT', 'YARN_CLIENT', 'MAPREDUCE2_CLIENT', 'TEZ_CLIENT', 'PIG', 'SQOOP', 'HIVE_CLIENT', 'OOZIE_CLIENT', 'INFRA_SOLR_CLIENT', 'SPARK2_CLIENT']
    services:
      - ZOOKEEPER_SERVER
      - NAMENODE
      - SECONDARY_NAMENODE
      - RESOURCEMANAGER
      - APP_TIMELINE_SERVER
      - YARN_REGISTRY_DNS
      - TIMELINE_READER
      - HISTORYSERVER
      - SPARK2_JOBHISTORYSERVER
      - HIVE_SERVER
      - HIVE_METASTORE
      - OOZIE_SERVER
      - KNOX_GATEWAY
      - AMBARI_SERVER
      - INFRA_SOLR
      - METRICS_COLLECTOR
      - METRICS_GRAFANA
      - METRICS_MONITOR
      - HST_SERVER
      - ACTIVITY_ANALYZER
      - ACTIVITY_EXPLORER
      - HST_AGENT
  - host_group: "hdp-slave"
    clients: ['ZOOKEEPER_CLIENT', 'HDFS_CLIENT', 'YARN_CLIENT', 'MAPREDUCE2_CLIENT', 'TEZ_CLIENT', 'PIG', 'SQOOP', 'HIVE_CLIENT', 'OOZIE_CLIENT', 'INFRA_SOLR_CLIENT', 'SPARK2_CLIENT']
    services:
      - DATANODE
      - NODEMANAGER
      - METRICS_MONITOR
      - HST_AGENT
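# Example sketch (hypothetical host group): additional groups can be added to
# blueprint_dynamic in the same format, e.g. a client-only edge node group;
# each host_group name is expected to match a group in the Ansible inventory:
#   - host_group: "hdp-edge"
#     clients: ['HDFS_CLIENT', 'YARN_CLIENT', 'HIVE_CLIENT', 'OOZIE_CLIENT', 'SPARK2_CLIENT']
#     services:
#       - METRICS_MONITOR
#       - HST_AGENT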
############################
## helper variables ## # don't change these unless there is a good reason
############################
hdp_minor_version: "{{ hdp_version | regex_replace('.[0-9]+.[0-9]+[0-9_-]*$','') }}"
hdp_major_version: "{{ hdp_minor_version.split('.').0 }}"
hdf_minor_version: "{{ hdf_version | regex_replace('.[0-9]+.[0-9]+[0-9_-]*$','') }}"
hdf_major_version: "{{ hdf_minor_version.split('.').0 }}"
utils_version: "{{ '1.1.0.20' if hdp_minor_version is version_compare('2.5', '<') else ('1.1.0.21' if hdp_version is version_compare('2.6.4', '<') else '1.1.0.22' ) }}"
hdfs_ha_name: "{{ cluster_name | regex_replace('_','-') }}"
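# Example: with the versions set above these evaluate to:
#   hdp_minor_version -> '3.1', hdp_major_version -> '3'
#   hdf_minor_version -> '3.4', hdf_major_version -> '3'
#   utils_version -> '1.1.0.22' (3.1 is not < 2.5 and 3.1.4.0 is not < 2.6.4)
#   hdfs_ha_name -> 'mytestcluster' (underscores in cluster_name replaced with '-')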
#############################
### advanced configuration ##
#############################
is_vm_docker_containers: 'no' # docker containers have some specifics like mount points for example