diff --git a/demos_rstan/brms_demo.html b/demos_rstan/brms_demo.html
index 0cadc01..be11404 100644
--- a/demos_rstan/brms_demo.html
+++ b/demos_rstan/brms_demo.html
@@ -1615,7 +1615,7 @@
Bayesian data analysis - BRMS demos
Aki Vehtari
-First version 2023-12-05. Last modified 2023-12-05.
+First version 2023-12-05. Last modified 2024-01-03.
@@ -1625,11 +1625,15 @@ Setup
Load packages
library(tidyr)
library(dplyr)
+library(tibble)
+library(pillar)
+library(stringr)
library(brms)
options(brms.backend = "cmdstanr", mc.cores = 2)
library(posterior)
options(pillar.negative = FALSE)
library(loo)
+library(priorsense)
library(ggplot2)
library(bayesplot)
theme_set(bayesplot::theme_default(base_family = "sans"))
@@ -1647,9 +1651,27 @@ Introduction
Bernoulli model
Toy data with sequence of failures (0) and successes (1). We would like to learn about the unknown probability of success.
data_bern <- data.frame(y = c(1, 1, 1, 0, 1, 1, 1, 0, 1, 0))
-brms uses by default student_t(3, 0, 2.5), bu we can assign uniform prior (beta(1,1)).
+As usual in case of generalized linear models (GLMs), brms defines the priors on the latent model parameters. With Bernoulli the default link function is logit, and thus the prior is set on logit(theta). As there are no covariates logit(theta)=Intercept. The brms default prior for Intercept is student_t(3, 0, 2.5), but we use student_t(7, 0, 1.5) which is close to logistic distribution, and thus makes the prior near-uniform for theta. We can simulate from these priors to check the implied prior on theta. We next compare the result to using normal(0, 1) prior on logit probability. We visualize the implied priors by sampling from the priors.
+data.frame(theta = plogis(ggdist::rstudent_t(n=20000, df=3, mu=0, sigma=2.5))) |>
+ mcmc_hist() +
+ xlim(c(0,1)) +
+ labs(title='Default brms student_t(3, 0, 2.5) prior on Intercept')
+
+data.frame(theta = plogis(ggdist::rstudent_t(n=20000, df=7, mu=0, sigma=1.5))) |>
+ mcmc_hist() +
+ xlim(c(0,1)) +
+ labs(title='student_t(7, 0, 1.5) prior on Intercept')
+
+Almost uniform prior on theta could be obtained also with normal(0,1.5)
+data.frame(theta = plogis(rnorm(n=20000, mean=0, sd=1.5))) |>
+ mcmc_hist() +
+ xlim(c(0,1)) +
+ labs(title='normal(0, 1.5) prior on Intercept')
+
+Formula y ~ 1
corresponds to a model $\mathrm{logit}(\theta) =
+\alpha\times 1 = \alpha$. `brms` denotes the $\alpha$ as `Intercept`.
fit_bern <- brm(y ~ 1, family = bernoulli(), data = data_bern,
- prior = prior("", class='Intercept'),
+ prior = prior(student_t(7, 0, 1.5), class='Intercept'),
seed = SEED, refresh = 0)
Check the summary of the posterior and convergence
fit_bern
@@ -1662,7 +1684,7 @@ Bernoulli model
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 0.95 0.73 -0.43 2.49 1.00 1624 1768
+Intercept 0.76 0.64 -0.43 2.09 1.00 1734 1726
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
@@ -1676,7 +1698,7 @@ Bernoulli model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept 0.947 0.919 0.733 0.712 -0.202 2.18 1.00 1624. 1768.
+1 b_Intercept 0.763 0.746 0.641 0.636 -0.242 1.90 1.00 1734. 1726.
We can compute the probability of success by using plogis which is equal to inverse-logit function
draws <- draws |>
mutate_variables(theta=plogis(b_Intercept))
@@ -1687,79 +1709,32 @@ Bernoulli model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 theta 0.700 0.715 0.138 0.142 0.450 0.898 1.00 1624. 1768.
+1 theta 0.668 0.678 0.130 0.134 0.440 0.870 1.00 1734. 1726.
Histogram of theta
mcmc_hist(draws, pars='theta') +
xlab('theta') +
xlim(c(0,1))
-
-We next compare the result to using normal(0, 1) prior on logit probability. Visualize the prior by drawing samples from it
-prior_mean <- 0
-prior_sd <- 1
-prior_draws <- data.frame(
- theta = plogis(rnorm(20000, prior_mean, prior_sd)))
-mcmc_hist(prior_draws) +
- xlim(c(0,1))
-
-fit_bern <- brm(y ~ 1, family = bernoulli(), data = data_bern,
- prior = prior(normal(0, 1), class='Intercept'),
- seed = SEED, refresh = 0)
-Check the summary of the posterior and convergence
-fit_bern
- Family: bernoulli
- Links: mu = logit
-Formula: y ~ 1
- Data: data_bern (Number of observations: 10)
- Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
- total post-warmup draws = 4000
-
-Population-Level Effects:
- Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 0.61 0.52 -0.45 1.65 1.00 1688 2211
-
-Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
-and Tail_ESS are effective sample size measures, and Rhat is the potential
-scale reduction factor on split chains (at convergence, Rhat = 1).
-We can examine the latent parameter
-draws <- as_draws_df(fit_bern)
-draws |>
- subset_draws(variable='b_Intercept') |>
- summarise_draws()
-# A tibble: 1 × 10
- variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
- <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept 0.605 0.600 0.525 0.501 -0.245 1.48 1.00 1688. 2211.
-We can compute the probability of success by using plogis which is equal to inverse-logit function
-draws <- draws |>
- mutate_variables(theta=plogis(b_Intercept))
-Summary of theta by using summarise_draws()
-draws |>
- subset_draws(variable='theta') |>
- summarise_draws()
-# A tibble: 1 × 10
- variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
- <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 theta 0.639 0.646 0.115 0.115 0.439 0.815 1.00 1688. 2211.
-Histogram of theta
-mcmc_hist(draws, pars='theta') +
- xlab('theta') +
- xlim(c(0,1))
-
-As the number of observations is small, there is small change in the posterior mean when the prior is changed. You can experiment with different priors and varying the number of observations.
+
+Make prior sensitivity analysis by powerscaling both prior and likelihood. Focus on theta which is the quantity of interest.
+theta <- draws |>
+ subset_draws(variable='theta')
+powerscale_sensitivity(fit_bern, prediction = \(x, ...) theta, num_args=list(digits=2)
+ )$sensitivity |>
+ filter(variable=='theta') |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+# A tibble: 1 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 theta 0.04 0.11 -
Binomial model
Instead of sequence of 0’s and 1’s, we can summarize the data with the number of trials and the number successes and use Binomial model. The prior is specified in the ‘latent space’. The actual probability of success, theta = plogis(alpha), where plogis is the inverse of the logistic function.
-
Visualize the prior by drawing samples from it
-
prior_mean <- 0
-prior_sd <- 1
-prior_draws <- data.frame(theta = plogis(rnorm(20000, prior_mean, prior_sd)))
-mcmc_hist(prior_draws)
-
-
Binomial model with the same data
+
Binomial model with the same data and prior
data_bin <- data.frame(N = c(10), y = c(7))
+
Formula y | trials(N) ~ 1
corresponds to a model \(\mathrm{logit}(\theta) = \alpha\), and the number of trials for each observation is provided by | trials(N)
fit_bin <- brm(y | trials(N) ~ 1, family = binomial(), data = data_bin,
- prior = prior(normal(0,1), class='Intercept'),
+ prior = prior(student_t(7, 0,1.5), class='Intercept'),
seed = SEED, refresh = 0)
Check the summary of the posterior and convergence
fit_bin
@@ -1772,11 +1747,12 @@
Binomial model
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 0.59 0.54 -0.50 1.64 1.00 1770 2034
+Intercept 0.77 0.64 -0.46 2.09 1.00 1660 1769
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
+
The diagnostic indicates prior-data conflict, that is, both prior and likelihood are informative. If there is true strong prior information that would justify the normal(0,1) prior, then this is fine, but otherwise more thinking is required (the goal is not to adjust the prior to remove diagnostic warnings without thinking). In this toy example, we proceed with this prior.
Extract the posterior draws
draws <- as_draws_df(fit_bin)
We can get summary information using summarise_draws()
@@ -1786,7 +1762,7 @@
Binomial model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept 0.586 0.592 0.540 0.519 -0.323 1.48 1.00 1770. 2034.
+1 b_Intercept 0.767 0.758 0.636 0.622 -0.249 1.88 1.00 1660. 1769.
We can compute the probability of success by using plogis which is equal to inverse-logit function
draws <- draws |>
mutate_variables(theta=plogis(b_Intercept))
@@ -1797,12 +1773,12 @@
Binomial model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 theta 0.634 0.644 0.119 0.117 0.420 0.815 1.00 1770. 2034.
+1 theta 0.669 0.681 0.130 0.132 0.438 0.868 1.00 1660. 1769.
Histogram of theta
mcmc_hist(draws, pars='theta') +
xlab('theta') +
xlim(c(0,1))
-
+
Re-run the model with a new data dataset without recompiling
data_bin <- data.frame(N = c(5), y = c(4))
fit_bin <- update(fit_bin, newdata = data_bin)
@@ -1817,7 +1793,7 @@
Binomial model
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 0.76 0.71 -0.58 2.21 1.00 1325 1957
+Intercept 1.08 0.94 -0.63 3.07 1.00 1384 1655
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
@@ -1831,7 +1807,7 @@
Binomial model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept 0.758 0.745 0.715 0.705 -0.373 1.92 1.00 1325. 1957.
+1 b_Intercept 1.08 0.997 0.941 0.903 -0.319 2.72 1.00 1384. 1655.
We can compute the probability of success by using plogis which is equal to inverse-logit function
draws <- draws |>
mutate_variables(theta=plogis(b_Intercept))
@@ -1842,12 +1818,12 @@
Binomial model
# A tibble: 1 × 10
variable mean median sd mad q5 q95 rhat ess_bulk ess_tail
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 theta 0.664 0.678 0.144 0.152 0.408 0.872 1.00 1325. 1957.
+1 theta 0.712 0.730 0.161 0.171 0.421 0.938 1.00 1384. 1655.
Histogram of theta
mcmc_hist(draws, pars='theta') +
xlab('theta') +
xlim(c(0,1))
-
+
Comparison of two groups with Binomial
@@ -1858,37 +1834,37 @@
Comparison of two gr
Data, where grp2
is an indicator variable defined as a factor type, which is useful for categorical variables.
data_bin2 <- data.frame(N = c(674, 680), y = c(39,22), grp2 = factor(c('control','treatment')))
-
To analyse whether the treatment is useful, we can use Binomial model for both groups and compute odds-ratio.
-
fit_bin2 <- brm(y | trials(N) ~ grp2, family = binomial(), data = data_bin2,
- prior = prior(normal(0,1), class='Intercept'),
+To analyse whether the treatment is useful, we can use Binomial model for both groups and compute odds-ratio. To recreate the model as two independent (separate) binomial models, we use formula y | trials(N) ~ 0 + grp2
, which corresponds to a model \(\mathrm{logit}(\theta) = \alpha \times 0 + \beta_\mathrm{control}\times x_\mathrm{control} + \beta_\mathrm{treatment}\times x_\mathrm{treatment} = \beta_\mathrm{control}\times x_\mathrm{control} + \beta_\mathrm{treatment}\times x_\mathrm{treatment}\), where \(x_\mathrm{control}\) is a vector with 1 for control and 0 for treatment, and \(x_\mathrm{treatment}\) is a vector with 1 for treatment and 0 for control. As only one of the vectors has 1, this corresponds to separate models \(\mathrm{logit}(\theta_\mathrm{control}) = \beta_\mathrm{control}\) and \(\mathrm{logit}(\theta_\mathrm{treatment}) = \beta_\mathrm{treatment}\). We can provide the same prior for all \(\beta\)’s by setting the prior with class='b'
. With prior student_t(7, 0,1.5)
, both \(\beta\)’s are shrunk towards 0, but independently.
+fit_bin2 <- brm(y | trials(N) ~ 0 + grp2, family = binomial(), data = data_bin2,
+ prior = prior(student_t(7, 0,1.5), class='b'),
seed = SEED, refresh = 0)
-Check the summary of the posterior and convergence. brms is using the first factor level control
as the baseline and thus reports the coefficient (population-level effect) for treatment
(shown s grp2treatment
)
+Check the summary of the posterior and convergence. brms is using the first factor level control
as the baseline and thus reports the coefficient (population-level effect) for treatment
(shown as grp2treatment
) Check the summary of the posterior and convergence. With ~ 0 + grp2
there is no Intercept
and the coefficients are presented as grp2control
and grp2treatment
.
fit_bin2
Family: binomial
Links: mu = logit
-Formula: y | trials(N) ~ grp2
+Formula: y | trials(N) ~ 0 + grp2
Data: data_bin2 (Number of observations: 2)
Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
total post-warmup draws = 4000
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept -2.76 0.16 -3.09 -2.46 1.00 4528 2752
-grp2treatment -0.58 0.27 -1.13 -0.07 1.00 2445 2245
+grp2control -2.77 0.16 -3.10 -2.48 1.00 3563 3085
+grp2treatment -3.37 0.22 -3.81 -2.93 1.00 3824 1939
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
-Compute theta for each group and the odds-ratio
+Compute theta for each group and the odds-ratio. brms
uses variable names b_grp2control
and b_grp2treatment
for \(\beta_\mathrm{control}\) and \(\beta_\mathrm{treatment}\) respectively.
draws_bin2 <- as_draws_df(fit_bin2) |>
- mutate(theta_control = plogis(b_Intercept),
- theta_treatment = plogis(b_Intercept + b_grp2treatment),
+ mutate(theta_control = plogis(b_grp2control),
+ theta_treatment = plogis(b_grp2treatment),
oddsratio = (theta_treatment/(1-theta_treatment))/(theta_control/(1-theta_control)))
Plot oddsratio
mcmc_hist(draws_bin2, pars='oddsratio') +
scale_x_continuous(breaks=seq(0.2,1.6,by=0.2))+
geom_vline(xintercept=1, linetype='dashed')
-
+
Probability that the oddsratio<1
draws_bin2 |>
mutate(poddsratio = oddsratio<1) |>
@@ -1897,15 +1873,105 @@ Comparison of two gr
# A tibble: 1 × 3
variable mean mcse_mean
<chr> <dbl> <dbl>
-1 poddsratio 0.988 0.00200
-
oddratio 95% posterior interval
+1 poddsratio 0.986 0.00230
+oddsratio 95% posterior interval
draws_bin2 |>
subset(variable='oddsratio') |>
summarise_draws(~quantile(.x, probs = c(0.025, 0.975)), ~mcse_quantile(.x, probs = c(0.025, 0.975)))
# A tibble: 1 × 5
variable `2.5%` `97.5%` mcse_q2.5 mcse_q97.5
<chr> <dbl> <dbl> <dbl> <dbl>
-1 oddsratio 0.322 0.936 0.00547 0.0134
+1 oddsratio 0.317 0.931 0.00586 0.0134
+
Make prior sensitivity analysis by powerscaling both prior and likelihood. Focus on oddsratio which is the quantity of interest. We see that the likelihood is much more informative than the prior, and we would expect to see a different posterior only with a highly informative prior (possibly based on previous similar experiments).
+
oddsratio <- draws_bin2 |>
+ subset_draws(variable='oddsratio')
+powerscale_sensitivity(fit_bin2, prediction = \(x, ...) oddsratio, num_args=list(digits=2)
+ )$sensitivity |>
+ filter(variable=='oddsratio') |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+
# A tibble: 1 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 oddsratio 0.01 0.14 -
+
Above we used formula y | trials(N) ~ 0 + grp2
to have separate model for control and treatment group. An alternative model y | trials(N) ~ grp2
which is equal to y | trials(N) ~ 1 + grp2
, would correspond to a model \(\mathrm{logit}(\theta) = \alpha + \beta_\mathrm{treatment}\times x_\mathrm{treatment}\). Now \(\alpha\) models the probability of death (via logistic link) in the control group and \(\alpha + \beta_\mathrm{treatment}\) models the probability of death (via logistic link) in the treatment group. Now the models for the groups are connected. Furthermore, if we set independent student_t(7, 0, 1.5)
priors on \(\alpha\) and \(\beta_\mathrm{treatment}\), the implied priors on \(\theta_\mathrm{control}\) and \(\theta_\mathrm{treatment}\) are different. We can verify this with a prior simulation.
+
data.frame(theta_control = plogis(ggdist::rstudent_t(n=20000, df=7, mu=0, sigma=1.5))) |>
+ mcmc_hist() +
+ xlim(c(0,1)) +
+ labs(title='student_t(7, 0, 1.5) prior on Intercept') +
+data.frame(theta_treatment = plogis(ggdist::rstudent_t(n=20000, df=7, mu=0, sigma=1.5) +
+                                    ggdist::rstudent_t(n=20000, df=7, mu=0, sigma=1.5))) |>
+ mcmc_hist() +
+ xlim(c(0,1)) +
+ labs(title='student_t(7, 0, 1.5) prior on Intercept and b_grp2treatment')
+
+
In this case, with relatively big treatment and control group, the likelihood is informative, and the difference between using y | trials(N) ~ 0 + grp2
or y | trials(N) ~ grp2
is negligible.
+
Third option would be a hierarchical model with formula y | trials(N) ~ 1 + (1 | grp2)
, which is equivalent to y | trials(N) ~ 1 + (1 | grp2)
, and corresponds to a model \(\mathrm{logit}(\theta) = \alpha \times 1 + \beta_\mathrm{control}\times x_\mathrm{control} + \beta_\mathrm{treatment}\times x_\mathrm{treatment}\), but now the prior on \(\beta_\mathrm{control}\) and \(\beta_\mathrm{treatment}\) is \(\mathrm{normal}(0, \sigma_\mathrm{grp})\). The default brms
prior for \(\sigma_\mathrm{grp}\) is student_t(3, 0, 2.5)
. Now \(\alpha\) models the overall probability of death (via logistic link), and \(\beta_\mathrm{control}\) and \(\beta_\mathrm{treatment}\) model the difference from that having the same prior. Prior for \(\beta_\mathrm{control}\) and \(\beta_\mathrm{treatment}\) includes unknown scale \(\sigma_\mathrm{grp}\). If there is no difference between control and treatment groups, the posterior of \(\sigma_\mathrm{grp}\) has more mass near 0, and the bigger the difference between control and treatment groups is, the more mass there is away from 0. With just two groups, there is not much information about \(\sigma_\mathrm{grp}\), and unless there is an informative prior on \(\sigma_\mathrm{grp}\), two group hierarchical model is not that useful. Hierarchical models are more useful with more than two groups. In the following, we use the previously used student_t(7, 0,1.5)
prior on intercept and the default brms
prior student_t(3, 0, 2.5)
on \(\sigma_\mathrm{grp}\).
+
fit_bin2 <- brm(y | trials(N) ~ 1 + (1 | grp2), family = binomial(), data = data_bin2,
+ prior = prior(student_t(7, 0,1.5), class='Intercept'),
+ seed = SEED, refresh = 0, control=list(adapt_delta=0.99))
+
Check the summary of the posterior and convergence. The summary reports that there are Group-Level Effects: ~grp2
with 2 levels (control and treatment), with sd(Intercept)
denoting \(\sigma_\mathrm{grp}\). In addition, the summary lists Population-Level Effects: Intercept
(\(\alpha\)) as in the previous non-hierarchical models.
+
fit_bin2
+
Warning: There were 1 divergent transitions after warmup. Increasing
+adapt_delta above 0.99 may help. See
+http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
+
Family: binomial
+ Links: mu = logit
+Formula: y | trials(N) ~ 1 + (1 | grp2)
+ Data: data_bin2 (Number of observations: 2)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Group-Level Effects:
+~grp2 (Number of levels: 2)
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+sd(Intercept) 1.69 1.57 0.15 5.69 1.01 538 1113
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+Intercept -2.18 1.28 -3.85 1.01 1.01 569 1027
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
+
We can also look at the variable names brms
uses internally
+
as_draws_rvars(fit_bin2)
+
# A draws_rvars: 1000 iterations, 4 chains, and 5 variables
+$b_Intercept: rvar<1000,4>[1] mean ± sd:
+[1] -2.2 ± 1.3
+
+$sd_grp2__Intercept: rvar<1000,4>[1] mean ± sd:
+[1] 1.7 ± 1.6
+
+$r_grp2: rvar<1000,4>[2,1] mean ± sd:
+ Intercept
+control -0.63 ± 1.3
+treatment -1.19 ± 1.3
+
+$lprior: rvar<1000,4>[1] mean ± sd:
+[1] -4.3 ± 0.74
+
+$lp__: rvar<1000,4>[1] mean ± sd:
+[1] -13 ± 1.8
+
Although there is no difference, illustrate how to compute the oddsratio from hierarchical model
+
draws_bin2 <- as_draws_df(fit_bin2)
+oddsratio <- draws_bin2 |>
+ mutate_variables(theta_control = plogis(b_Intercept + `r_grp2[control,Intercept]`),
+ theta_treatment = plogis(b_Intercept + `r_grp2[treatment,Intercept]`),
+ oddsratio = (theta_treatment/(1-theta_treatment))/(theta_control/(1-theta_control))) |>
+ subset_draws(variable='oddsratio')
+oddsratio |> mcmc_hist() +
+ scale_x_continuous(breaks=seq(0.2,1.6,by=0.2))+
+ geom_vline(xintercept=1, linetype='dashed')
+
+
Make also prior sensitivity analysis with focus on oddsratio.
+
powerscale_sensitivity(fit_bin2, prediction = \(x, ...) oddsratio, num_args=list(digits=2)
+ )$sensitivity |>
+ filter(variable=='oddsratio') |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+
# A tibble: 1 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 oddsratio 0.00 0.16 -
Linear Gaussian model
@@ -1921,16 +1987,46 @@
Linear Gaussian mode
guides(linetype = "none")
To analyse has there been change in the average summer month temperature we use a linear model with Gaussian model for the unexplained variation. By default brms uses uniform prior for the coefficients.
-
temp ~ year
means temp depends on the intercept and temp
. The model could also be defined as temp ~ 1 + year
which explicitly shows the intercept part. The corresponding regression model is temp ~ normal(b_Intercept1 + b_yearyear, sigma)
+
Formula temp ~ year
corresponds to model \(\mathrm{temp} \sim \mathrm{normal}(\alpha + \beta \times \mathrm{year}, \sigma)\). The model could also be defined as `temp ~ 1 + year` which explicitly shows the intercept (\(\alpha\)) part. Using the variable names brms
uses the model can be written also as temp ~ normal(b_Intercept*1 + b_year*year, sigma)
. We start with the default priors to see some tricks that brms
does behind the curtain.
fit_lin <- brm(temp ~ year, data = data_lin, family = gaussian(),
seed = SEED, refresh = 0)
-
We can check the all the priors used. In general it is good to use proper priors, but sometimes flat priors are fine and produce proper posterior.
+
Check the summary of the posterior and convergence.
+
fit_lin
+
Family: gaussian
+ Links: mu = identity; sigma = identity
+Formula: temp ~ year
+ Data: data_lin (Number of observations: 71)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+Intercept -34.69 12.49 -58.73 -10.19 1.00 3995 3035
+year 0.02 0.01 0.01 0.03 1.00 3996 3035
+
+Family Specific Parameters:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+sigma 1.08 0.09 0.91 1.28 1.00 3057 3011
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
+
Convergence diagnostics look good. We see that posterior mean of Intercept
is -34.7, which may sound strange, but that is the intercept at year 0, that is, very far from the data range, and thus doesn’t have meaningful interpretation directly. The posterior mean of year
coefficient is 0.02, that is, we estimate that the summer temperature is increasing 0.02°C per year (which would make 1°C in 50 years).
+
+We can check \(R^2\) which corresponds to the proportion of variance explained by the model. The linear model explains 0.16=16% of the total data variance.
+
bayes_R2(fit_lin) |> round(2)
+
Estimate Est.Error Q2.5 Q97.5
+R2 0.16 0.07 0.03 0.3
+
We can check the all the priors used.
prior_summary(fit_lin)
prior class coef group resp dpar nlpar lb ub source
(flat) b default
(flat) b year (vectorized)
student_t(3, 9.5, 2.5) Intercept default
student_t(3, 0, 2.5) sigma 0 default
+
We see that class=b
and coef=year
have flat
, that is, improper uniform prior, Intercept
has student_t(3, 9.5, 2.5)
, and sigma
has student_t(3, 0, 2.5)
prior. In general it is good to use proper priors, but sometimes flat priors are fine and produce proper posterior (like in this case). Important part here is that by default, brms
sets the prior on Intercept after centering the covariate values (design matrix). In this case, brms
uses year - mean(year) = year - 1987
instead of original years. This in general improves the sampling efficiency. As the Intercept
is now defined at the middle of the data, the default Intercept
prior is centered on median of the target (here target is temp
). If we would like to set informative priors, we need to set the informative prior on Intercept
given the centered covariate values. We can turn of the centering by setting argument center=FALSE
, and we can set the prior on original intercept by using a formula temp ~ 0 + Intercept + year
. In this case, we are happy with the default prior for the intercept. In this specific case, the flat prior on coefficient is also fine, but we add a weakly informative prior just for the illustration. Let’s assume we expect the temperature to change less than 1°C in 10 years. With student_t(3, 0, 0.03)
about 95% prior mass has less than 0.1°C change in year, and with low degrees of freedom (3) we have thick tails making the likelihood dominate in case of prior-data conflict. In real life, we do have much more information about the temperature change, and naturally a hierarchical spatio-temporal model with all temperature measurement locations would be even better.
+
fit_lin <- brm(temp ~ year, data = data_lin, family = gaussian(),
+ prior = prior(student_t(3, 0, 0.03), class='b'),
+ seed = SEED, refresh = 0)
Check the summary of the posterior and convergence
fit_lin
Family: gaussian
@@ -1942,50 +2038,59 @@ Linear Gaussian mode
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept -34.69 12.49 -58.73 -10.19 1.00 3995 3035
-year 0.02 0.01 0.01 0.03 1.00 3996 3035
+Intercept -32.54 12.28 -56.70 -9.01 1.00 4183 3259
+year 0.02 0.01 0.01 0.03 1.00 4182 3259
Family Specific Parameters:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-sigma 1.08 0.09 0.91 1.28 1.00 3057 3011
+sigma 1.08 0.09 0.92 1.27 1.00 3494 2709
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
-
Extract the posterior draws and check the summaries
+
Make prior sensitivity analysis by powerscaling both prior and likelihood.
+
powerscale_sensitivity(fit_lin)$sensitivity |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+
# A tibble: 3 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 b_Intercept 0.03 0.09 -
+2 b_year 0.03 0.09 -
+3 sigma 0.00 0.13 -
+
Our weakly informative proper prior has negligible sensitivity, and the likelihood is informative. Extract the posterior draws and check the summaries
draws_lin <- as_draws_df(fit_lin)
draws_lin |> summarise_draws()
# A tibble: 5 × 10
variable mean median sd mad q5 q95 rhat ess_bulk
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept -3.47e+1 -3.47e+1 1.25e+1 1.23e+1 -5.49e+1 -1.39e+1 1.00 3995.
-2 b_year 2.22e-2 2.22e-2 6.29e-3 6.20e-3 1.17e-2 3.24e-2 1.00 3996.
-3 sigma 1.08e+0 1.08e+0 9.49e-2 9.45e-2 9.33e-1 1.25e+0 1.00 3057.
-4 lprior -3.27e+0 -3.26e+0 2.16e-2 2.11e-2 -3.30e+0 -3.23e+0 1.00 2908.
-5 lp__ -1.09e+2 -1.08e+2 1.31e+0 1.06e+0 -1.12e+2 -1.07e+2 1.00 2014.
+1 b_Intercept -3.25e+1 -3.24e+1 1.23e+1 1.24e+1 -5.29e+1 -1.29e+1 1.00 4183.
+2 b_year 2.11e-2 2.11e-2 6.18e-3 6.22e-3 1.12e-2 3.14e-2 1.00 4182.
+3 sigma 1.08e+0 1.07e+0 9.14e-2 9.08e-2 9.43e-1 1.24e+0 1.00 3494.
+4 lprior -1.08e+0 -1.06e+0 1.65e-1 1.65e-1 -1.38e+0 -8.51e-1 1.00 4173.
+5 lp__ -1.07e+2 -1.06e+2 1.21e+0 9.72e-1 -1.09e+2 -1.05e+2 1.00 1899.
# ℹ 1 more variable: ess_tail <dbl>
If one of the columns is hidden we can force printing all columns
draws_lin |> summarise_draws() |> print(width=Inf)
# A tibble: 5 × 10
variable mean median sd mad q5 q95 rhat
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
-1 b_Intercept -34.7 -34.7 12.5 12.3 -54.9 -13.9 1.00
-2 b_year 0.0222 0.0222 0.00629 0.00620 0.0117 0.0324 1.00
-3 sigma 1.08 1.08 0.0949 0.0945 0.933 1.25 1.00
-4 lprior -3.27 -3.26 0.0216 0.0211 -3.30 -3.23 1.00
-5 lp__ -109. -108. 1.31 1.06 -112. -107. 1.00
+1 b_Intercept -32.5 -32.4 12.3 12.4 -52.9 -12.9 1.00
+2 b_year 0.0211 0.0211 0.00618 0.00622 0.0112 0.0314 1.00
+3 sigma 1.08 1.07 0.0914 0.0908 0.943 1.24 1.00
+4 lprior -1.08 -1.06 0.165 0.165 -1.38 -0.851 1.00
+5 lp__ -107. -106. 1.21 0.972 -109. -105. 1.00
ess_bulk ess_tail
<dbl> <dbl>
-1 3995. 3035.
-2 3996. 3035.
-3 3057. 3011.
-4 2908. 2802.
-5 2014. 2714.
+1 4183. 3259.
+2 4182. 3259.
+3 3494. 2709.
+4 4173. 3285.
+5 1899. 2576.
Histogram of b_year
draws_lin |>
mcmc_hist(pars='b_year') +
xlab('Average temperature increase per year')
-
+
Probability that the coefficient b_year > 0 and the corresponding MCSE
draws_lin |>
mutate(I_b_year_gt_0 = b_year>0) |>
@@ -1994,7 +2099,8 @@ Linear Gaussian mode
# A tibble: 1 × 3
variable mean mcse_mean
<chr> <dbl> <dbl>
-1 I_b_year_gt_0 1.00 0.000353
+1 I_b_year_gt_0 1 NA
+
All posterior draws have b_year>0
, the probability gets rounded to 1, and MCSE is not available as the observed posterior variance is 0.
95% posterior interval for temperature increase per 100 years
draws_lin |>
mutate(b_year_100 = b_year*100) |>
@@ -2005,7 +2111,7 @@ Linear Gaussian mode
# A tibble: 1 × 5
variable `2.5%` `97.5%` mcse_q2.5 mcse_q97.5
<chr> <dbl> <dbl> <dbl> <dbl>
-1 b_year_100 0.99 3.44 0.03 0.03
+1 b_year_100 0.93 3.33 0.03 0.03
Plot posterior draws of the linear function values at each year. add_linpred_draws()
takes the years from the data and uses fit_lin
to make the predictions.
data_lin |>
add_linpred_draws(fit_lin) |>
@@ -2019,7 +2125,7 @@ Linear Gaussian mode
labs(x= "Year", y = 'Summer temp. @Kilpisjärvi') +
theme(legend.position="none")+
scale_x_continuous(breaks=seq(1950,2020,by=10))
-
+
Alternativelly plot a spaghetti plot for 100 draws
data_lin |>
add_linpred_draws(fit_lin, ndraws=100) |>
@@ -2033,7 +2139,7 @@ Linear Gaussian mode
labs(x= "Year", y = 'Summer temp. @Kilpisjärvi') +
theme(legend.position="none")+
scale_x_continuous(breaks=seq(1950,2020,by=10))
-
+
Plot posterior predictive distribution at each year until 2030 add_predicted_draws()
takes the years from the data and uses fit_lin
to make the predictions.
data_lin |>
add_row(year=2023:2030) |>
@@ -2049,14 +2155,15 @@ Linear Gaussian mode
theme(legend.position="none")+
scale_x_continuous(breaks=seq(1950,2030,by=10))
Warning: Removed 32000 rows containing missing values (`geom_point()`).
-
+
-
Linear Student’s t model
-
The temperatures used in the above analyses are averages over three months, which makes it more likely that they are normally distributed, but there can be extreme events in the feather and we can check whether more robust Student’s t observation model woul give different results.
+
Linear Student’s \(t\) model
+
+The temperatures used in the above analyses are averages over three months, which makes it more likely that they are normally distributed, but there can be extreme events in the weather and we can check whether more robust Student’s \(t\) observation model would give different results.
fit_lin_t <- brm(temp ~ year, data = data_lin, family = student(),
+ prior = prior(student_t(3, 0, 0.03), class='b'),
seed = SEED, refresh = 0)
-
Check the summary of the posterior and convergence. The b_year posterior looks similar as before and the posterior for degrees of freedom nu
has most of the posterior mas for quite large values indicating there is no strong support for thick tailed variation in temperature.
+
Check the summary of the posterior and convergence. The b_year posterior looks similar as before and the posterior for degrees of freedom nu
has most of the posterior mass for quite large values indicating there is no strong support for thick tailed variation in average summer temperatures.
fit_lin_t
Family: student
Links: mu = identity; sigma = identity; nu = identity
@@ -2067,13 +2174,13 @@ Linear Student’s t
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept -35.80 12.58 -59.61 -10.87 1.00 4023 2679
-year 0.02 0.01 0.01 0.03 1.00 4021 2678
+Intercept -34.01 12.27 -58.50 -9.31 1.00 3979 2893
+year 0.02 0.01 0.01 0.03 1.00 3979 2923
Family Specific Parameters:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-sigma 1.03 0.10 0.85 1.25 1.00 3454 3159
-nu 24.53 14.46 6.24 60.95 1.00 3450 2698
+sigma 1.03 0.10 0.86 1.24 1.00 3209 2302
+nu 24.54 14.36 6.36 60.80 1.00 2972 2325
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
@@ -2082,11 +2189,11 @@ Linear Student’s t
Pareto-smoothed importance-sampling leave-one-out cross-validation (PSIS-LOO)
We can use leave-one-out cross-validation to compare the expected predictive performance.
-
LOO comparison shows normal and Student’s t model have similar performance.
+
LOO comparison shows normal and Student’s \(t\) model have similar performance.
loo_compare(loo(fit_lin), loo(fit_lin_t))
elpd_diff se_diff
fit_lin 0.0 0.0
-fit_lin_t -0.3 0.4
+fit_lin_t -0.4 0.3
Heteroskedastic linear model
@@ -2094,6 +2201,7 @@
Heteroskedastic line
fit_lin_h <- brm(bf(temp ~ year,
sigma ~ year),
data = data_lin, family = gaussian(),
+ prior = prior(student_t(3, 0, 0.03), class='b'),
seed = SEED, refresh = 0)
Check the summary of the posterior and convergence. The b_year posterior looks similar to before. The posterior for sigma_year looks like having most of the mass for negative values, indicating decrease in temperature variation around the mean.
fit_lin_h
@@ -2107,10 +2215,10 @@
Heteroskedastic line
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept -38.66 12.65 -64.25 -13.47 1.00 3622 3040
-sigma_Intercept 19.17 8.99 0.99 36.23 1.00 4113 3133
-year 0.02 0.01 0.01 0.04 1.00 3642 3040
-sigma_year -0.01 0.00 -0.02 -0.00 1.00 4111 3056
+Intercept -36.37 12.49 -61.25 -10.49 1.00 3412 2842
+sigma_Intercept 19.10 8.69 1.56 35.80 1.00 3818 2899
+year 0.02 0.01 0.01 0.04 1.00 3426 2885
+sigma_year -0.01 0.00 -0.02 -0.00 1.00 3810 2855
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
@@ -2118,7 +2226,7 @@ Heteroskedastic line
Histogram of b_year and b_sigma_year
as_draws_df(fit_lin_h) |>
mcmc_areas(pars=c('b_year', 'b_sigma_year'))
-
+
As log(x) is almost linear when x is close to zero, we can see that the sigma is decreasing about 1% per year (95% interval from 0% to 2%).
Plot posterior predictive distribution at each year until 2030 add_predicted_draws()
takes the years from the data and uses fit_lin_h
to make the predictions.
data_lin |>
@@ -2135,28 +2243,35 @@ Heteroskedastic line
theme(legend.position="none")+
scale_x_continuous(breaks=seq(1950,2030,by=10))
Warning: Removed 32000 rows containing missing values (`geom_point()`).
-
+
+
Make prior sensitivity analysis by powerscaling both prior and likelihood.
+
powerscale_sensitivity(fit_lin_h)$sensitivity |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+
# A tibble: 4 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 b_Intercept 0.03 0.11 -
+2 b_sigma_Intercept 0.00 0.10 -
+3 b_year 0.03 0.11 -
+4 b_sigma_year 0.00 0.11 -
We can use leave-one-out cross-validation to compare the expected predictive performance.
LOO comparison shows homoskedastic normal and heteroskedastic normal models have similar performances.
loo_compare(loo(fit_lin), loo(fit_lin_h))
elpd_diff se_diff
fit_lin_h 0.0 0.0
-fit_lin -1.7 1.6
+fit_lin -1.6 1.6
Heteroskedastic non-linear model
We can test the linearity assumption by using non-linear spline functions, using s(year)
terms. Sampling is slower as the posterior gets more complex.
-
fit_lin_hs <- brm(bf(temp ~ s(year),
+fit_spline_h <- brm(bf(temp ~ s(year),
sigma ~ s(year)),
data = data_lin, family = gaussian(),
seed = SEED, refresh = 0)
We get warnings about divergences, and try rerunning with higher adapt_delta, which leads to using smaller step sizes. Often adapt_delta=0.999
leads to very slow sampling, but with this small data, this is not an issue.
-fit_lin_hs <- update(fit_lin_hs, control = list(adapt_delta=0.999))
-Check the summary of the posterior and convergence. The b_year posterior looks similar as before. The posterior for sigma_year looks like having mosst of the ma for negative values, indicating decrease in temperature variation around the mean.
-fit_lin_hs
-Warning: There were 4 divergent transitions after warmup. Increasing
-adapt_delta above 0.999 may help. See
-http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
+fit_spline_h <- update(fit_spline_h, control = list(adapt_delta=0.999))
+Check the summary of the posterior and convergence. We’re not anymore able to make interpretation of the temperature increase based on this summary. For splines, we see prior scales sds
for the spline coefficients.
+fit_spline_h
Family: gaussian
Links: mu = identity; sigma = log
Formula: temp ~ s(year)
@@ -2167,23 +2282,23 @@ Heteroskedastic non-
Smooth Terms:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-sds(syear_1) 1.10 1.25 0.04 3.87 1.00 1450 1845
-sds(sigma_syear_1) 0.94 0.91 0.03 3.33 1.00 1358 2175
+sds(syear_1) 1.00 0.91 0.04 3.37 1.00 1463 1648
+sds(sigma_syear_1) 0.96 0.95 0.02 3.60 1.00 1225 1585
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 9.42 0.13 9.16 9.68 1.00 5244 2923
-sigma_Intercept 0.04 0.09 -0.13 0.23 1.00 4473 2645
-syear_1 2.93 2.80 -2.82 9.12 1.00 1509 1068
-sigma_syear_1 -1.14 2.49 -6.99 3.80 1.00 1684 1312
+Intercept 9.42 0.13 9.15 9.67 1.00 4559 2617
+sigma_Intercept 0.04 0.09 -0.13 0.22 1.00 4601 2718
+syear_1 2.86 2.54 -2.82 8.08 1.00 2050 1906
+sigma_syear_1 -1.16 2.39 -7.17 3.49 1.00 1634 1042
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
-Plot posterior predictive distribution at each year until 2030 add_predicted_draws()
takes the years from the data and uses fit_lin_h
to make the predictions.
+We can still plot posterior predictive distribution at each year until 2030 add_predicted_draws()
takes the years from the data and uses fit_lin_h
to make the predictions.
data_lin |>
add_row(year=2023:2030) |>
- add_predicted_draws(fit_lin_hs) |>
+ add_predicted_draws(fit_spline_h) |>
# plot data
ggplot(aes(x=year, y=temp)) +
geom_point(color=2) +
@@ -2195,20 +2310,68 @@ Heteroskedastic non-
theme(legend.position="none")+
scale_x_continuous(breaks=seq(1950,2030,by=10))
Warning: Removed 32000 rows containing missing values (`geom_point()`).
-
-We can use leave-one-out cross-validation to compare the expected predictive performance.
+
+And we can use leave-one-out cross-validation to compare the expected predictive performance.
LOO comparison shows homoskedastic normal linear and heteroskedastic normal spline models have similar performances. There are not enough observations to make clear difference between the models.
-loo_compare(loo(fit_lin), loo(fit_lin_hs))
-Warning: Found 1 observations with a pareto_k > 0.7 in model 'fit_lin_hs'. It
+loo_compare(loo(fit_lin), loo(fit_spline_h))
+Warning: Found 1 observations with a pareto_k > 0.7 in model 'fit_spline_h'. It
is recommended to set 'moment_match = TRUE' in order to perform moment matching
for problematic observations.
- elpd_diff se_diff
-fit_lin_hs 0.0 0.0
-fit_lin -0.7 1.8
+ elpd_diff se_diff
+fit_spline_h 0.0 0.0
+fit_lin -0.5 1.8
+For spline and other non-parametric models, we can use predictive estimates and predictions to get interpretable quantities. Let’s examine the difference of estimated average temperature in years 1952 and 2022.
+temp_diff <- posterior_epred(fit_spline_h, newdata=filter(data_lin,year==1952|year==2022)) |>
+ rvar() |>
+ diff() |>
+ as_draws_df() |>
+ set_variables('temp_diff')
+
+temp_diff <- data_lin |>
+ filter(year==1952|year==2022) |>
+ add_epred_draws(fit_spline_h) |>
+ pivot_wider(id_cols=.draw, names_from = year, values_from = .epred) |>
+ mutate(temp_diff = `2022`-`1952`,
+ .chain = (.draw - 1) %/% 1000 + 1,
+ .iteration = (.draw - 1) %% 1000 + 1) |>
+ as_draws_df() |>
+ subset_draws(variable='temp_diff')
+Posterior distribution for average summer temperature increase from 1952 to 2022
+temp_diff |>
+ mcmc_hist()
+
+95% posterior interval for average summer temperature increase from 1952 to 2022
+temp_diff |>
+ summarise_draws(~quantile(.x, probs = c(0.025, 0.975)),
+ ~mcse_quantile(.x, probs = c(0.025, 0.975)),
+ .num_args = list(digits = 2, notation = "dec"))
+# A tibble: 1 × 5
+ variable `2.5%` `97.5%` mcse_q2.5 mcse_q97.5
+ <chr> <dbl> <dbl> <dbl> <dbl>
+1 temp_diff 0.56 2.57 0.03 0.02
+Make prior sensitivity analysis by powerscaling both prior and likelihood with focus on average summer temperature increase from 1952 to 2022.
+powerscale_sensitivity(fit_spline_h, prediction = \(x, ...) temp_diff, num_args=list(digits=2)
+ )$sensitivity |>
+ filter(variable=='temp_diff') |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+# A tibble: 1 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 temp_diff 0.01 0.08 -
+Probability that the average summer temperature has increased from 1952 to 2022 is 99.8%.
+temp_diff |>
+ mutate(I_temp_diff_gt_0 = temp_diff>0,
+ temp_diff = NULL) |>
+ subset_draws(variable='I_temp_diff_gt_0') |>
+ summarise_draws(mean, mcse_mean)
+# A tibble: 1 × 3
+ variable mean mcse_mean
+ <chr> <dbl> <dbl>
+1 I_temp_diff_gt_0 0.998 0.000787
Comparison of k groups with hierarchical normal models
-
Load factory data, which contain 5 quality measurements for each of 6 machines. We’re interested in analying are the quality differences between the machines.
+
Load factory data, which contain 5 quality measurements for each of 6 machines. We’re interested in analysing whether there are quality differences between the machines.
factory <- read.table(url('https://raw.githubusercontent.com/avehtari/BDA_course_Aalto/master/rpackage/data-raw/factory.txt'))
colnames(factory) <- 1:6
factory
@@ -2242,13 +2405,61 @@
Comparison of k gr
Pooled model
As a comparison, make also a pooled model
fit_pooled <- brm(quality ~ 1, data = factory, refresh=0)
+
Check the summary of the posterior and convergence.
+
fit_pooled
+
Family: gaussian
+ Links: mu = identity; sigma = identity
+Formula: quality ~ 1
+ Data: factory (Number of observations: 30)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+Intercept 92.95 3.28 86.54 99.40 1.00 2644 2350
+
+Family Specific Parameters:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+sigma 18.46 2.60 14.25 24.36 1.00 2771 2129
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
Separate model
As a comparison, make also a separate model. To make it completely separate we need to have different sigma for each machine, too.
-
fit_separate <- brm(bf(quality ~ machine,
- sigma ~ machine),
+fit_separate <- brm(bf(quality ~ 0 + machine,
+ sigma ~ 0 + machine),
data = factory, refresh=0)
+Check the summary of the posterior and convergence.
+fit_separate
+ Family: gaussian
+ Links: mu = identity; sigma = log
+Formula: quality ~ 0 + machine
+ sigma ~ 0 + machine
+ Data: factory (Number of observations: 30)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+machine1 75.53 12.30 50.60 98.69 1.01 2164 1545
+machine2 106.31 7.28 91.55 121.14 1.00 2775 2052
+machine3 87.28 9.04 69.54 104.23 1.00 1502 1144
+machine4 111.54 4.37 102.46 120.46 1.00 2445 1803
+machine5 89.86 6.61 76.45 102.65 1.00 1915 1225
+machine6 86.02 11.82 61.70 108.54 1.00 2109 1822
+sigma_machine1 3.12 0.41 2.47 4.07 1.00 2352 1483
+sigma_machine2 2.61 0.41 1.94 3.53 1.00 2510 1829
+sigma_machine3 2.70 0.42 2.06 3.68 1.00 1838 1355
+sigma_machine4 2.15 0.39 1.51 3.03 1.00 2498 1795
+sigma_machine5 2.52 0.39 1.89 3.43 1.00 1833 1669
+sigma_machine6 3.09 0.39 2.46 3.96 1.00 2357 1717
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
@@ -2267,37 +2478,79 @@
Common variance hi
Group-Level Effects:
~machine (Number of levels: 6)
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-sd(Intercept) 12.65 5.88 3.02 27.05 1.01 773 605
+sd(Intercept) 12.78 6.02 3.71 27.57 1.00 1038 1397
Population-Level Effects:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-Intercept 92.84 5.68 81.06 104.18 1.00 1637 1814
+Intercept 92.79 5.70 81.60 104.46 1.00 1400 1299
Family Specific Parameters:
Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
-sigma 15.05 2.26 11.48 20.24 1.00 1541 2260
+sigma 15.09 2.28 11.40 20.37 1.00 2195 2348
Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
-
LOO comparison shows the hierarchical model is the best
+
LOO comparison shows the hierarchical model is the best. The differences are small as the number of observations is small and there is a considerable prediction (aleatoric) uncertainty.
loo_compare(loo(fit_pooled), loo(fit_separate), loo(fit_hier))
-
Warning: Found 3 observations with a pareto_k > 0.7 in model 'fit_separate'. It
+Warning: Found 2 observations with a pareto_k > 0.7 in model 'fit_separate'. It
is recommended to set 'moment_match = TRUE' in order to perform moment matching
for problematic observations.
elpd_diff se_diff
fit_hier 0.0 0.0
-fit_separate -3.5 2.7
+fit_separate -3.2 2.9
fit_pooled -4.0 2.0
-Distributions of quality differences from the mean quality
-mcmc_areas(as_draws_df(fit_hier), regex_pars='r_machine')
-
-Posterior predictive distributions for 6 old and 1 new machines
-posterior_predict(fit_hier, newdata=data.frame(machine=1:7, quality=rep(NA,7)),
- allow_new_levels=TRUE) |>
+Different model posterior distributions for the mean quality. Pooled model ignores the variation between machines. Separate model doesn’t take benefit from the similarity of the machines and has higher uncertainty.
+ph <- fit_hier |>
+ spread_rvars(b_Intercept, r_machine[machine,]) |>
+ mutate(machine_mean = b_Intercept + r_machine) |>
+ ggplot(aes(xdist=machine_mean, y=machine)) +
+ stat_halfeye() +
+ scale_y_continuous(breaks=1:6) +
+ labs(x='Quality', y='Machine', title='Hierarchical')
+
+ps <- fit_separate |>
as_draws_df() |>
- mcmc_areas()
-
+ subset_draws(variable='b_machine', regex=TRUE) |>
+ set_variables(paste0('b_machine[', 1:6, ']')) |>
+ as_draws_rvars() |>
+ spread_rvars(b_machine[machine]) |>
+ mutate(machine_mean = b_machine) |>
+ ggplot(aes(xdist=machine_mean, y=machine)) +
+ stat_halfeye() +
+ scale_y_continuous(breaks=1:6) +
+ labs(x='Quality', y='Machine', title='Separate')
+
+pp <- fit_pooled |>
+ spread_rvars(b_Intercept) |>
+ mutate(machine_mean = b_Intercept) |>
+ ggplot(aes(xdist=machine_mean, y=0)) +
+ stat_halfeye() +
+ scale_y_continuous(breaks=NULL) +
+ labs(x='Quality', y='All machines', title='Pooled')
+
+(pp / ps / ph) * xlim(c(50,140))
+Warning: Removed 865 rows containing missing values (`geom_slabinterval()`).
+
+Make prior sensitivity analysis by powerscaling both prior and likelihood with focus on mean quality of each machine. We see no prior sensitivity.
+machine_mean <- fit_hier |>
+ as_draws_df() |>
+ mutate(across(matches('r_machine'), ~ .x - b_Intercept)) |>
+ subset_draws(variable='r_machine', regex=TRUE) |>
+ set_variables(paste0('machine_mean[', 1:6, ']'))
+powerscale_sensitivity(fit_hier, prediction = \(x, ...) machine_mean, num_args=list(digits=2)
+ )$sensitivity |>
+ filter(str_detect(variable,'machine_mean')) |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+# A tibble: 6 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 machine_mean[1] 0.03 0.08 -
+2 machine_mean[2] 0.02 0.07 -
+3 machine_mean[3] 0.03 0.03 -
+4 machine_mean[4] 0.03 0.10 -
+5 machine_mean[5] 0.03 0.02 -
+6 machine_mean[6] 0.03 0.03 -
Hierarchical binomial model
@@ -2312,17 +2565,141 @@
Hierarchical binom
4 Awada 2005 400 1 10
5 Awada 2005 600 7 12
6 Awada 2005 800 1 3
-
Pooled model assumes all studies have the same dose effect
+
Pooled model assumes all studies have the same dose effect (reminder: ~ dose
is equivalent to ~ 1 + dose
)
fit_pooled <- brm(events | trials(total) ~ dose,
+ prior = c(prior(student_t(7, 0, 1.5), class='Intercept'),
+ prior(normal(0, 1), class='b')),
+ family=binomial(), data=dat.ursino2021)
+
Check the summary of the posterior and convergence
+
fit_pooled
+
Family: binomial
+ Links: mu = logit
+Formula: events | trials(total) ~ dose
+ Data: dat.ursino2021 (Number of observations: 49)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+Intercept -3.18 0.38 -3.93 -2.44 1.00 1261 1782
+dose 0.00 0.00 0.00 0.01 1.00 2464 2333
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
+
Dose coefficient seems to be very small. Looking at the posterior, we see that it is positive with high probability.
+
fit_pooled |>
+ as_draws() |>
+ subset_draws(variable='b_dose') |>
+ summarise_draws(~quantile(.x, probs = c(0.025, 0.975)), ~mcse_quantile(.x, probs = c(0.025, 0.975)))
+
# A tibble: 1 × 5
+ variable `2.5%` `97.5%` mcse_q2.5 mcse_q97.5
+ <chr> <dbl> <dbl> <dbl> <dbl>
+1 b_dose 0.00228 0.00525 0.0000358 0.0000365
+
The dose was reported in mg, and most values are in hundreds. It is often sensible to switch to a scale in which the range of values is closer to unit range. In this case it is natural to use g instead of mg.
+
dat.ursino2021 <- dat.ursino2021 |>
+ mutate(doseg = dose/100)
+
Fit the pooled model again using doseg
+
fit_pooled <- brm(events | trials(total) ~ doseg,
+ prior = c(prior(student_t(7, 0, 1.5), class='Intercept'),
+ prior(normal(0, 1), class='b')),
family=binomial(), data=dat.ursino2021)
-
Separate model assumes all studies have different dose effect
-
fit_separate <- brm(events | trials(total) ~ dose:study,
- family=binomial(), data=dat.ursino2021)
-fit_separate <- update(fit_separate, control=list(init=0.1))
-
Hierarchical model assumes common mean effect and variation round with normal population prior
-
fit_hier <- brm(events | trials(total) ~ dose + (dose | study),
- family=binomial(), data=dat.ursino2021)
-fit_hier <- update(fit_hier, control=list(adapt_delta=0.99))
+
Check the summary of the posterior and convergence.
+
fit_pooled
+
Family: binomial
+ Links: mu = logit
+Formula: events | trials(total) ~ doseg
+ Data: dat.ursino2021 (Number of observations: 49)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
+Intercept -3.15 0.37 -3.92 -2.46 1.00 2009 2360
+doseg 0.37 0.08 0.22 0.51 1.00 2374 2519
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
+
Now it is easier to interpret the presented values. Separate model assumes all studies have a different dose effect. It would be a bit complicated to set a different prior on study specific intercepts and other coefficients, so we use the same prior for all.
+
fit_separate <- brm(events | trials(total) ~ 0 + study + doseg:study,
+ prior=prior(student_t(7, 0, 1.5), class='b'),
+ family=binomial(), data=dat.ursino2021)
+
Check the summary of the posterior and convergence.
+
fit_separate
+
Family: binomial
+ Links: mu = logit
+Formula: events | trials(total) ~ 0 + study + doseg:study
+ Data: dat.ursino2021 (Number of observations: 49)
+ Draws: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
+ total post-warmup draws = 4000
+
+Population-Level Effects:
+ Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
+studyAwada -2.64 1.04 -4.92 -0.86 1.00 5046
+studyBorthakurMA -2.41 1.76 -6.53 0.49 1.00 6379
+studyBorthakurMB -1.64 1.42 -4.82 0.86 1.00 5944
+studyChen -0.80 1.57 -4.14 2.01 1.00 6776
+studyClark -3.03 1.44 -6.33 -0.76 1.00 5432
+studyCrumpMA -1.51 1.24 -4.29 0.73 1.00 5751
+studyCrumpMB -1.88 1.17 -4.44 0.12 1.00 5871
+studyFuruse -1.68 1.65 -5.41 1.13 1.00 5569
+studyMiller -1.12 0.81 -2.75 0.37 1.00 6231
+studyMinami -1.97 1.17 -4.54 0.12 1.00 5900
+studyMoore -2.33 1.19 -4.91 -0.28 1.00 5140
+studyNabors -2.92 1.44 -6.20 -0.49 1.00 5632
+studyStrumberg -1.96 0.85 -3.75 -0.45 1.00 6403
+studyAwada:doseg 0.37 0.20 0.01 0.80 1.00 4815
+studyBorthakurMA:doseg 0.00 0.39 -0.71 0.85 1.00 5594
+studyBorthakurMB:doseg 0.06 0.32 -0.53 0.73 1.00 6176
+studyChen:doseg -0.65 0.52 -1.68 0.38 1.00 6028
+studyClark:doseg 0.44 0.25 0.01 0.99 1.00 5658
+studyCrumpMA:doseg -0.31 0.49 -1.27 0.68 1.00 5526
+studyCrumpMB:doseg 0.10 0.29 -0.45 0.67 1.00 5536
+studyFuruse:doseg -0.53 0.61 -1.77 0.68 1.00 5238
+studyMiller:doseg 0.03 0.28 -0.50 0.57 1.00 6497
+studyMinami:doseg -0.17 0.36 -0.94 0.49 1.00 5627
+studyMoore:doseg 0.20 0.27 -0.30 0.77 1.00 5079
+studyNabors:doseg 0.31 0.20 -0.04 0.76 1.00 5668
+studyStrumberg:doseg 0.09 0.16 -0.23 0.42 1.00 6673
+ Tail_ESS
+studyAwada 2397
+studyBorthakurMA 2218
+studyBorthakurMB 2775
+studyChen 2492
+studyClark 2376
+studyCrumpMA 2159
+studyCrumpMB 2693
+studyFuruse 2199
+studyMiller 2690
+studyMinami 2466
+studyMoore 2732
+studyNabors 2369
+studyStrumberg 2879
+studyAwada:doseg 2153
+studyBorthakurMA:doseg 2410
+studyBorthakurMB:doseg 2712
+studyChen:doseg 2948
+studyClark:doseg 2487
+studyCrumpMA:doseg 2565
+studyCrumpMB:doseg 2904
+studyFuruse:doseg 2360
+studyMiller:doseg 2616
+studyMinami:doseg 2447
+studyMoore:doseg 2561
+studyNabors:doseg 2217
+studyStrumberg:doseg 2602
+
+Draws were sampled using sample(hmc). For each parameter, Bulk_ESS
+and Tail_ESS are effective sample size measures, and Rhat is the potential
+scale reduction factor on split chains (at convergence, Rhat = 1).
+
Hierarchical model assumes common mean effect and variation around with normal population prior (reminder: ~ doseg + (doseg | study)
is equivalent to ~ 1 + doseg + (1 + doseg | study)
)
+
fit_hier <- brm(events | trials(total) ~ doseg + (doseg | study),
+ prior=c(prior(student_t(7, 0, 1.5), class='Intercept'),
+ prior(normal(0, 1), class='b')),
+ family=binomial(), data=dat.ursino2021)
+
We see some divergences and repeat with higher adapt_delta
+
fit_hier <- update(fit_hier, control=list(adapt_delta=0.99))
LOO-CV comparison
loo_compare(loo(fit_pooled), loo(fit_separate), loo(fit_hier))
Warning: Found 15 observations with a pareto_k > 0.7 in model 'fit_separate'.
@@ -2332,49 +2709,70 @@ Hierarchical binom
recommended to set 'moment_match = TRUE' in order to perform moment matching
for problematic observations.
elpd_diff se_diff
-fit_hier 0.0 0.0
-fit_pooled -1.4 2.8
-fit_separate -9.2 3.1
+fit_hier 0.0 0.0
+fit_pooled -1.3 2.9
+fit_separate -23.8 5.3
We get warnings about Pareto k’s > 0.7 in PSIS-LOO for separate model, but as in that case the LOO-CV estimate is usually overoptimistic and the separate model is the worst, there is no need to use more accurate computation.
Hierarchical model has better elpd than the pooled, but difference is negligible. However, when we look at the study specific parameters, we see that the Miller study has higher intercept (more events).
mcmc_areas(as_draws_df(fit_hier), regex_pars='r_study\\[.*Intercept')
-
+
There are no differences in slopes.
-
mcmc_areas(as_draws_df(fit_hier), regex_pars='r_study\\[.*dose')
-
-
The coefficient for the dose is clearly larger than 0
-
mcmc_areas(as_draws_df(fit_hier), regex_pars='b_dose') +
+mcmc_areas(as_draws_df(fit_hier), regex_pars='r_study\\[.*doseg')
+
+The population level coefficient for the dose is clearly larger than 0
+mcmc_areas(as_draws_df(fit_hier), regex_pars='b_doseg') +
geom_vline(xintercept=0, linetype='dashed') +
- xlim(c(0,0.01))
+ xlim(c(0,1))
Warning: Removed 1 rows containing missing values (`geom_segment()`).
-
-
The posterior for the probability of event given certain dose and a new study
+
+
Make prior sensitivity analysis by powerscaling both prior and likelihood.
+
powerscale_sensitivity(fit_hier, variable='b_doseg'
+ )$sensitivity |>
+ mutate(across(where(is.double), ~num(.x, digits=2)))
+
# A tibble: 1 × 4
+ variable prior likelihood diagnosis
+ <chr> <num:.2!> <num:.2!> <chr>
+1 b_doseg 0.03 0.08 -
+
The posterior for the probability of event given certain dose and a new study.
data.frame(study='new',
- dose=seq(100,1000,by=100),
+ doseg=seq(0.1,1,by=0.1),
total=1) |>
add_linpred_draws(fit_hier, transform=TRUE, allow_new_levels=TRUE) |>
- ggplot(aes(x=dose, y=.linpred)) +
+ ggplot(aes(x=doseg, y=.linpred)) +
stat_lineribbon(.width = c(.95), alpha = 1/2, color=brewer.pal(5, "Blues")[[5]]) +
scale_fill_brewer()+
- labs(x= "Dose", y = 'Probability of event') +
+ labs(x= "Dose (g)", y = 'Probability of event') +
+ theme(legend.position="none") +
+ geom_hline(yintercept=0) +
+ scale_x_continuous(breaks=seq(0.1,1,by=0.1))
+
+
If we plot individual posterior draws, we see that there is a lot of uncertainty about the overall probability (explained by the variation in Intercept in different studies), but less uncertainty about the slope.
+
data.frame(study='new',
+ doseg=seq(0.1,1,by=0.1),
+ total=1) |>
+ add_linpred_draws(fit_hier, transform=TRUE, allow_new_levels=TRUE, ndraws=100) |>
+ ggplot(aes(x=doseg, y=.linpred)) +
+ geom_line(aes(group=.draw), alpha = 1/2, color = brewer.pal(5, "Blues")[[3]])+
+ scale_fill_brewer()+
+ labs(x= "Dose (g)", y = 'Probability of event') +
theme(legend.position="none") +
geom_hline(yintercept=0) +
- scale_x_continuous(breaks=seq(100,1000,by=100))
-
-
Posterior predictive checking
+ scale_x_continuous(breaks=seq(0.1,1,by=0.1))
+
+
Posterior predictive checking showing the observed and predicted number of events.
pp_check(fit_hier, type = "ribbon_grouped", group="study")
-
+
Licenses
-- Code © 2017-2023, Aki Vehtari, licensed under BSD-3.
-- Text © 2017-2023, Aki Vehtari, licensed under CC-BY-NC 4.0.
+- Code © 2017-2024, Aki Vehtari, licensed under BSD-3.
+- Text © 2017-2024, Aki Vehtari, licensed under CC-BY-NC 4.0.
-LS0tCnRpdGxlOiAiQmF5ZXNpYW4gZGF0YSBhbmFseXNpcyAtIEJSTVMgZGVtb3MiCmF1dGhvcjogIkFraSBWZWh0YXJpIgpkYXRlOiAiRmlyc3QgdmVyc2lvbiAyMDIzLTEyLTA1LiBMYXN0IG1vZGlmaWVkIGByIGZvcm1hdChTeXMuRGF0ZSgpKWAuIgpvdXRwdXQ6CiAgaHRtbF9kb2N1bWVudDoKICAgIGZpZ19jYXB0aW9uOiB5ZXMKICAgIHRvYzogVFJVRQogICAgdG9jX2RlcHRoOiAyCiAgICBudW1iZXJfc2VjdGlvbnM6IFRSVUUKICAgIHRvY19mbG9hdDoKICAgICAgc21vb3RoX3Njcm9sbDogRkFMU0UKICAgIHRoZW1lOiByZWFkYWJsZQogICAgY29kZV9kb3dubG9hZDogdHJ1ZQotLS0KIyBTZXR1cCAgey51bm51bWJlcmVkfQoKYGBge3Igc2V0dXAsIGluY2x1ZGU9RkFMU0V9CmtuaXRyOjpvcHRzX2NodW5rJHNldChjYWNoZT1GQUxTRSwgbWVzc2FnZT1GQUxTRSwgZXJyb3I9RkFMU0UsIHdhcm5pbmc9VFJVRSwgY29tbWVudD1OQSwgb3V0LndpZHRoPSc5NSUnKQpgYGAKCioqTG9hZCBwYWNrYWdlcyoqCgpgYGB7cn0KbGlicmFyeSh0aWR5cikKbGlicmFyeShkcGx5cikKbGlicmFyeShicm1zKQpvcHRpb25zKGJybXMuYmFja2VuZCA9ICJjbWRzdGFuciIsIG1jLmNvcmVzID0gMikKbGlicmFyeShwb3N0ZXJpb3IpCm9wdGlvbnMocGlsbGFyLm5lZ2F0aXZlID0gRkFMU0UpCmxpYnJhcnkobG9vKQpsaWJyYXJ5KGdncGxvdDIpCmxpYnJhcnkoYmF5ZXNwbG90KQp0aGVtZV9zZXQoYmF5ZXNwbG90Ojp0aGVtZV9kZWZhdWx0KGJhc2VfZmFtaWx5ID0gInNhbnMiKSkKbGlicmFyeSh0aWR5YmF5ZXMpCmxpYnJhcnkoZ2dkaXN0KQpsaWJyYXJ5KHBhdGNod29yaykKbGlicmFyeShSQ29sb3JCcmV3ZXIpClNFRUQgPC0gNDg5MjcgIyBzZXQgcmFuZG9tIHNlZWQgZm9yIHJlcHJvZHVjYWJpbGl0eQpgYGAKCiMgSW50cm9kdWN0aW9uCgpUaGlzIG5vdGVib29rIGNvbnRhaW5zIHNldmVyYWwgZXhhbXBsZXMgb2YgaG93IHRvIHVzZSBbU3Rhbl0oaHR0cHM6Ly9tYy1zdGFuLm9yZykgaW4gUiB3aXRoIFtfX2JybXNfX10oaHR0cHM6Ly9wYXVsLWJ1ZXJrbmVyLmdpdGh1Yi5pby9icm1zLykuIFRoaXMgbm90ZWJvb2sgYXNzdW1lcyBiYXNpYyBrbm93bGVkZ2Ugb2YgQmF5ZXNpYW4gaW5mZXJlbmNlIGFuZCBNQ01DLiBUaGUgZXhhbXBsZXMgYXJlIHJlbGF0ZWQgdG8gW0JheWVzaWFuIGRhdGEgYW5hbHlzaXMgY291cnNlXShodHRwczovL2F2ZWh0YXJpLmdpdGh1Yi5pby9CREFfY291cnNlX0FhbHRvLykuCgojIEJlcm5vdWxsaSBtb2RlbAoKVG95IGRhdGEgd2l0aCBzZXF1ZW5jZSBvZiBmYWlsdXJlcyAoMCkgYW5kIHN1Y2Nlc3NlcyAoMSkuIFdlIHdvdWxkCmxpa2UgdG8gbGVhcm4gYWJvdXQgdGhlIHVua25vd24gcHJvYmFiaWxpdHkgb2Ygc3VjY2Vzcy4KCmBgYHtyfQpkYXRhX2Jlcm4gPC0gZGF0YS5mcmFtZSh5ID0gYygxLCAxLCAxLCAwLCAxLCAxLCAxLCAwLCAxLCAwKSkKYGBgCgpicm1zIHVzZXMgYnkgZGVmYXVsdCBzdHVkZW50X3QoMywgMCw
gMi41KSwgYnUgd2UgY2FuIGFzc2lnbiB1bmlmb3JtCnByaW9yIChiZXRhKDEsMSkpLiAKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfYmVybiA8LSBicm0oeSB+IDEsIGZhbWlseSA9IGJlcm5vdWxsaSgpLCBkYXRhID0gZGF0YV9iZXJuLAogICAgICAgICAgICAgICAgcHJpb3IgPSBwcmlvcigiIiwgY2xhc3M9J0ludGVyY2VwdCcpLAogICAgICAgICAgICAgICAgc2VlZCA9IFNFRUQsIHJlZnJlc2ggPSAwKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlCgpgYGB7cn0KZml0X2Jlcm4KYGBgCgpFeHRyYWN0IHRoZSBwb3N0ZXJpb3IgZHJhd3MKCmBgYHtyfQpkcmF3cyA8LSBhc19kcmF3c19kZihmaXRfYmVybikKYGBgCgpXZSBjYW4gZ2V0IHN1bW1hcnkgaW5mb3JtYXRpb24gdXNpbmcgc3VtbWFyaXNlX2RyYXdzKCkKCmBgYHtyfQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nYl9JbnRlcmNlcHQnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKV2UgY2FuIGNvbXB1dGUgdGhlIHByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MgYnkgdXNpbmcgcGxvZ2lzIHdoaWNoIGlzCmVxdWFsIHRvIGludmVyc2UtbG9naXQgZnVuY3Rpb24KCmBgYHtyfQpkcmF3cyA8LSBkcmF3cyB8PgogIG11dGF0ZV92YXJpYWJsZXModGhldGE9cGxvZ2lzKGJfSW50ZXJjZXB0KSkKYGBgCgpTdW1tYXJ5IG9mIHRoZXRhIGJ5IHVzaW5nIHN1bW1hcmlzZV9kcmF3cygpCgpgYGB7cn0KZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RoZXRhJykgfD4KICBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCkhpc3RvZ3JhbSBvZiB0aGV0YQoKYGBge3J9Cm1jbWNfaGlzdChkcmF3cywgcGFycz0ndGhldGEnKSArCiAgeGxhYigndGhldGEnKSArCiAgeGxpbShjKDAsMSkpCmBgYAoKV2UgbmV4dCBjb21wYXJlIHRoZSByZXN1bHQgdG8gdXNpbmcgbm9ybWFsKDAsIDEpIHByaW9yIG9uIGxvZ2l0CnByb2JhYmlsaXR5LiBWaXN1YWxpemUgdGhlIHByaW9yIGJ5IGRyYXdpbmcgc2FtcGxlcyBmcm9tIGl0CgpgYGB7cn0KcHJpb3JfbWVhbiA8LSAwCnByaW9yX3NkIDwtIDEKcHJpb3JfZHJhd3MgPC0gZGF0YS5mcmFtZSgKICAgICAgICAgICAgICAgICB0aGV0YSA9IHBsb2dpcyhybm9ybSgyMDAwMCwgcHJpb3JfbWVhbiwgcHJpb3Jfc2QpKSkKbWNtY19oaXN0KHByaW9yX2RyYXdzKSArCiAgeGxpbShjKDAsMSkpCgpgYGAKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9iZXJuIDwtIGJybSh5IH4gMSwgZmFtaWx5ID0gYmVybm91bGxpKCksIGRhdGEgPSBkYXRhX2Jlcm4sCiAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKG5vcm1hbCgwLCAxKSwgY2xhc3M9J0ludGVyY2VwdCcpLAogICAgICAgICAgICAgICAgc2VlZCA9IFNFRUQsIHJlZnJlc2ggPSAwKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlCgpgYGB7cn0KZml0X2Jlcm4KYGBgCgpXZSBjYW4gZXh
hbWluZSB0aGUgbGF0ZW50IHBhcmFtZXRlcgoKYGBge3J9CmRyYXdzIDwtIGFzX2RyYXdzX2RmKGZpdF9iZXJuKQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nYl9JbnRlcmNlcHQnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKV2UgY2FuIGNvbXB1dGUgdGhlIHByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MgYnkgdXNpbmcgcGxvZ2lzIHdoaWNoIGlzCmVxdWFsIHRvIGludmVyc2UtbG9naXQgZnVuY3Rpb24KCmBgYHtyfQpkcmF3cyA8LSBkcmF3cyB8PgogIG11dGF0ZV92YXJpYWJsZXModGhldGE9cGxvZ2lzKGJfSW50ZXJjZXB0KSkKYGBgCgpTdW1tYXJ5IG9mIHRoZXRhIGJ5IHVzaW5nIHN1bW1hcmlzZV9kcmF3cygpCgpgYGB7cn0KZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RoZXRhJykgfD4KICBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCkhpc3RvZ3JhbSBvZiB0aGV0YQoKYGBge3J9Cm1jbWNfaGlzdChkcmF3cywgcGFycz0ndGhldGEnKSArCiAgeGxhYigndGhldGEnKSArCiAgeGxpbShjKDAsMSkpCmBgYAoKQXMgdGhlIG51bWJlciBvZiBvYnNlcnZhdGlvbnMgaXMgc21hbGwsIHRoZXJlIGlzIHNtYWxsIGNoYW5nZSBpbgp0aGUgcG9zdGVyaW9yIG1lYW4gd2hlbiB0aGUgcHJpb3IgaXMgY2hhbmdlZC4gWW91IGNhbiBleHBlcmltZW50CndpdGggZGlmZmVyZW50IHByaW9ycyBhbmQgdmFyeWluZyB0aGUgbnVtYmVyIG9mIG9ic2VydmF0aW9ucy4KCgojIEJpbm9taWFsIG1vZGVsCgpJbnN0ZWFkIG9mIHNlcXVlbmNlIG9mIDAncyBhbmQgMSdzLCB3ZSBjYW4gc3VtbWFyaXplIHRoZSBkYXRhIHdpdGgKdGhlIG51bWJlciBvZiB0cmlhbHMgYW5kIHRoZSBudW1iZXIgc3VjY2Vzc2VzIGFuZCB1c2UgQmlub21pYWwKbW9kZWwuIFRoZSBwcmlvciBpcyBzcGVjaWZpZWQgaW4gdGhlICdsYXRlbnQgc3BhY2UnLiBUaGUgYWN0dWFsCnByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MsIHRoZXRhID0gcGxvZ2lzKGFscGhhKSwgd2hlcmUgcGxvZ2lzIGlzIHRoZQppbnZlcnNlIG9mIHRoZSBsb2dpc3RpYyBmdW5jdGlvbi4KClZpc3VhbGl6ZSB0aGUgcHJpb3IgYnkgZHJhd2luZyBzYW1wbGVzIGZyb20gaXQKCmBgYHtyfQpwcmlvcl9tZWFuIDwtIDAKcHJpb3Jfc2QgPC0gMQpwcmlvcl9kcmF3cyA8LSBkYXRhLmZyYW1lKHRoZXRhID0gcGxvZ2lzKHJub3JtKDIwMDAwLCBwcmlvcl9tZWFuLCBwcmlvcl9zZCkpKQptY21jX2hpc3QocHJpb3JfZHJhd3MpCmBgYAoKQmlub21pYWwgbW9kZWwgd2l0aCB0aGUgc2FtZSBkYXRhCgpgYGB7cn0KZGF0YV9iaW4gPC0gZGF0YS5mcmFtZShOID0gYygxMCksIHkgPSBjKDcpKQpgYGAKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9iaW4gPC0gYnJtKHkgfCB0cmlhbHMoTikgfiAxLCBmYW1pbHkgPSBiaW5vbWlhbCgpLCBkYXRhID0gZGF0YV9iaW4sCiAgICAgICAgICAgICAgIHByaW9yID0gcHJpb3Iobm9ybWFsKDAsMSksIGNsYXNzPSdJbnRlcmNlcHQnKSwKICAgICAgICAgICAgICAgc2V
lZCA9IFNFRUQsIHJlZnJlc2ggPSAwKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlCgpgYGB7cn0KZml0X2JpbgpgYGAKCkV4dHJhY3QgdGhlIHBvc3RlcmlvciBkcmF3cwoKYGBge3J9CmRyYXdzIDwtIGFzX2RyYXdzX2RmKGZpdF9iaW4pCmBgYAoKV2UgY2FuIGdldCBzdW1tYXJ5IGluZm9ybWF0aW9uIHVzaW5nIHN1bW1hcmlzZV9kcmF3cygpCgpgYGB7cn0KZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J2JfSW50ZXJjZXB0JykgfD4KICBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCldlIGNhbiBjb21wdXRlIHRoZSBwcm9iYWJpbGl0eSBvZiBzdWNjZXNzIGJ5IHVzaW5nIHBsb2dpcyB3aGljaCBpcwplcXVhbCB0byBpbnZlcnNlLWxvZ2l0IGZ1bmN0aW9uCgpgYGB7cn0KZHJhd3MgPC0gZHJhd3MgfD4KICBtdXRhdGVfdmFyaWFibGVzKHRoZXRhPXBsb2dpcyhiX0ludGVyY2VwdCkpCmBgYAoKU3VtbWFyeSBvZiB0aGV0YSBieSB1c2luZyBzdW1tYXJpc2VfZHJhd3MoKQoKYGBge3J9CmRyYXdzIHw+CiAgc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSd0aGV0YScpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKCkKYGBgCgpIaXN0b2dyYW0gb2YgdGhldGEKCmBgYHtyfQptY21jX2hpc3QoZHJhd3MsIHBhcnM9J3RoZXRhJykgKwogIHhsYWIoJ3RoZXRhJykgKwogIHhsaW0oYygwLDEpKQpgYGAKClJlLXJ1biB0aGUgbW9kZWwgd2l0aCBhIG5ldyBkYXRhIGRhdGFzZXQgd2l0aG91dCByZWNvbXBpbGluZwoKYGBge3J9CmRhdGFfYmluIDwtIGRhdGEuZnJhbWUoTiA9IGMoNSksIHkgPSBjKDQpKQpmaXRfYmluIDwtIHVwZGF0ZShmaXRfYmluLCBuZXdkYXRhID0gZGF0YV9iaW4pCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UKCmBgYHtyfQpmaXRfYmluCmBgYAoKRXh0cmFjdCB0aGUgcG9zdGVyaW9yIGRyYXdzCgpgYGB7cn0KZHJhd3MgPC0gYXNfZHJhd3NfZGYoZml0X2JpbikKYGBgCgpXZSBjYW4gZ2V0IHN1bW1hcnkgaW5mb3JtYXRpb24gdXNpbmcgc3VtbWFyaXNlX2RyYXdzKCkKCmBgYHtyfQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nYl9JbnRlcmNlcHQnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKV2UgY2FuIGNvbXB1dGUgdGhlIHByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MgYnkgdXNpbmcgcGxvZ2lzIHdoaWNoIGlzCmVxdWFsIHRvIGludmVyc2UtbG9naXQgZnVuY3Rpb24KCmBgYHtyfQpkcmF3cyA8LSBkcmF3cyB8PgogIG11dGF0ZV92YXJpYWJsZXModGhldGE9cGxvZ2lzKGJfSW50ZXJjZXB0KSkKYGBgCgpTdW1tYXJ5IG9mIHRoZXRhIGJ5IHVzaW5nIHN1bW1hcmlzZV9kcmF3cygpCgpgYGB7cn0KZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RoZXRhJykgfD4KICBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCkhpc3RvZ3JhbSBvZiB0aGV0YQoKYGBge3J9Cm1jbWNfaGlzdChkcmF3cywgcGFycz0
ndGhldGEnKSArCiAgeGxhYigndGhldGEnKSArCiAgeGxpbShjKDAsMSkpCmBgYAoKIyBDb21wYXJpc29uIG9mIHR3byBncm91cHMgd2l0aCBCaW5vbWlhbCAKCkFuIGV4cGVyaW1lbnQgd2FzIHBlcmZvcm1lZCB0byBlc3RpbWF0ZSB0aGUgZWZmZWN0IG9mIGJldGEtYmxvY2tlcnMKb24gbW9ydGFsaXR5IG9mIGNhcmRpYWMgcGF0aWVudHMuIEEgZ3JvdXAgb2YgcGF0aWVudHMgd2VyZSByYW5kb21seQphc3NpZ25lZCB0byB0cmVhdG1lbnQgYW5kIGNvbnRyb2wgZ3JvdXBzOgoKLSBvdXQgb2YgNjc0IHBhdGllbnRzIHJlY2VpdmluZyB0aGUgY29udHJvbCwgMzkgZGllZAotIG91dCBvZiA2ODAgcmVjZWl2aW5nIHRoZSB0cmVhdG1lbnQsIDIyIGRpZWQKCkRhdGEsIHdoZXJlIGBncnAyYCBpcyBhbiBpbmRpY2F0b3IgdmFyaWFibGUgZGVmaW5lZCBhcyBhIGZhY3Rvcgp0eXBlLCB3aGljaCBpcyB1c2VmdWwgZm9yIGNhdGVnb3JpY2FsIHZhcmlhYmxlcy4KCmBgYHtyfQpkYXRhX2JpbjIgPC0gZGF0YS5mcmFtZShOID0gYyg2NzQsIDY4MCksIHkgPSBjKDM5LDIyKSwgZ3JwMiA9IGZhY3RvcihjKCdjb250cm9sJywndHJlYXRtZW50JykpKQpgYGAKClRvIGFuYWx5c2Ugd2hldGhlciB0aGUgdHJlYXRtZW50IGlzIHVzZWZ1bCwgd2UgY2FuIHVzZSBCaW5vbWlhbAptb2RlbCBmb3IgYm90aCBncm91cHMgYW5kIGNvbXB1dGUgb2Rkcy1yYXRpby4KCmBgYHtyfQpmaXRfYmluMiA8LSBicm0oeSB8IHRyaWFscyhOKSB+IGdycDIsIGZhbWlseSA9IGJpbm9taWFsKCksIGRhdGEgPSBkYXRhX2JpbjIsCiAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKG5vcm1hbCgwLDEpLCBjbGFzcz0nSW50ZXJjZXB0JyksCiAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIGJybXMgaXMgdXNpbmcKdGhlIGZpcnN0IGZhY3RvciBsZXZlbCBgY29udHJvbGAgYXMgdGhlIGJhc2VsaW5lIGFuZCB0aHVzIHJlcG9ydHMKdGhlIGNvZWZmaWNpZW50IChwb3B1bGF0aW9uLWxldmVsIGVmZmVjdCkgZm9yIGB0cmVhdG1lbnRgIChzaG93biBzCmBncnAydHJlYXRtZW50YCkKCmBgYHtyfQpmaXRfYmluMgpgYGAKCkNvbXB1dGUgdGhldGEgZm9yIGVhY2ggZ3JvdXAgYW5kIHRoZSBvZGRzLXJhdGlvCgpgYGB7cn0KZHJhd3NfYmluMiA8LSBhc19kcmF3c19kZihmaXRfYmluMikgfD4KICBtdXRhdGUodGhldGFfY29udHJvbCA9IHBsb2dpcyhiX0ludGVyY2VwdCksCiAgICAgICAgIHRoZXRhX3RyZWF0bWVudCA9IHBsb2dpcyhiX0ludGVyY2VwdCArIGJfZ3JwMnRyZWF0bWVudCksCiAgICAgICAgIG9kZHNyYXRpbyA9ICh0aGV0YV90cmVhdG1lbnQvKDEtdGhldGFfdHJlYXRtZW50KSkvKHRoZXRhX2NvbnRyb2wvKDEtdGhldGFfY29udHJvbCkpKQpgYGAKClBsb3Qgb2Rkc3JhdGlvCgpgYGB7cn0KbWNtY19oaXN0KGRyYXdzX2JpbjIsIHBhcnM9J29kZHNyYXR
pbycpICsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgwLjIsMS42LGJ5PTAuMikpKwogIGdlb21fdmxpbmUoeGludGVyY2VwdD0xLCBsaW5ldHlwZT0nZGFzaGVkJykKYGBgCgpQcm9iYWJpbGl0eSB0aGF0IHRoZSBvZGRzcmF0aW88MQoKYGBge3J9CmRyYXdzX2JpbjIgfD4KICBtdXRhdGUocG9kZHNyYXRpbyA9IG9kZHNyYXRpbzwxKSB8PgogIHN1YnNldCh2YXJpYWJsZT0ncG9kZHNyYXRpbycpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKG1lYW4sIG1jc2VfbWVhbikKYGBgCgpvZGRyYXRpbyA5NSUgcG9zdGVyaW9yIGludGVydmFsCgpgYGB7cn0KZHJhd3NfYmluMiB8PgogIHN1YnNldCh2YXJpYWJsZT0nb2Rkc3JhdGlvJykgfD4KICBzdW1tYXJpc2VfZHJhd3MofnF1YW50aWxlKC54LCBwcm9icyA9IGMoMC4wMjUsIDAuOTc1KSksIH5tY3NlX3F1YW50aWxlKC54LCBwcm9icyA9IGMoMC4wMjUsIDAuOTc1KSkpCmBgYAoKIyBMaW5lYXIgR2F1c3NpYW4gbW9kZWwKClVzZSB0aGUgS2lscGlzasOkcnZpIHN1bW1lciBtb250aCB0ZW1wZXJhdHVyZXMgMTk1Mi0tMjAyMiBkYXRhIGZyb20gYGFhbHRvYmRhYCBwYWNrYWdlCgpgYGB7cn0KbG9hZCh1cmwoJ2h0dHBzOi8vZ2l0aHViLmNvbS9hdmVodGFyaS9CREFfY291cnNlX0FhbHRvL3Jhdy9tYXN0ZXIvcnBhY2thZ2UvZGF0YS9raWxwaXNqYXJ2aTIwMjIucmRhJykpCmRhdGFfbGluIDwtIGRhdGEuZnJhbWUoeWVhciA9IGtpbHBpc2phcnZpMjAyMiR5ZWFyLAogICAgICAgICAgICAgICAgICAgICAgIHRlbXAgPSBraWxwaXNqYXJ2aTIwMjIkdGVtcC5zdW1tZXIpCmBgYAoKUGxvdCB0aGUgZGF0YQoKYGBge3J9CmRhdGFfbGluIHw+CiAgZ2dwbG90KGFlcyh5ZWFyLCB0ZW1wKSkgKwogIGdlb21fcG9pbnQoY29sb3I9MikgKwogIGxhYnMoeD0gIlllYXIiLCB5ID0gJ1N1bW1lciB0ZW1wLiBAS2lscGlzasOkcnZpJykgKwogIGd1aWRlcyhsaW5ldHlwZSA9ICJub25lIikKYGBgCgpUbyBhbmFseXNlIGhhcyB0aGVyZSBiZWVuIGNoYW5nZSBpbiB0aGUgYXZlcmFnZSBzdW1tZXIgbW9udGgKdGVtcGVyYXR1cmUgd2UgdXNlIGEgbGluZWFyIG1vZGVsIHdpdGggR2F1c3NpYW4gbW9kZWwgZm9yIHRoZQp1bmV4cGxhaW5lZCB2YXJpYXRpb24uIEJ5IGRlZmF1bHQgYnJtcyB1c2VzIHVuaWZvcm0gcHJpb3IgZm9yIHRoZQpjb2VmZmljaWVudHMuCgpgdGVtcCB+IHllYXJgIG1lYW5zIHRlbXAgZGVwZW5kcyBvbiB0aGUgaW50ZXJjZXB0IGFuZCBgdGVtcGAuClRoZSBtb2RlbCBjb3VsZCBhbHNvIGJlIGRlZmluZWQgYXMgYHRlbXAgfiAxICsgeWVhcmAgd2hpY2ggZXhwbGljaXRseSBzaG93cyB0aGUKaW50ZXJjZXB0IHBhcnQuIFRoZSBjb3JyZXNwb25kaW5nIHJlZ3Jlc3Npb24gbW9kZWwgaXMKdGVtcCB+IG5vcm1hbChiX0ludGVyY2VwdCoxICsgYl95ZWFyKnllYXIsIHNpZ21hKQoKYGBge3J9CmZpdF9saW4gPC0gYnJtKHRlbXAgfiB5ZWFyLCBkYXRhID0gZGF0YV9saW4sIGZhbWlseSA9IGdhdXN
zaWFuKCksCiAgICAgICAgICAgICAgIHNlZWQgPSBTRUVELCByZWZyZXNoID0gMCkKYGBgCgpXZSBjYW4gY2hlY2sgdGhlIGFsbCB0aGUgcHJpb3JzIHVzZWQuIEluIGdlbmVyYWwgaXQgaXMgZ29vZCB0byB1c2UKcHJvcGVyIHByaW9ycywgYnV0IHNvbWV0aW1lcyBmbGF0IHByaW9ycyBhcmUgZmluZSBhbmQgcHJvZHVjZQpwcm9wZXIgcG9zdGVyaW9yLgoKYGBge3J9CnByaW9yX3N1bW1hcnkoZml0X2xpbikKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZQoKYGBge3J9CmZpdF9saW4KYGBgCgpFeHRyYWN0IHRoZSBwb3N0ZXJpb3IgZHJhd3MgYW5kIGNoZWNrIHRoZSBzdW1tYXJpZXMKCmBgYHtyfQpkcmF3c19saW4gPC0gYXNfZHJhd3NfZGYoZml0X2xpbikgCmRyYXdzX2xpbiB8PiBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCklmIG9uZSBvZiB0aGUgY29sdW1ucyBpcyBoaWRkZW4gd2UgY2FuIGZvcmNlIHByaW50aW5nIGFsbCBjb2x1bW5zCgpgYGB7cn0KZHJhd3NfbGluIHw+IHN1bW1hcmlzZV9kcmF3cygpIHw+IHByaW50KHdpZHRoPUluZikKYGBgCgpIaXN0b2dyYW0gb2YgYl95ZWFyCgpgYGB7cn0KZHJhd3NfbGluIHw+CiAgbWNtY19oaXN0KHBhcnM9J2JfeWVhcicpICsKICB4bGFiKCdBdmVyYWdlIHRlbXBlcmF0dXJlIGluY3JlYXNlIHBlciB5ZWFyJykKYGBgCgpQcm9iYWJpbGl0eSB0aGF0IHRoZSBjb2VmZmljaWVudCBiX3llYXIgPiAwIGFuZCB0aGUgY29ycmVzcG9uZGluZyBNQ1NFCgpgYGB7cn0KZHJhd3NfbGluIHw+CiAgbXV0YXRlKElfYl95ZWFyX2d0XzAgPSBiX3llYXI+MCkgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J0lfYl95ZWFyX2d0XzAnKSB8PgogIHN1bW1hcmlzZV9kcmF3cyhtZWFuLCBtY3NlX21lYW4pCmBgYAoKOTUlIHBvc3RlcmlvciBpbnRlcnZhbCBmb3IgdGVtcGVyYXR1cmUgaW5jcmVhc2UgcGVyIDEwMCB5ZWFycwoKYGBge3J9CmRyYXdzX2xpbiB8PgogIG11dGF0ZShiX3llYXJfMTAwID0gYl95ZWFyKjEwMCkgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J2JfeWVhcl8xMDAnKSB8PgogIHN1bW1hcmlzZV9kcmF3cyh+cXVhbnRpbGUoLngsIHByb2JzID0gYygwLjAyNSwgMC45NzUpKSwKICAgICAgICAgICAgICAgICAgfm1jc2VfcXVhbnRpbGUoLngsIHByb2JzID0gYygwLjAyNSwgMC45NzUpKSwKICAgICAgICAgICAgICAgICAgLm51bV9hcmdzID0gbGlzdChkaWdpdHMgPSAyLCBub3RhdGlvbiA9ICJkZWMiKSkKYGBgCgpQbG90IHBvc3RlcmlvciBkcmF3cyBvZiB0aGUgbGluZWFyIGZ1bmN0aW9uIHZhbHVlcyBhdCBlYWNoIHllYXIuCmBhZGRfbGlucHJlZF9kcmF3cygpYCB0YWtlcyB0aGUgeWVhcnMgZnJvbSB0aGUgZGF0YSBhbmQgdXNlcyBgZml0X2xpbmAgdG8gbWFrZQp0aGUgcHJlZGljdGlvbnMuCgpgYGB7cn0KZGF0YV9saW4gfD4KICBhZGRfbGlucHJlZF9kcmF3cyhmaXRfbGluKSB8PgogICMgcGxvdCBkYXRhCiAgZ2dwbG90KGFlcyh
4PXllYXIsIHk9dGVtcCkpICsKICBnZW9tX3BvaW50KGNvbG9yPTIpICsKICAjIHBsb3QgbGluZXJpYmJvbiBmb3IgdGhlIGxpbmVhciBtb2RlbAogIHN0YXRfbGluZXJpYmJvbihhZXMoeSA9IC5saW5wcmVkKSwgLndpZHRoID0gYyguOTUpLCBhbHBoYSA9IDEvMiwgY29sb3I9YnJld2VyLnBhbCg1LCAiQmx1ZXMiKVtbNV1dKSArCiAgIyBkZWNvcmF0aW9uCiAgc2NhbGVfZmlsbF9icmV3ZXIoKSsKICBsYWJzKHg9ICJZZWFyIiwgeSA9ICdTdW1tZXIgdGVtcC4gQEtpbHBpc2rDpHJ2aScpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgxOTUwLDIwMjAsYnk9MTApKQpgYGAKCkFsdGVybmF0aXZlbGx5IHBsb3QgYSBzcGFnaGV0dGkgcGxvdCBmb3IgMTAwIGRyYXdzCgpgYGB7cn0KZGF0YV9saW4gfD4KICBhZGRfbGlucHJlZF9kcmF3cyhmaXRfbGluLCBuZHJhd3M9MTAwKSB8PgogICMgcGxvdCBkYXRhCiAgZ2dwbG90KGFlcyh4PXllYXIsIHk9dGVtcCkpICsKICBnZW9tX3BvaW50KGNvbG9yPTIpICsKICAjIHBsb3QgYSBsaW5lIGZvciBlYWNoIHBvc3RlcmlvciBkcmF3CiAgZ2VvbV9saW5lKGFlcyh5PS5saW5wcmVkLCBncm91cD0uZHJhdyksIGFscGhhID0gMS8yLCBjb2xvciA9IGJyZXdlci5wYWwoNSwgIkJsdWVzIilbWzNdXSkrCiAgIyBkZWNvcmF0aW9uCiAgc2NhbGVfZmlsbF9icmV3ZXIoKSsKICBsYWJzKHg9ICJZZWFyIiwgeSA9ICdTdW1tZXIgdGVtcC4gQEtpbHBpc2rDpHJ2aScpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgxOTUwLDIwMjAsYnk9MTApKQpgYGAKClBsb3QgcG9zdGVyaW9yIHByZWRpY3RpdmUgZGlzdHJpYnV0aW9uIGF0IGVhY2ggeWVhciB1bnRpbCAyMDMwCmBhZGRfcHJlZGljdGVkX2RyYXdzKClgIHRha2VzIHRoZSB5ZWFycyBmcm9tIHRoZSBkYXRhIGFuZCB1c2VzCmBmaXRfbGluYCB0byBtYWtlIHRoZSBwcmVkaWN0aW9ucy4KCmBgYHtyfQpkYXRhX2xpbiB8PgogIGFkZF9yb3coeWVhcj0yMDIzOjIwMzApIHw+CiAgYWRkX3ByZWRpY3RlZF9kcmF3cyhmaXRfbGluKSB8PgogICMgcGxvdCBkYXRhCiAgZ2dwbG90KGFlcyh4PXllYXIsIHk9dGVtcCkpICsKICBnZW9tX3BvaW50KGNvbG9yPTIpICsKICAjIHBsb3QgbGluZXJpYmJvbiBmb3IgdGhlIGxpbmVhciBtb2RlbAogIHN0YXRfbGluZXJpYmJvbihhZXMoeSA9IC5wcmVkaWN0aW9uKSwgLndpZHRoID0gYyguOTUpLCBhbHBoYSA9IDEvMiwgY29sb3I9YnJld2VyLnBhbCg1LCAiQmx1ZXMiKVtbNV1dKSArCiAgIyBkZWNvcmF0aW9uCiAgc2NhbGVfZmlsbF9icmV3ZXIoKSsKICBsYWJzKHg9ICJZZWFyIiwgeSA9ICdTdW1tZXIgdGVtcC4gQEtpbHBpc2rDpHJ2aScpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgxOTUwLDIwMzAsYnk9MTA
pKQpgYGAKCiMgTGluZWFyIFN0dWRlbnQncyB0IG1vZGVsCgpUaGUgdGVtcGVyYXR1cmVzIHVzZWQgaW4gdGhlIGFib3ZlIGFuYWx5c2VzIGFyZSBhdmVyYWdlcyBvdmVyIHRocmVlCm1vbnRocywgd2hpY2ggbWFrZXMgaXQgbW9yZSBsaWtlbHkgdGhhdCB0aGV5IGFyZSBub3JtYWxseQpkaXN0cmlidXRlZCwgYnV0IHRoZXJlIGNhbiBiZSBleHRyZW1lIGV2ZW50cyBpbiB0aGUgZmVhdGhlciBhbmQgd2UKY2FuIGNoZWNrIHdoZXRoZXIgbW9yZSByb2J1c3QgU3R1ZGVudCdzIHQgb2JzZXJ2YXRpb24gbW9kZWwgd291bApnaXZlIGRpZmZlcmVudCByZXN1bHRzLgoKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfbGluX3QgPC0gYnJtKHRlbXAgfiB5ZWFyLCBkYXRhID0gZGF0YV9saW4sIGZhbWlseSA9IHN0dWRlbnQoKSwKICAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIFRoZSBiX3llYXIKcG9zdGVyaW9yIGxvb2tzIHNpbWlsYXIgYXMgYmVmb3JlIGFuZCB0aGUgcG9zdGVyaW9yIGZvciBkZWdyZWVzIG9mCmZyZWVkb20gYG51YCBoYXMgbW9zdCBvZiB0aGUgcG9zdGVyaW9yIG1hcyBmb3IgcXVpdGUgbGFyZ2UgdmFsdWVzCmluZGljYXRpbmcgdGhlcmUgaXMgbm8gc3Ryb25nIHN1cHBvcnQgZm9yIHRoaWNrIHRhaWxlZCB2YXJpYXRpb24gaW4KdGVtcGVyYXR1cmUuCgpgYGB7cn0KZml0X2xpbl90CmBgYAoKIyBQYXJldG8tc21vb3RoZWQgaW1wb3J0YW5jZS1zYW1wbGluZyBsZWF2ZS1vbmUtb3V0IGNyb3NzLXZhbGlkYXRpb24gKFBTSVMtTE9PKQoKV2UgY2FuIHVzZSBsZWF2ZS1vbmUtb3V0IGNyb3NzLXZhbGlkYXRpb24gdG8gY29tcGFyZSB0aGUgZXhwZWN0ZWQgcHJlZGljdGl2ZSBwZXJmb3JtYW5jZS4KCkxPTyBjb21wYXJpc29uIHNob3dzIG5vcm1hbCBhbmQgU3R1ZGVudCdzIHQgbW9kZWwgaGF2ZSBzaW1pbGFyIHBlcmZvcm1hbmNlLgoKYGBge3J9Cmxvb19jb21wYXJlKGxvbyhmaXRfbGluKSwgbG9vKGZpdF9saW5fdCkpCmBgYAoKIyBIZXRlcm9za2VkYXN0aWMgbGluZWFyIG1vZGVsCgpIZXRlcm9za2VkYXN0aWNpdHkgYXNzdW1lcyB0aGF0IHRoZSB2YXJpYXRpb24gYXJvdW5kIHRoZSBsaW5lYXIKbWVhbiBjYW4gYWxzbyB2YXJ5LiBXZSBjYW4gYWxsb3cgc2lnbWEgdG8gZGVwZW5kIG9uIHllYXIsIHRvby4KQWx0aG91Z2ggdGhlIGFkZGl0aW9uYWwgY29tcG9uZW50IGlzIHdyaXR0ZW4gYXMgYHNpZ21hIH4geWVhcmAsIHRoZQpsb2cgbGluayBmdW5jdGlvbiBpcyB1c2VkIGFuZCB0aGUgbW9kZWwgaXMgZm9yIGxvZyhzaWdtYSkuIGBiZigpYCBhbGxvd3MKbGlzdGluZyBzZXZlcmFsIGZvcm11bGFzLgoKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfbGluX2ggPC0gYnJtKGJmKHRlbXAgfiB5ZWFyLAogICAgICAgICAgICAgICAgICAgIHNpZ21hIH4geWVhciksCiAgICAgICAgICAgICAgICAgZGF0YSA
9IGRhdGFfbGluLCBmYW1pbHkgPSBnYXVzc2lhbigpLAogICAgICAgICAgICAgICAgIHNlZWQgPSBTRUVELCByZWZyZXNoID0gMCkKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZS4gVGhlIGJfeWVhcgpwb3N0ZXJpb3IgbG9va3Mgc2ltaWxhciBhcyBiZWZvcmUuIFRoZSBwb3N0ZXJpb3IgZm9yIHNpZ21hX3llYXIKbG9va3MgbGlrZSBoYXZpbmcgbW9zc3Qgb2YgdGhlIG1hIGZvciBuZWdhdGl2ZSB2YWx1ZXMsIGluZGljYXRpbmcKZGVjcmVhc2UgaW4gdGVtcGVyYXR1cmUgdmFyaWF0aW9uIGFyb3VuZCB0aGUgbWVhbi4KCmBgYHtyfQpmaXRfbGluX2gKYGBgCgpIaXN0b2dyYW0gb2YgYl95ZWFyIGFuZCBiX3NpZ21hX3llYXIKCmBgYHtyfQphc19kcmF3c19kZihmaXRfbGluX2gpIHw+CiAgbWNtY19hcmVhcyhwYXJzPWMoJ2JfeWVhcicsICdiX3NpZ21hX3llYXInKSkKYGBgCgpBcyBsb2coeCkgaXMgYWxtb3N0IGxpbmVhciB3aGVuIHggaXMgY2xvc2UgdG8gemVybywgd2UgY2FuIHNlZSB0aGF0IHRoZQpzaWdtYSBpcyBkZWNyZWFzaW5nIGFib3V0IDElIHBlciB5ZWFyICg5NSUgaW50ZXJ2YWwgZnJvbSAwJSB0byAyJSkuCgpQbG90IHBvc3RlcmlvciBwcmVkaWN0aXZlIGRpc3RyaWJ1dGlvbiBhdCBlYWNoIHllYXIgdW50aWwgMjAzMApgYWRkX3ByZWRpY3RlZF9kcmF3cygpYCB0YWtlcyB0aGUgeWVhcnMgZnJvbSB0aGUgZGF0YSBhbmQgdXNlcwpgZml0X2xpbl9oYCB0byBtYWtlIHRoZSBwcmVkaWN0aW9ucy4KCmBgYHtyfQpkYXRhX2xpbiB8PgogIGFkZF9yb3coeWVhcj0yMDIzOjIwMzApIHw+CiAgYWRkX3ByZWRpY3RlZF9kcmF3cyhmaXRfbGluX2gpIHw+CiAgIyBwbG90IGRhdGEKICBnZ3Bsb3QoYWVzKHg9eWVhciwgeT10ZW1wKSkgKwogIGdlb21fcG9pbnQoY29sb3I9MikgKwogICMgcGxvdCBsaW5lcmliYm9uIGZvciB0aGUgbGluZWFyIG1vZGVsCiAgc3RhdF9saW5lcmliYm9uKGFlcyh5ID0gLnByZWRpY3Rpb24pLCAud2lkdGggPSBjKC45NSksIGFscGhhID0gMS8yLCBjb2xvcj1icmV3ZXIucGFsKDUsICJCbHVlcyIpW1s1XV0pICsKICAjIGRlY29yYXRpb24KICBzY2FsZV9maWxsX2JyZXdlcigpKwogIGxhYnMoeD0gIlllYXIiLCB5ID0gJ1N1bW1lciB0ZW1wLiBAS2lscGlzasOkcnZpJykgKwogIHRoZW1lKGxlZ2VuZC5wb3NpdGlvbj0ibm9uZSIpKwogIHNjYWxlX3hfY29udGludW91cyhicmVha3M9c2VxKDE5NTAsMjAzMCxieT0xMCkpCmBgYAoKV2UgY2FuIHVzZSBsZWF2ZS1vbmUtb3V0IGNyb3NzLXZhbGlkYXRpb24gdG8gY29tcGFyZSB0aGUgZXhwZWN0ZWQgcHJlZGljdGl2ZSBwZXJmb3JtYW5jZS4KCkxPTyBjb21wYXJpc29uIHNob3dzIGhvbW9za2VkYXN0aWMgbm9ybWFsIGFuZCBoZXRlcm9za2VkYXN0aWMKbm9ybWFsIG1vZGVscyBoYXZlIHNpbWlsYXIgcGVyZm9ybWFuY2VzLgoKYGBge3J9Cmxvb19jb21wYXJlKGxvbyhmaXRfbGluKSwgbG9vKGZpdF9saW5faCk
pCmBgYAoKIyBIZXRlcm9za2VkYXN0aWMgbm9uLWxpbmVhciBtb2RlbAoKV2UgY2FuIHRlc3QgdGhlIGxpbmVhcml0eSBhc3N1bXB0aW9uIGJ5IHVzaW5nIG5vbi1saW5lYXIgc3BsaW5lCmZ1bmN0aW9ucywgYnkgdWluZyBgcyh5ZWFyKWAgdGVybXMuIFNhbXBsaW5nIGlzIHNsb3dlciBhcyB0aGUKcG9zdGVyaW9yIGdldHMgbW9yZSBjb21wbGV4LgoKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfbGluX2hzIDwtIGJybShiZih0ZW1wIH4gcyh5ZWFyKSwKICAgICAgICAgICAgICAgICAgICAgc2lnbWEgfiBzKHllYXIpKSwKICAgICAgICAgICAgICAgICAgZGF0YSA9IGRhdGFfbGluLCBmYW1pbHkgPSBnYXVzc2lhbigpLAogICAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKV2UgZ2V0IHdhcm5pbmdzIGFib3V0IGRpdmVyZ2VuY2VzLCBhbmQgdHJ5IHJlcnVubmluZyB3aXRoIGhpZ2hlcgphZGFwdF9kZWx0YSwgd2hpY2ggbGVhZHMgdG8gdXNpbmcgc21hbGxlciBzdGVwIHNpemVzLiBPZnRlbgpgYWRhcHRfZGVsdGE9MC45OTlgIGxlYWRzIHRvIHZlcnkgc2xvdyBzYW1wbGluZywgYnV0IHdpdGggdGhpcwpzbWFsbCBkYXRhLCB0aGlzIGlzIG5vdCBhbiBpc3N1ZS4KCmBgYHtyfQpmaXRfbGluX2hzIDwtIHVwZGF0ZShmaXRfbGluX2hzLCBjb250cm9sID0gbGlzdChhZGFwdF9kZWx0YT0wLjk5OSkpCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIFRoZSBiX3llYXIKcG9zdGVyaW9yIGxvb2tzIHNpbWlsYXIgYXMgYmVmb3JlLiBUaGUgcG9zdGVyaW9yIGZvciBzaWdtYV95ZWFyCmxvb2tzIGxpa2UgaGF2aW5nIG1vc3N0IG9mIHRoZSBtYSBmb3IgbmVnYXRpdmUgdmFsdWVzLCBpbmRpY2F0aW5nCmRlY3JlYXNlIGluIHRlbXBlcmF0dXJlIHZhcmlhdGlvbiBhcm91bmQgdGhlIG1lYW4uCgpgYGB7cn0KZml0X2xpbl9ocwpgYGAKClBsb3QgcG9zdGVyaW9yIHByZWRpY3RpdmUgZGlzdHJpYnV0aW9uIGF0IGVhY2ggeWVhciB1bnRpbCAyMDMwCmBhZGRfcHJlZGljdGVkX2RyYXdzKClgIHRha2VzIHRoZSB5ZWFycyBmcm9tIHRoZSBkYXRhIGFuZCB1c2VzCmBmaXRfbGluX2hgIHRvIG1ha2UgdGhlIHByZWRpY3Rpb25zLgoKYGBge3J9CmRhdGFfbGluIHw+CiAgYWRkX3Jvdyh5ZWFyPTIwMjM6MjAzMCkgfD4KICBhZGRfcHJlZGljdGVkX2RyYXdzKGZpdF9saW5faHMpIHw+CiAgIyBwbG90IGRhdGEKICBnZ3Bsb3QoYWVzKHg9eWVhciwgeT10ZW1wKSkgKwogIGdlb21fcG9pbnQoY29sb3I9MikgKwogICMgcGxvdCBsaW5lcmliYm9uIGZvciB0aGUgbGluZWFyIG1vZGVsCiAgc3RhdF9saW5lcmliYm9uKGFlcyh5ID0gLnByZWRpY3Rpb24pLCAud2lkdGggPSBjKC45NSksIGFscGhhID0gMS8yLCBjb2xvcj1icmV3ZXIucGFsKDUsICJCbHVlcyIpW1s1XV0pICsKICAjIGRlY29yYXRpb24KICBzY2FsZV9maWxsX2JyZXdlcigpKwogIGxhYnMoeD0gIlllYXIiLCB5ID0
gJ1N1bW1lciB0ZW1wLiBAS2lscGlzasOkcnZpJykgKwogIHRoZW1lKGxlZ2VuZC5wb3NpdGlvbj0ibm9uZSIpKwogIHNjYWxlX3hfY29udGludW91cyhicmVha3M9c2VxKDE5NTAsMjAzMCxieT0xMCkpCmBgYAoKV2UgY2FuIHVzZSBsZWF2ZS1vbmUtb3V0IGNyb3NzLXZhbGlkYXRpb24gdG8gY29tcGFyZSB0aGUgZXhwZWN0ZWQgcHJlZGljdGl2ZSBwZXJmb3JtYW5jZS4KCkxPTyBjb21wYXJpc29uIHNob3dzIGhvbW9za2VkYXN0aWMgbm9ybWFsIGxpbmVhciBhbmQKaGV0ZXJvc2tlZGFzdGljIG5vcm1hbCBzcGxpbmUgbW9kZWxzIGhhdmUgc2ltaWxhcgpwZXJmb3JtYW5jZXMuIFRoZXJlIGFyZSBub3QgZW5vdWdoIG9ic2VydmF0aW9ucyB0byBtYWtlIGNsZWFyCmRpZmZlcmVuY2UgYmV0d2VlbiB0aGUgbW9kZWxzLgoKYGBge3J9Cmxvb19jb21wYXJlKGxvbyhmaXRfbGluKSwgbG9vKGZpdF9saW5faHMpKQpgYGAKCgojIENvbXBhcmlzb24gb2YgayBncm91cHMgd2l0aCBoaWVyYXJjaGljYWwgbm9ybWFsIG1vZGVscwoKTG9hZCBmYWN0b3J5IGRhdGEsIHdoaWNoIGNvbnRhaW4gNSBxdWFsaXR5IG1lYXN1cmVtZW50cyBmb3IgZWFjaCBvZgo2IG1hY2hpbmVzLiBXZSdyZSBpbnRlcmVzdGVkIGluIGFuYWx5aW5nIGFyZSB0aGUgcXVhbGl0eSBkaWZmZXJlbmNlcwpiZXR3ZWVuIHRoZSBtYWNoaW5lcy4KCmBgYHtyfQpmYWN0b3J5IDwtIHJlYWQudGFibGUodXJsKCdodHRwczovL3Jhdy5naXRodWJ1c2VyY29udGVudC5jb20vYXZlaHRhcmkvQkRBX2NvdXJzZV9BYWx0by9tYXN0ZXIvcnBhY2thZ2UvZGF0YS1yYXcvZmFjdG9yeS50eHQnKSkKY29sbmFtZXMoZmFjdG9yeSkgPC0gMTo2CmZhY3RvcnkKYGBgCgpXZSBwaXZvdCB0aGUgZGF0YSB0byBsb25nIGZvcm1hdAoKYGBge3J9CmZhY3RvcnkgPC0gZmFjdG9yeSB8PgogIHBpdm90X2xvbmdlcihjb2xzID0gZXZlcnl0aGluZygpLAogICAgICAgICAgICAgICBuYW1lc190byA9ICdtYWNoaW5lJywKICAgICAgICAgICAgICAgdmFsdWVzX3RvID0gJ3F1YWxpdHknKQpmYWN0b3J5CmBgYAoKIyMgUG9vbGVkIG1vZGVsCgpBcyBjb21wYXJpc29uIG1ha2UgYWxzbyBwb29sZWQgbW9kZWwKCmBgYHtyfQpmaXRfcG9vbGVkIDwtIGJybShxdWFsaXR5IH4gMSwgZGF0YSA9IGZhY3RvcnksIHJlZnJlc2g9MCkKYGBgCgojIyBTZXBhcmF0ZSBtb2RlbAoKQXMgY29tcGFyaXNvbiBtYWtlIGFsc28gc2VwcmF0ZSBtb2RlbC4gVG8gbWFrZSBpdCBjb21wbGV0ZWx5CnNlcGFyYXRlIHdlIG5lZWQgdG8gaGF2ZSBkaWZmZXJlbnQgc2lnbWEgZm9yIGVhY2ggbWFjaGluZSwgdG9vLgoKYGBge3J9CmZpdF9zZXBhcmF0ZSA8LSBicm0oYmYocXVhbGl0eSB+IG1hY2hpbmUsCiAgICAgICAgICAgICAgICAgICAgICAgc2lnbWEgfiBtYWNoaW5lKSwKICAgICAgICAgICAgICAgICAgICBkYXRhID0gZmFjdG9yeSwgcmVmcmVzaD0wKQpgYGAKCiMgQ29tbW9uIHZhcmlhbmNlIGhpZXJhcmNoaWNhbCBtb2RlbCAoQU5PVkEpCgo
KYGBge3J9CmZpdF9oaWVyIDwtIGJybShxdWFsaXR5IH4gMSArICgxIHwgbWFjaGluZSksCiAgICAgICAgICAgICAgICBkYXRhID0gZmFjdG9yeSwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuCgpgYGB7cn0KZml0X2hpZXIKYGBgCgpMT08gY29tcGFyaXNvbiBzaG93cyB0aGUgaGllcmFyY2hpY2FsIG1vZGVsIGlzIHRoZSBiZXN0CgpgYGB7cn0KbG9vX2NvbXBhcmUobG9vKGZpdF9wb29sZWQpLCBsb28oZml0X3NlcGFyYXRlKSwgbG9vKGZpdF9oaWVyKSkKYGBgCgpEaXN0cmlidXRpb25zIG9mIHF1YWxpdHkgZGlmZmVyZW5jZXMgZnJvbSB0aGUgbWVhbiBxdWFsaXR5CgpgYGB7cn0KbWNtY19hcmVhcyhhc19kcmF3c19kZihmaXRfaGllciksIHJlZ2V4X3BhcnM9J3JfbWFjaGluZScpCmBgYAoKUG9zdGVyaW9yIHByZWRpY3RpdmUgZGlzdHJpYnV0aW9ucyBmb3IgNiBvbGQgYW5kIDEgbmV3IG1hY2hpbmVzCgpgYGB7cn0KcG9zdGVyaW9yX3ByZWRpY3QoZml0X2hpZXIsIG5ld2RhdGE9ZGF0YS5mcmFtZShtYWNoaW5lPTE6NywgcXVhbGl0eT1yZXAoTkEsNykpLAogICAgICAgICAgICAgICAgICBhbGxvd19uZXdfbGV2ZWxzPVRSVUUpIHw+CiAgYXNfZHJhd3NfZGYoKSB8PgogIG1jbWNfYXJlYXMoKQpgYGAKCgojIEhpZXJhcmNoaWNhbCBiaW5vbWlhbCBtb2RlbAoKW1NvcmFmZW5pYiBUb3hpY2l0eSBEYXRhc2V0IGluIGBtZXRhZGF0YCBSIHBhY2thZ2VdKGh0dHBzOi8vd3ZpZWNodGIuZ2l0aHViLmlvL21ldGFkYXQvcmVmZXJlbmNlL2RhdC51cnNpbm8yMDIxLmh0bWwpCmluY2x1ZGVzIHJlc3VsdHMgZnJtIDEzIHN0dWRpZXMgaW52ZXN0aWdhdGluZyB0aGUgb2NjdXJyZW5jZSBvZgpkb3NlIGxpbWl0aW5nIHRveGljaXRpZXMgKERMVHMpIGF0IGRpZmZlcmVudCBkb3NlcyBvZiBTb3JhZmVuaWIuCgpMb2FkIGRhdGEKCmBgYHtyfQpsb2FkKHVybCgnaHR0cHM6Ly9naXRodWIuY29tL3d2aWVjaHRiL21ldGFkYXQvcmF3L21hc3Rlci9kYXRhL2RhdC51cnNpbm8yMDIxLnJkYScpKQpoZWFkKGRhdC51cnNpbm8yMDIxKQpgYGAKClBvb2xlZCBtb2RlbCBhc3N1bWVzIGFsbCBzdHVkaWVzIGhhdmUgdGhlIHNhbWUgZG9zZSBlZmZlY3QKCmBgYHtyfQpmaXRfcG9vbGVkIDwtIGJybShldmVudHMgfCB0cmlhbHModG90YWwpIH4gZG9zZSwKICAgICAgICAgICAgICAgICAgZmFtaWx5PWJpbm9taWFsKCksIGRhdGE9ZGF0LnVyc2lubzIwMjEpCmBgYAoKU2VwYXJhdGUgbW9kZWwgYXNzdW1lcyBhbGwgc3R1ZGllcyBoYXZlIGRpZmZlcmVudCBkb3NlIGVmZmVjdAoKYGBge3J9CmZpdF9zZXBhcmF0ZSA8LSBicm0oZXZlbnRzIHwgdHJpYWxzKHRvdGFsKSB+IGRvc2U6c3R1ZHksCiAgICAgICAgICAgICAgICAgICAgZmFtaWx5PWJpbm9taWFsKCksIGRhdGE9ZGF0LnVyc2lubzIwMjEpCmZpdF9zZXBhcmF0ZSA8LSB1cGRhdGUoZml0X3NlcGFyYXRlLCBjb250cm9sPWxpc3QoaW5
pdD0wLjEpKQpgYGAKCkhpZXJhcmNoaWNhbCBtb2RlbCBhc3N1bWVzIGNvbW1vbiBtZWFuIGVmZmVjdCBhbmQgdmFyaWF0aW9uIHJvdW5kIHdpdGggbm9ybWFsIHBvcHVsYXRpb24gcHJpb3IKCmBgYHtyfQpmaXRfaGllciA8LSBicm0oZXZlbnRzIHwgdHJpYWxzKHRvdGFsKSB+IGRvc2UgKyAoZG9zZSB8IHN0dWR5KSwKICAgICAgICAgICAgICAgIGZhbWlseT1iaW5vbWlhbCgpLCBkYXRhPWRhdC51cnNpbm8yMDIxKQpmaXRfaGllciA8LSB1cGRhdGUoZml0X2hpZXIsIGNvbnRyb2w9bGlzdChhZGFwdF9kZWx0YT0wLjk5KSkKYGBgCgpMT08tQ1YgY29tcGFyaXNvbgoKYGBge3J9Cmxvb19jb21wYXJlKGxvbyhmaXRfcG9vbGVkKSwgbG9vKGZpdF9zZXBhcmF0ZSksIGxvbyhmaXRfaGllcikpCmBgYAoKV2UgZ2V0IHdhcm5pbmdzIGFib3V0IFBhcmV0byBrJ3MgPiAwLjcgaW4gUFNJUy1MT08gZm9yIHNlcGFyYXRlCm1vZGVsLCBidXQgYXMgaW4gdGhhdCBjYXNlIHRoZSBMT08tQ1YgZXN0aW1hdGUgaXMgdXN1YWxseQpvdmVyb3B0aW1pc3RpYyBhbmQgdGhlIHNlcGFyYXRlIG1vZGVsIGlzIHRoZSB3b3JzdCwgdGhlcmUgaXMgbm8KbmVlZCB0byB1c2UgbW9yZSBhY2N1cmF0ZSBjb21wdXRhdGlvbi4KCkhpZXJhcmNoaWNhbCBtb2RlbCBoYXMgYmV0dGVyIGVscGQgdGhhbiB0aGUgcG9vbGVkLCBidXQgZGlmZmVyZW5jZQppcyBuZWdsaWdpYmxlLiBIb3dldmVyLCB3aGVuIHdlIGxvb2sgYXQgdGhlIHN0dWR5IHNwZWNpZmljCnBhcmFtZXRlcnMsIHdlIHNlZSB0aGF0IHRoZSBNaWxsZXIgc3R1ZHkgaGFzIGhpZ2hlciBpbnRlcmNlcHQgKG1vcmUKZXZlbnRzKS4KCmBgYHtyfQptY21jX2FyZWFzKGFzX2RyYXdzX2RmKGZpdF9oaWVyKSwgcmVnZXhfcGFycz0ncl9zdHVkeVxcWy4qSW50ZXJjZXB0JykKYGBgCgoKVGhlcmUgYXJlIG5vIGRpZmZlcmVuY2VzIGluIHNsb3Blcy4KCmBgYHtyfQptY21jX2FyZWFzKGFzX2RyYXdzX2RmKGZpdF9oaWVyKSwgcmVnZXhfcGFycz0ncl9zdHVkeVxcWy4qZG9zZScpCmBgYAoKClRoZSBjb2VmZmljaWVudCBmb3IgdGhlIGRvc2UgaXMgY2xlYXJseSBsYXJnZXIgdGhhbiAwCgpgYGB7cn0KbWNtY19hcmVhcyhhc19kcmF3c19kZihmaXRfaGllciksIHJlZ2V4X3BhcnM9J2JfZG9zZScpICsKICBnZW9tX3ZsaW5lKHhpbnRlcmNlcHQ9MCwgbGluZXR5cGU9J2Rhc2hlZCcpICsKICB4bGltKGMoMCwwLjAxKSkKYGBgCgpUaGUgcG9zdGVyaW9yIGZvciB0aGUgcHJvYmFiaWxpdHkgb2YgZXZlbnQgZ2l2ZW4gY2VydGFpbiBkb3NlIGFuZCBhIG5ldyBzdHVkeQoKYGBge3J9CmRhdGEuZnJhbWUoc3R1ZHk9J25ldycsCiAgICAgICAgICAgZG9zZT1zZXEoMTAwLDEwMDAsYnk9MTAwKSwKICAgICAgICAgICB0b3RhbD0xKSB8PgogIGFkZF9saW5wcmVkX2RyYXdzKGZpdF9oaWVyLCB0cmFuc2Zvcm09VFJVRSwgYWxsb3dfbmV3X2xldmVscz1UUlVFKSB8PgogIGdncGxvdChhZXMoeD1kb3NlLCB5PS5saW5wcmVkKSkgKwogIHN0YXR
fbGluZXJpYmJvbigud2lkdGggPSBjKC45NSksIGFscGhhID0gMS8yLCBjb2xvcj1icmV3ZXIucGFsKDUsICJCbHVlcyIpW1s1XV0pICsKICBzY2FsZV9maWxsX2JyZXdlcigpKwogIGxhYnMoeD0gIkRvc2UiLCB5ID0gJ1Byb2JhYmlsaXR5IG9mIGV2ZW50JykgKwogIHRoZW1lKGxlZ2VuZC5wb3NpdGlvbj0ibm9uZSIpICsKICBnZW9tX2hsaW5lKHlpbnRlcmNlcHQ9MCkgKwogIHNjYWxlX3hfY29udGludW91cyhicmVha3M9c2VxKDEwMCwxMDAwLGJ5PTEwMCkpCmBgYAoKUG9zdGVyaW9yIHByZWRpY3RpdmUgY2hlY2tpbmcKCmBgYHtyfQpwcF9jaGVjayhmaXRfaGllciwgdHlwZSA9ICJyaWJib25fZ3JvdXBlZCIsIGdyb3VwPSJzdHVkeSIpCmBgYAoKPGJyIC8+CgojIExpY2Vuc2VzIHsudW5udW1iZXJlZH0KCiogQ29kZSAmY29weTsgMjAxNy0yMDIzLCBBa2kgVmVodGFyaSwgbGljZW5zZWQgdW5kZXIgQlNELTMuCiogVGV4dCAmY29weTsgMjAxNy0yMDIzLCBBa2kgVmVodGFyaSwgbGljZW5zZWQgdW5kZXIgQ0MtQlktTkMgNC4wLgo=
+LS0tCnRpdGxlOiAiQmF5ZXNpYW4gZGF0YSBhbmFseXNpcyAtIEJSTVMgZGVtb3MiCmF1dGhvcjogIkFraSBWZWh0YXJpIgpkYXRlOiAiRmlyc3QgdmVyc2lvbiAyMDIzLTEyLTA1LiBMYXN0IG1vZGlmaWVkIGByIGZvcm1hdChTeXMuRGF0ZSgpKWAuIgpvdXRwdXQ6CiAgaHRtbF9kb2N1bWVudDoKICAgIGZpZ19jYXB0aW9uOiB5ZXMKICAgIHRvYzogVFJVRQogICAgdG9jX2RlcHRoOiAyCiAgICBudW1iZXJfc2VjdGlvbnM6IFRSVUUKICAgIHRvY19mbG9hdDoKICAgICAgc21vb3RoX3Njcm9sbDogRkFMU0UKICAgIHRoZW1lOiByZWFkYWJsZQogICAgY29kZV9kb3dubG9hZDogdHJ1ZQotLS0KIyBTZXR1cCAgey51bm51bWJlcmVkfQoKYGBge3Igc2V0dXAsIGluY2x1ZGU9RkFMU0V9CmtuaXRyOjpvcHRzX2NodW5rJHNldChjYWNoZT1GQUxTRSwgbWVzc2FnZT1GQUxTRSwgZXJyb3I9RkFMU0UsIHdhcm5pbmc9VFJVRSwgY29tbWVudD1OQSwgb3V0LndpZHRoPSc5NSUnKQpgYGAKCioqTG9hZCBwYWNrYWdlcyoqCgpgYGB7cn0KbGlicmFyeSh0aWR5cikKbGlicmFyeShkcGx5cikKbGlicmFyeSh0aWJibGUpCmxpYnJhcnkocGlsbGFyKQpsaWJyYXJ5KHN0cmluZ3IpCmxpYnJhcnkoYnJtcykKb3B0aW9ucyhicm1zLmJhY2tlbmQgPSAiY21kc3RhbnIiLCBtYy5jb3JlcyA9IDIpCmxpYnJhcnkocG9zdGVyaW9yKQpvcHRpb25zKHBpbGxhci5uZWdhdGl2ZSA9IEZBTFNFKQpsaWJyYXJ5KGxvbykKbGlicmFyeShwcmlvcnNlbnNlKQpsaWJyYXJ5KGdncGxvdDIpCmxpYnJhcnkoYmF5ZXNwbG90KQp0aGVtZV9zZXQoYmF5ZXNwbG90Ojp0aGVtZV9kZWZhdWx0KGJhc2VfZmFtaWx5ID0gInNhbnMiKSkKbGlicmFyeSh0aWR5YmF5ZXMpCmxpYnJhcnkoZ2dkaXN0KQpsaWJyYXJ5KHBhdGNod29yaykKbGlicmFyeShSQ29sb3JCcmV3ZXIpClNFRUQgPC0gNDg5MjcgIyBzZXQgcmFuZG9tIHNlZWQgZm9yIHJlcHJvZHVjYWJpbGl0eQpgYGAKCiMgSW50cm9kdWN0aW9uCgpUaGlzIG5vdGVib29rIGNvbnRhaW5zIHNldmVyYWwgZXhhbXBsZXMgb2YgaG93IHRvIHVzZSBbU3Rhbl0oaHR0cHM6Ly9tYy1zdGFuLm9yZykgaW4gUiB3aXRoIFtfX2JybXNfX10oaHR0cHM6Ly9wYXVsLWJ1ZXJrbmVyLmdpdGh1Yi5pby9icm1zLykuIFRoaXMgbm90ZWJvb2sgYXNzdW1lcyBiYXNpYyBrbm93bGVkZ2Ugb2YgQmF5ZXNpYW4gaW5mZXJlbmNlIGFuZCBNQ01DLiBUaGUgZXhhbXBsZXMgYXJlIHJlbGF0ZWQgdG8gW0JheWVzaWFuIGRhdGEgYW5hbHlzaXMgY291cnNlXShodHRwczovL2F2ZWh0YXJpLmdpdGh1Yi5pby9CREFfY291cnNlX0FhbHRvLykuCgojIEJlcm5vdWxsaSBtb2RlbAoKVG95IGRhdGEgd2l0aCBzZXF1ZW5jZSBvZiBmYWlsdXJlcyAoMCkgYW5kIHN1Y2Nlc3NlcyAoMSkuIFdlIHdvdWxkCmxpa2UgdG8gbGVhcm4gYWJvdXQgdGhlIHVua25vd24gcHJvYmFiaWxpdHkgb2Ygc3VjY2Vzcy4KCmBgYHtyfQpkYXRhX2Jlcm4gPC0gZGF0YS5mcmFtZSh5ID0gYygxLCA
xLCAxLCAwLCAxLCAxLCAxLCAwLCAxLCAwKSkKYGBgCgpBcyB1c3VhbCBpbiBjYXNlIG9mIGdlbmVyYWxpemQgbGluZWFyIG1vZGVscywgKEdMTXMpIGJybXMgZGVmaW5lcwp0aGUgcHJpb3JzIG9uIHRoZSBsYXRlbnQgbW9kZWwgcGFyYW1ldGVycy4gV2l0aCBCZXJub3VsbGkgdGhlCmRlZmF1bHQgbGluayBmdW5jdGlvbiBpcyBsb2dpdCwgYW5kIHRodXMgdGhlIHByaW9yIGlzIHNldCBvbgpsb2dpdCh0aGV0YSkuIEFzIHRoZXJlIGFyZSBubyBjb3ZhcmlhdGVzIGxvZ2l0KHRoZXRhKT1JbnRlcmNlcHQuClRoZSBicm1zIGRlZmF1bHQgcHJpb3IgZm9yIEludGVyY2VwdCBpcyBzdHVkZW50X3QoMywgMCwgMi41KSwgYnV0CndlIHVzZSBzdHVkZW50X3QoNywgMCwgMS41KSB3aGljaCBpcyBjbG9zZSB0byBsb2dpc3RpYwpkaXN0cmlidXRpb24sIGFuZCB0aHVzIG1ha2VzIHRoZSBwcmlvciBuZWFyLXVuaWZvcm0gZm9yIHRoZXRhLgpXZSBjYW4gc2ltdWxhdGUgZnJvbSB0aGVzZSBwcmlvcnMgdG8gY2hlY2sgdGhlIGltcGxpZWQgcHJpb3Igb24gdGhldGEuCldlIG5leHQgY29tcGFyZSB0aGUgcmVzdWx0IHRvIHVzaW5nIG5vcm1hbCgwLCAxKSBwcmlvciBvbiBsb2dpdApwcm9iYWJpbGl0eS4gV2UgdmlzdWFsaXplIHRoZSBpbXBsaWVkIHByaW9ycyBieSBzYW1wbGluZyBmcm9tIHRoZSBwcmlvcnMuCgpgYGB7cn0KZGF0YS5mcmFtZSh0aGV0YSA9IHBsb2dpcyhnZ2Rpc3Q6OnJzdHVkZW50X3Qobj0yMDAwMCwgZGY9MywgbXU9MCwgc2lnbWE9Mi41KSkpIHw+CiAgbWNtY19oaXN0KCkgKwogIHhsaW0oYygwLDEpKSArCiAgbGFicyh0aXRsZT0nRGVmYXVsdCBicm1zIHN0dWRlbnRfdCgzLCAwLCAyLjUpIHByaW9yIG9uIEludGVyY2VwdCcpCmRhdGEuZnJhbWUodGhldGEgPSBwbG9naXMoZ2dkaXN0Ojpyc3R1ZGVudF90KG49MjAwMDAsIGRmPTcsIG11PTAsIHNpZ21hPTEuNSkpKSB8PgogIG1jbWNfaGlzdCgpICsKICB4bGltKGMoMCwxKSkgKwogIGxhYnModGl0bGU9J3N0dWRlbnRfdCg3LCAwLCAxLjUpIHByaW9yIG9uIEludGVyY2VwdCcpCmBgYAoKQWxtb3N0IHVuaWZvcm0gcHJpb3Igb24gdGhldGEgY291bGQgYmUgb2J0YWluZWQgYWxzbyB3aXRoIG5vcm1hbCgwLDEuNSkKCmBgYHtyfQpkYXRhLmZyYW1lKHRoZXRhID0gcGxvZ2lzKHJub3JtKG49MjAwMDAsIG1lYW49MCwgc2Q9MS41KSkpIHw+CiAgbWNtY19oaXN0KCkgKwogIHhsaW0oYygwLDEpKSArCiAgbGFicyh0aXRsZT0nbm9ybWFsKDAsIDEuNSkgcHJpb3Igb24gSW50ZXJjZXB0JykKYGBgCgpGb3JtdWxhIGB5IH4gMWAgY29ycmVzcG9uZHMgdG8gYSBtb2RlbCAkXG1hdGhybXtsb2dpdH0oXHRoZXRhKSA9CgpgYGB7cn0KI1xhbHBoYVx0aW1lcyAxID0gXGFscGhhJC4gYGJybXM/IGRlbm90ZXMgdGhlICRcYWxwaGEkIGFzIGBJbnRlcmNlcHRgLgpgYGAKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9iZXJuIDwtIGJybSh5IH4gMSwgZmFtaWx5ID0gYmVybm91bGxpKCksIGRhdGEgPSB
kYXRhX2Jlcm4sCiAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKHN0dWRlbnRfdCg3LCAwLCAxLjUpLCBjbGFzcz0nSW50ZXJjZXB0JyksCiAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UKCmBgYHtyfQpmaXRfYmVybgpgYGAKCkV4dHJhY3QgdGhlIHBvc3RlcmlvciBkcmF3cwoKYGBge3J9CmRyYXdzIDwtIGFzX2RyYXdzX2RmKGZpdF9iZXJuKQpgYGAKCldlIGNhbiBnZXQgc3VtbWFyeSBpbmZvcm1hdGlvbiB1c2luZyBzdW1tYXJpc2VfZHJhd3MoKQoKYGBge3J9CmRyYXdzIHw+CiAgc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSdiX0ludGVyY2VwdCcpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKCkKYGBgCgpXZSBjYW4gY29tcHV0ZSB0aGUgcHJvYmFiaWxpdHkgb2Ygc3VjY2VzcyBieSB1c2luZyBwbG9naXMgd2hpY2ggaXMKZXF1YWwgdG8gaW52ZXJzZS1sb2dpdCBmdW5jdGlvbgoKYGBge3J9CmRyYXdzIDwtIGRyYXdzIHw+CiAgbXV0YXRlX3ZhcmlhYmxlcyh0aGV0YT1wbG9naXMoYl9JbnRlcmNlcHQpKQpgYGAKClN1bW1hcnkgb2YgdGhldGEgYnkgdXNpbmcgc3VtbWFyaXNlX2RyYXdzKCkKCmBgYHtyfQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0ndGhldGEnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKSGlzdG9ncmFtIG9mIHRoZXRhCgpgYGB7cn0KbWNtY19oaXN0KGRyYXdzLCBwYXJzPSd0aGV0YScpICsKICB4bGFiKCd0aGV0YScpICsKICB4bGltKGMoMCwxKSkKYGBgCgpNYWtlIHByaW9yIHNlbnNpdGl2aXR5IGFuYWx5c2lzIGJ5IHBvd2Vyc2NhbGluZyBib3RoIHByaW9yIGFuZApsaWtlbGlob29kLiBGb2N1cyBvbiB0aGV0YSB3aGljaCBpcyB0aGUgcXVhbnRpdHkgb2YgaW50ZXJlc3QuCgpgYGB7cn0KdGhldGEgPC0gZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RoZXRhJykKcG93ZXJzY2FsZV9zZW5zaXRpdml0eShmaXRfYmVybiwgcHJlZGljdGlvbiA9IFwoeCwgLi4uKSB0aGV0YSwgbnVtX2FyZ3M9bGlzdChkaWdpdHM9MikKICAgICAgICAgICAgICAgICAgICAgICApJHNlbnNpdGl2aXR5IHw+CiAgICAgICAgICAgICAgICAgICAgICAgICBmaWx0ZXIodmFyaWFibGU9PSd0aGV0YScpIHw+CiAgICAgICAgICAgICAgICAgICAgICAgICBtdXRhdGUoYWNyb3NzKHdoZXJlKGlzLmRvdWJsZSksICB+bnVtKC54LCBkaWdpdHM9MikpKQpgYGAKCgojIEJpbm9taWFsIG1vZGVsCgpJbnN0ZWFkIG9mIHNlcXVlbmNlIG9mIDAncyBhbmQgMSdzLCB3ZSBjYW4gc3VtbWFyaXplIHRoZSBkYXRhIHdpdGgKdGhlIG51bWJlciBvZiB0cmlhbHMgYW5kIHRoZSBudW1iZXIgc3VjY2Vzc2VzIGFuZCB1c2UgQmlub21pYWwKbW9kZWwuIFRoZSBwcmlvciBpcyBzcGVjaWZpZWQgaW4gdGhlICdsYXRlbnQgc3BhY2UnLiBUaGUgYWN0dWFsCnByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MsIHRoZXR
hID0gcGxvZ2lzKGFscGhhKSwgd2hlcmUgcGxvZ2lzIGlzIHRoZQppbnZlcnNlIG9mIHRoZSBsb2dpc3RpYyBmdW5jdGlvbi4KCkJpbm9taWFsIG1vZGVsIHdpdGggdGhlIHNhbWUgZGF0YSBhbmQgcHJpb3IKCmBgYHtyfQpkYXRhX2JpbiA8LSBkYXRhLmZyYW1lKE4gPSBjKDEwKSwgeSA9IGMoNykpCmBgYAoKRm9ybXVsYSBgeSB8IHRyaWFscyhOKSB+IDFgIGNvcnJlc3BvbmRzIHRvIGEgbW9kZWwKJFxtYXRocm17bG9naXR9KFx0aGV0YSkgPSBcYWxwaGEkLCBhbmQgdGhlIG51bWJlciBvZiB0cmlhbHMgZm9yCmVhY2ggb2JzZXJ2YXRpb24gaXMgcHJvdmlkZWQgYnkgYHwgdHJpYWxzKE4pYAoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9iaW4gPC0gYnJtKHkgfCB0cmlhbHMoTikgfiAxLCBmYW1pbHkgPSBiaW5vbWlhbCgpLCBkYXRhID0gZGF0YV9iaW4sCiAgICAgICAgICAgICAgIHByaW9yID0gcHJpb3Ioc3R1ZGVudF90KDcsIDAsMS41KSwgY2xhc3M9J0ludGVyY2VwdCcpLAogICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UKCmBgYHtyfQpmaXRfYmluCmBgYAoKVGhlIGRpYWdub3N0aWMgaW5kaWNhdGVzIHByaW9yLWRhdGEgY29uZmxpY3QsIHRoYXQgaXMsIGJvdGggcHJpb3IKYW5kIGxpa2VsaWhvb2QgYXJlIGluZm9ybWF0aXZlLiBJZiB0aGVyZSBpcyB0cnVlIHN0cm9uZyBwcmlvcgppbmZvcm1hdGlvbiB0aGF0IHdvdWxkIGp1c3RpZnkgdGhlIG5vcm1hbCgwLDEpIHByaW9yLCB0aGVuIHRoaXMgaXMKZmluZSwgYnV0IG90aGVyd2lzZSBtb3JlIHRoaW5raW5nIGlzIHJlcXVpcmVkIChnb2FsIGlzIG5vdCBhZGp1c3QKcHJpb3IgdG8gcmVtb3ZlIGRpYWdub3N0aWMgd2FybmluZ3Mgd2l0aG95dCB0aGlua2luZykuIEluIHRoaXMgdG95CmV4YW1wbGUsIHdlIHByb2NlZWQgd2l0aCB0aGlzIHByaW9yLgoKRXh0cmFjdCB0aGUgcG9zdGVyaW9yIGRyYXdzCgpgYGB7cn0KZHJhd3MgPC0gYXNfZHJhd3NfZGYoZml0X2JpbikKYGBgCgpXZSBjYW4gZ2V0IHN1bW1hcnkgaW5mb3JtYXRpb24gdXNpbmcgc3VtbWFyaXNlX2RyYXdzKCkKCmBgYHtyfQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nYl9JbnRlcmNlcHQnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKV2UgY2FuIGNvbXB1dGUgdGhlIHByb2JhYmlsaXR5IG9mIHN1Y2Nlc3MgYnkgdXNpbmcgcGxvZ2lzIHdoaWNoIGlzCmVxdWFsIHRvIGludmVyc2UtbG9naXQgZnVuY3Rpb24KCmBgYHtyfQpkcmF3cyA8LSBkcmF3cyB8PgogIG11dGF0ZV92YXJpYWJsZXModGhldGE9cGxvZ2lzKGJfSW50ZXJjZXB0KSkKYGBgCgpTdW1tYXJ5IG9mIHRoZXRhIGJ5IHVzaW5nIHN1bW1hcmlzZV9kcmF3cygpCgpgYGB7cn0KZHJhd3MgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RoZXRhJykgfD4KICBzdW1tYXJpc2VfZHJhd3MoKQpgYGAKCkhpc3RvZ3J
hbSBvZiB0aGV0YQoKYGBge3J9Cm1jbWNfaGlzdChkcmF3cywgcGFycz0ndGhldGEnKSArCiAgeGxhYigndGhldGEnKSArCiAgeGxpbShjKDAsMSkpCmBgYAoKUmUtcnVuIHRoZSBtb2RlbCB3aXRoIGEgbmV3IGRhdGEgZGF0YXNldCB3aXRob3V0IHJlY29tcGlsaW5nCgpgYGB7cn0KZGF0YV9iaW4gPC0gZGF0YS5mcmFtZShOID0gYyg1KSwgeSA9IGMoNCkpCmZpdF9iaW4gPC0gdXBkYXRlKGZpdF9iaW4sIG5ld2RhdGEgPSBkYXRhX2JpbikKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZQoKYGBge3J9CmZpdF9iaW4KYGBgCgpFeHRyYWN0IHRoZSBwb3N0ZXJpb3IgZHJhd3MKCmBgYHtyfQpkcmF3cyA8LSBhc19kcmF3c19kZihmaXRfYmluKQpgYGAKCldlIGNhbiBnZXQgc3VtbWFyeSBpbmZvcm1hdGlvbiB1c2luZyBzdW1tYXJpc2VfZHJhd3MoKQoKYGBge3J9CmRyYXdzIHw+CiAgc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSdiX0ludGVyY2VwdCcpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKCkKYGBgCgpXZSBjYW4gY29tcHV0ZSB0aGUgcHJvYmFiaWxpdHkgb2Ygc3VjY2VzcyBieSB1c2luZyBwbG9naXMgd2hpY2ggaXMKZXF1YWwgdG8gaW52ZXJzZS1sb2dpdCBmdW5jdGlvbgoKYGBge3J9CmRyYXdzIDwtIGRyYXdzIHw+CiAgbXV0YXRlX3ZhcmlhYmxlcyh0aGV0YT1wbG9naXMoYl9JbnRlcmNlcHQpKQpgYGAKClN1bW1hcnkgb2YgdGhldGEgYnkgdXNpbmcgc3VtbWFyaXNlX2RyYXdzKCkKCmBgYHtyfQpkcmF3cyB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0ndGhldGEnKSB8PgogIHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKSGlzdG9ncmFtIG9mIHRoZXRhCgpgYGB7cn0KbWNtY19oaXN0KGRyYXdzLCBwYXJzPSd0aGV0YScpICsKICB4bGFiKCd0aGV0YScpICsKICB4bGltKGMoMCwxKSkKYGBgCgojIENvbXBhcmlzb24gb2YgdHdvIGdyb3VwcyB3aXRoIEJpbm9taWFsIAoKQW4gZXhwZXJpbWVudCB3YXMgcGVyZm9ybWVkIHRvIGVzdGltYXRlIHRoZSBlZmZlY3Qgb2YgYmV0YS1ibG9ja2VycwpvbiBtb3J0YWxpdHkgb2YgY2FyZGlhYyBwYXRpZW50cy4gQSBncm91cCBvZiBwYXRpZW50cyB3ZXJlIHJhbmRvbWx5CmFzc2lnbmVkIHRvIHRyZWF0bWVudCBhbmQgY29udHJvbCBncm91cHM6CgotIG91dCBvZiA2NzQgcGF0aWVudHMgcmVjZWl2aW5nIHRoZSBjb250cm9sLCAzOSBkaWVkCi0gb3V0IG9mIDY4MCByZWNlaXZpbmcgdGhlIHRyZWF0bWVudCwgMjIgZGllZAoKRGF0YSwgd2hlcmUgYGdycDJgIGlzIGFuIGluZGljYXRvciB2YXJpYWJsZSBkZWZpbmVkIGFzIGEgZmFjdG9yCnR5cGUsIHdoaWNoIGlzIHVzZWZ1bCBmb3IgY2F0ZWdvcmljYWwgdmFyaWFibGVzLgoKYGBge3J9CmRhdGFfYmluMiA8LSBkYXRhLmZyYW1lKE4gPSBjKDY3NCwgNjgwKSwgeSA9IGMoMzksMjIpLCBncnAyID0gZmFjdG9yKGMoJ2NvbnRyb2wnLCd0cmVhdG1lbnQnKSkpCmBgYAoKVG8gYW5hbHlzZSB3aGV0aGVyIHRoZSB
0cmVhdG1lbnQgaXMgdXNlZnVsLCB3ZSBjYW4gdXNlIEJpbm9taWFsCm1vZGVsIGZvciBib3RoIGdyb3VwcyBhbmQgY29tcHV0ZSBvZGRzLXJhdGlvLiBUbyByZWNyZWF0ZSB0aGUgbW9kZWwKYXMgdHdvIGluZGVwZW5kZW50IChzZXBhcmF0ZSkgYmlub21pYWwgbW9kZWxzLCB3ZSB1c2UgZm9ybXVsYSBgeSB8CnRyaWFscyhOKSB+IDAgKyBncnAyYCwgd2hpY2ggY29ycmVzcG9uZHMgdG8gYSBtb2RlbAokXG1hdGhybXtsb2dpdH0oXHRoZXRhKSA9IFxhbHBoYSBcdGltZXMgMCArClxiZXRhX1xtYXRocm17Y29udHJvbH1cdGltZXMgeF9cbWF0aHJte2NvbnRyb2x9ICsKXGJldGFfXG1hdGhybXt0cmVhdG1lbnR9XHRpbWVzIHhfXG1hdGhybXt0cmVhdG1lbnR9ID0KXGJldGFfXG1hdGhybXtjb250cm9sfVx0aW1lcyB4X1xtYXRocm17Y29udHJvbH0gKwpcYmV0YV9cbWF0aHJte3RyZWF0bWVudH1cdGltZXMgeF9cbWF0aHJte3RyZWF0bWVudH0kLCB3aGVyZQokeF9cbWF0aHJte2NvbnRyb2x9JCBpcyBhIHZlY3RvciB3aXRoIDEgZm9yIGNvbnRyb2wgYW5kIDAgZm9yCnRyZWF0bWVudCwgYW5kICR4X1xtYXRocm17dHJlYXRlbW50fSQgaXMgYSB2ZWN0b3Igd2l0aCAxIGZvcgp0cmVhdGVtbnQgYW5kIDAgZm9yIGNvbnRyb2wuIEFzIG9ubHkgb2YgdGhlIHZlY3RvcnMgaGF2ZSAxLCB0aGlzCmNvcnJlc3BvbmRzIHRvIHNlcGFyYXRlIG1vZGVscwokXG1hdGhybXtsb2dpdH0oXHRoZXRhX1xtYXRocm17Y29udHJvbH0pID0gXGJldGFfXG1hdGhybXtjb250cm9sfSQKYW5kICRcbWF0aHJte2xvZ2l0fShcdGhldGFfXG1hdGhybXt0cmVhdG1lbnR9KSA9ClxiZXRhX1xtYXRocm17dHJlYXRtZW50fSQuICBXZSBjYW4gcHJvdmlkZSB0aGUgc2FtZSBwcmlvciBmb3IgYWxsCiRcYmV0YSQncyBieSBzZXR0aW5nIHRoZSBwcmlvciB3aXRoIGBjbGFzcz0nYidgLiBXaXRoIHByaW9yCmBzdHVkZW50X3QoNywgMCwxLjUpYCwgYm90aCAkXGJldGEkJ3MgYXJlIHNocnVuayB0b3dhcmRzIDAsIGJ1dAppbmRlcGVuZGVudGx5LgoKYGBge3J9CmZpdF9iaW4yIDwtIGJybSh5IHwgdHJpYWxzKE4pIH4gMCArIGdycDIsIGZhbWlseSA9IGJpbm9taWFsKCksIGRhdGEgPSBkYXRhX2JpbjIsCiAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKHN0dWRlbnRfdCg3LCAwLDEuNSksIGNsYXNzPSdiJyksCiAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIGJybXMgaXMgdXNpbmcKdGhlIGZpcnN0IGZhY3RvciBsZXZlbCBgY29udHJvbGAgYXMgdGhlIGJhc2VsaW5lIGFuZCB0aHVzIHJlcG9ydHMKdGhlIGNvZWZmaWNpZW50IChwb3B1bGF0aW9uLWxldmVsIGVmZmVjdCkgZm9yIGB0cmVhdG1lbnRgIChzaG93biBzCmBncnAydHJlYXRtZW50YCkKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIFdpdGggYH4
gMCArCmdycDJgIHRoZXJlIGlzIG5vIGBJbnRlcmNlcHRgIGFuZCBcYmV0YV9cbWF0aHJte2NvbnRyb2x9IGFuZApcYmV0YV9cbWF0aHJte3RyZWF0bWVudH0gYXJlIHByZXNlbnRlZCBhcyBgZ3JwMmNvbnRyb2xgIGFuZApgZ3JwMnRyZWF0bWVudGAuCgpgYGB7cn0KZml0X2JpbjIKYGBgCgpDb21wdXRlIHRoZXRhIGZvciBlYWNoIGdyb3VwIGFuZCB0aGUgb2Rkcy1yYXRpby4gYGJybXNgIHVzZXMKYmFyaWFibGUgbmFtZXMgYGJfZ3JwMmNvbnRyb2xgIGFuZCBgYl9ncnAydHJlYXRtZW50YCBmb3IKJFxiZXRhX1xtYXRocm17Y29udHJvbH0kIGFuZCAkXGJldGFfXG1hdGhybXt0cmVhdG1lbnR9JApyZXNwZWN0aXZlbHkuCgpgYGB7cn0KZHJhd3NfYmluMiA8LSBhc19kcmF3c19kZihmaXRfYmluMikgfD4KICBtdXRhdGUodGhldGFfY29udHJvbCA9IHBsb2dpcyhiX2dycDJjb250cm9sKSwKICAgICAgICAgdGhldGFfdHJlYXRtZW50ID0gcGxvZ2lzKGJfZ3JwMnRyZWF0bWVudCksCiAgICAgICAgIG9kZHNyYXRpbyA9ICh0aGV0YV90cmVhdG1lbnQvKDEtdGhldGFfdHJlYXRtZW50KSkvKHRoZXRhX2NvbnRyb2wvKDEtdGhldGFfY29udHJvbCkpKQpgYGAKClBsb3Qgb2Rkc3JhdGlvCgpgYGB7cn0KbWNtY19oaXN0KGRyYXdzX2JpbjIsIHBhcnM9J29kZHNyYXRpbycpICsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgwLjIsMS42LGJ5PTAuMikpKwogIGdlb21fdmxpbmUoeGludGVyY2VwdD0xLCBsaW5ldHlwZT0nZGFzaGVkJykKYGBgCgpQcm9iYWJpbGl0eSB0aGF0IHRoZSBvZGRzcmF0aW88MQoKYGBge3J9CmRyYXdzX2JpbjIgfD4KICBtdXRhdGUocG9kZHNyYXRpbyA9IG9kZHNyYXRpbzwxKSB8PgogIHN1YnNldCh2YXJpYWJsZT0ncG9kZHNyYXRpbycpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKG1lYW4sIG1jc2VfbWVhbikKYGBgCgpvZGRzcmF0aW8gOTUlIHBvc3RlcmlvciBpbnRlcnZhbAoKYGBge3J9CmRyYXdzX2JpbjIgfD4KICBzdWJzZXQodmFyaWFibGU9J29kZHNyYXRpbycpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKH5xdWFudGlsZSgueCwgcHJvYnMgPSBjKDAuMDI1LCAwLjk3NSkpLCB+bWNzZV9xdWFudGlsZSgueCwgcHJvYnMgPSBjKDAuMDI1LCAwLjk3NSkpKQpgYGAKCk1ha2UgcHJpb3Igc2Vuc2l0aXZpdHkgYW5hbHlzaXMgYnkgcG93ZXJzY2FsaW5nIGJvdGggcHJpb3IgYW5kCmxpa2VsaWhvb2QuICBGb2N1cyBvbiBvZGRzcmF0aW8gd2hpY2ggaXMgdGhlIHF1YW50aXR5IG9mCmludGVyZXN0LiBXZSBzZWUgdGhhdCB0aGUgbGlrZWxpaG9vZCBpcyBtdWNoIG1vcmUgaW5mb3JtYXRpdmUgdGhhbgp0aGUgcHJpb3IsIGFuZCB3ZSB3b3VsZCBleHBlY3QgdG8gc2VlIGEgZGlmZmVyZW50IHBvc3RlcmlvciBvbmx5CndpdGggYSBoaWdobHkgaW5mb3JtYXRpdmUgcHJpb3IgKHBvc3NpYmx5IGJhc2VkIG9uIHByZXZpb3VzIHNpbWlsYXIKZXhwZXJpbWVudHMpLgoKYGBge3J9Cm9kZHNyYXRpbyA8LSBkcmF3c19iaW4yIHw+CiA
gc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSdvZGRzcmF0aW8nKQpwb3dlcnNjYWxlX3NlbnNpdGl2aXR5KGZpdF9iaW4yLCBwcmVkaWN0aW9uID0gXCh4LCAuLi4pIG9kZHNyYXRpbywgbnVtX2FyZ3M9bGlzdChkaWdpdHM9MikKICAgICAgICAgICAgICAgICAgICAgICApJHNlbnNpdGl2aXR5IHw+CiAgICAgICAgICAgICAgICAgICAgICAgICBmaWx0ZXIodmFyaWFibGU9PSdvZGRzcmF0aW8nKSB8PgogICAgICAgICAgICAgICAgICAgICAgICAgbXV0YXRlKGFjcm9zcyh3aGVyZShpcy5kb3VibGUpLCAgfm51bSgueCwgZGlnaXRzPTIpKSkKYGBgCgpBYm92ZSB3ZSB1c2VkIGZvcm11bGEgYHkgfCB0cmlhbHMoTikgfiAwICsgZ3JwMmAgdG8gaGF2ZSBzZXBhcmF0ZQptb2RlbCBmb3IgY29udHJvbCBhbmQgdHJlYXRtZW50IGdyb3VwLiBBbiBhbHRlcm5hdGl2ZSBtb2RlbCBgeSB8CnRyaWFscyhOKSB+IGdycDJgIHdoaWNoIGlzIGVxdWFsIHRvIGB5IHwgdHJpYWxzKE4pIH4gMSArIGdycDJgLAp3b3VsZCBjb3JyZXNwb25kIHRvIGEgbW9kZWwgJFxtYXRocm17bG9naXR9KFx0aGV0YSkgPSBcYWxwaGEgXHRpbWVzCjEgKyBcYmV0YV9cbWF0aHJte3RyZWF0bWVudH1cdGltZXMgeF9cbWF0aHJte3RyZWF0bWVudH0gPSBcYWxwaGEgKwpcYmV0YV9cbWF0aHJte3RyZWF0bWVudH1cdGltZXMgeF9cbWF0aHJte3RyZWF0bWVudH0uIE5vdyAkXGFscGhhJAptb2RlbHMgdGhlIHByb2JhYmlsaXR5IG9mIGRlYXRoICh2aWEgbG9naXN0aWMgbGluaykgaW4gdGhlIGNvbnRyb2wKZ3JvdXAgYW5kICRcYWxwaGEgKyBcYmV0YV9cbWF0aHJte3RyZWF0bWVudH0kIG1vZGVscyB0aGUKcHJvYmFiaWxpdHkgb2YgZGVhdGggKHZpYSBsb2dpc3RpYyBsaW5rKSBpbiB0aGUgdHJlYXRtZW50Cmdyb3VwLiBOb3cgdGhlIG1vZGVscyBmb3IgdGhlIGdyb3VwcyBhcmUgY29ubmVjdGVkLiBGdXJ0aGVybW9yZSwgaWYKd2Ugc2V0IGluZGVwZW5kZW50IGBzdHVkZW50X3QoNywgMCwgMS41KWAgcHJpb3JzIG9uICRcYWxwaGEkIGFuZAokXGJldGFfXG1hdGhybXt0cmVhdG1lbnR9JCwgdGhlIGltcGxpZWQgcHJpb3JzIG9uCiRcdGhldGFfXG1hdGhybXtjb250cm9sfSQgYW5kICRcdGhldGFfXG1hdGhybXt0cmVhdG1lbnR9JCBhcmUKZGlmZmVyZW50LiBXZSBjYW4gdmVyaWZ5IHRoaXMgd2l0aCBhIHByaW9yIHNpbXVsYXRpb24uCgoKYGBge3J9CmRhdGEuZnJhbWUodGhldGFfY29udHJvbCA9IHBsb2dpcyhnZ2Rpc3Q6OnJzdHVkZW50X3Qobj0yMDAwMCwgZGY9NywgbXU9MCwgc2lnbWE9MS41KSkpIHw+CiAgbWNtY19oaXN0KCkgKwogIHhsaW0oYygwLDEpKSArCiAgbGFicyh0aXRsZT0nc3R1ZGVudF90KDcsIDAsIDEuNSkgcHJpb3Igb24gSW50ZXJjZXB0JykgKwpkYXRhLmZyYW1lKHRoZXRhX3RyZWF0bWVudCA9IHBsb2dpcyhnZ2Rpc3Q6OnJzdHVkZW50X3Qobj0yMDAwMCwgZGY9NywgbXU9MCwgc2lnbWE9MS41KSkrCiAgICAgICAgICAgICBwbG9naXMoZ2dkaXN0Ojpyc3R1ZGV
udF90KG49MjAwMDAsIGRmPTcsIG11PTAsIHNpZ21hPTEuNSkpKSB8PgogIG1jbWNfaGlzdCgpICsKICB4bGltKGMoMCwxKSkgKwogIGxhYnModGl0bGU9J3N0dWRlbnRfdCg3LCAwLCAxLjUpIHByaW9yIG9uIEludGVyY2VwdCBhbmQgYl9ncnAydHJlYXRtZW50JykKYGBgCgpJbiB0aGlzIGNhc2UsIHdpdGggcmVsYXRpdmVseSBiaWcgdHJlYXRtZW50IGFuZCBjb250cm9sIGdyb3VwLCB0aGUKbGlrZWxpaG9vZCBpcyBpbmZvcm1hdGl2ZSwgYW5kIHRoZSBkaWZmZXJlbmNlIGJldHdlZW4gdXNpbmcgYHkgfAp0cmlhbHMoTikgfiAwICsgZ3JwMmAgb3IgYHkgfCB0cmlhbHMoTikgfiBncnAyYCBpcyBuZWdsaWdpYmxlLgoKVGhpcmQgb3B0aW9uIHdvdWxkIGJlIGEgaGllcmFyY2hpY2FsIG1vZGVsIHdpdGggZm9ybXVsYSBgeSB8CnRyaWFscyhOKSB+IDEgKyAoMSB8IGdycDIpYCwgd2hpY2ggaXMgZXF1aXZhbGVudCB0byBgeSB8IHRyaWFscyhOKQp+IDEgKyAoMSB8IGdycDIpYCwgYW5kIGNvcnJlc3BvbmRzIHRvIGEgbW9kZWwKJFxtYXRocm17bG9naXR9KFx0aGV0YSkgPSBcYWxwaGEgXHRpbWVzIDEgKwpcYmV0YV9cbWF0aHJte2NvbnRyb2x9XHRpbWVzIHhfXG1hdGhybXtjb250cm9sfSArClxiZXRhX1xtYXRocm17dHJlYXRtZW50fVx0aW1lcyB4X1xtYXRocm17dHJlYXRtZW50fSQsIGJ1dCBub3cgdGhlCnByaW9yIG9uICRcYmV0YV9cbWF0aHJte2NvbnRyb2x9JCBhbmQgJFxiZXRhX1xtYXRocm17dHJlYXRtZW50fSQgaXMKJFxtYXRocm17bm9ybWFsfSgwLCBcc2lnbWFfXG1hdGhybXtncnB9KSQuIFRoZSBkZWZhdWx0IGBicm1zYCBwcmlvcgpmb3IgJFxzaWdtYV9cbWF0aHJte2dycH0kIGlzIGBzdHVkZW50X3QoMywgMCwgMi41KWAuIE5vdyAkXGFscGhhJAptb2RlbHMgdGhlIG92ZXJhbGwgcHJvYmFibGl0eSBvZiBkZWF0aCAodmlhIGxvZ2lzdGljIGxpbmspLCBhbmQKJFxiZXRhX1xtYXRocm17Y29udHJvbH0kIGFuZCAkXGJldGFfXG1hdGhybXt0cmVhdG1lbnR9JCBtb2RlbCB0aGUKZGlmZmVyZW5jZSBmcm9tIHRoYXQgaGF2aW5nIHRoZSBzYW1lIHByaW9yLiBQcmlvciBmb3IKJFxiZXRhX1xtYXRocm17Y29udHJvbH0kIGFuZCAkXGJldGFfXG1hdGhybXt0cmVhdG1lbnR9JCBpbmNsdWRlcwp1bmtub3duIHNjYWxlICRcc2lnbWFfXG1hdGhybXtncnB9JC4gSWYgdGhlIHRoZXJlIGlzIG5vdCBkaWZmZXJlbmNlCmJldHdlZW4gY29udHJvbCBhbmQgdHJlYXRtZW50IGdyb3VwcywgdGhlIHBvc3RlcmlvciBvZgokXHNpZ21hX1xtYXRocm17Z3JwfSQgaGFzIG1vcmUgbWFzcyBuZWFyIDAsIGFuZCBiaWdnZXIgdGhlCmRpZmZlcmVuY2UgYmV0d2VlbiBjb250cm9sIGFuZCB0cmVhdG1lbnQgZ3JvdXBzIGFyZSwgbW9yZSBtYXNzCnRoZXJlIGlzIGF3YXkgZnJvbSAwLiBXaXRoIGp1c3QgdHdvIGdyb3VwcywgdGhlcmUgaXMgbm90IG11Y2gKaW5mb3JtYXRpb24gYWJvdXQgJFxzaWdtYV9cbWF0aHJte2dycH0kLCBhbmQgdW5sZXNzIHRoZXJ
lIGlzIGEKaW5mb3JtYXRpdmUgcHJpb3Igb24gJFxzaWdtYV9cbWF0aHJte2dycH0kLCB0d28gZ3JvdXAgaGllcmFyY2hpY2FsCm1vZGVsIGlzIG5vdCB0aGF0IHVzZWZ1bC4gSGllcmFyY2hpY2FsIG1vZGVscyBhcmUgbW9yZSB1c2VmdWwgd2l0aAptb3JlIHRoYW4gdHdvIGdyb3Vwcy4gSW4gdGhlIGZvbGxvd2luZywgd2UgdXNlIHRoZSBwcmV2aW91c2x5IHVzZWQKYHN0dWRlbnRfdCg3LCAwLDEuNSlgIHByaW9yIG9uIGludGVyY2VwdCBhbmQgdGhlIGRlZmF1bHQgYGJybXNgCnByaW9yIGBzdHVkZW50X3QoMywgMCwgMi41KWAgb24gJFxzaWdtYV9cbWF0aHJte2dycH0kLgoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9iaW4yIDwtIGJybSh5IHwgdHJpYWxzKE4pIH4gMSArICgxIHwgZ3JwMiksIGZhbWlseSA9IGJpbm9taWFsKCksIGRhdGEgPSBkYXRhX2JpbjIsCiAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKHN0dWRlbnRfdCg3LCAwLDEuNSksIGNsYXNzPSdJbnRlcmNlcHQnKSwKICAgICAgICAgICAgICAgIHNlZWQgPSBTRUVELCByZWZyZXNoID0gMCwgY29udHJvbD1saXN0KGFkYXB0X2RlbHRhPTAuOTkpKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlLiBUaGUgc3VtbWFyeQpyZXBvcnRzIHRoYXQgdGhlcmUgYXJlIEdyb3VwLUxldmVsIEVmZmVjdHM6IGB+Z3JwMmAgd2l0aCAyIGxldmVscwooY29udHJvbCBhbmQgdHJlYXRtZW50KSwgd2l0aCBgc2QoSW50ZXJjZXB0KWAgZGVub3RpbmcKJFxzaWdtYV9cbWF0aHJte2dycH0kLiBJbiBhZGRpdGlvbiwgdGhlIHN1bW1hcnkgbGlzdHMKUG9wdWxhdGlvbi1MZXZlbCBFZmZlY3RzOiBgSW50ZXJjZXB0YCAoJFxhbHBoYSQpIGFzIGluIHRoZSBwcmV2b3VzCm5vbi1oaWVyYXJjaGljYWwgbW9kZWxzLgoKYGBge3J9CmZpdF9iaW4yCmBgYAoKV2UgY2FuIGFsc28gbG9vayBhdCB0aGUgdmFyaWFibGUgbmFtZXMgYGJybXNgIHVzZXMgaW50ZXJuYWxseQoKYGBge3J9CmFzX2RyYXdzX3J2YXJzKGZpdF9iaW4yKQpgYGAKCkFsdGhvdWdoIHRoZXJlIGlzIG5vIGRpZmZlcmVuY2UsIGlsbHVzdHJhdGUgaG93IHRvIGNvbXB1dGUgdGhlCm9kZHNyYXRpbyBmcm9tIGhpZXJhcmNoaWNhbCBtb2RlbAoKYGBge3J9CmRyYXdzX2JpbjIgPC0gYXNfZHJhd3NfZGYoZml0X2JpbjIpCm9kZHNyYXRpbyA8LSBkcmF3c19iaW4yIHw+CiAgbXV0YXRlX3ZhcmlhYmxlcyh0aGV0YV9jb250cm9sID0gcGxvZ2lzKGJfSW50ZXJjZXB0ICsgYHJfZ3JwMltjb250cm9sLEludGVyY2VwdF1gKSwKICAgICAgICAgICAgICAgICAgIHRoZXRhX3RyZWF0bWVudCA9IHBsb2dpcyhiX0ludGVyY2VwdCArIGByX2dycDJbdHJlYXRtZW50LEludGVyY2VwdF1gKSwKICAgICAgICAgICAgICAgICAgIG9kZHNyYXRpbyA9ICh0aGV0YV90cmVhdG1lbnQvKDEtdGhldGFfdHJlYXRtZW50KSkvKHRoZXRhX2NvbnRyb2wvKDEtdGhldGFfY29udHJvbCkpKSB8PgogIHN1YnNldF9
kcmF3cyh2YXJpYWJsZT0nb2Rkc3JhdGlvJykKb2Rkc3JhdGlvIHw+IG1jbWNfaGlzdCgpICsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgwLjIsMS42LGJ5PTAuMikpKwogIGdlb21fdmxpbmUoeGludGVyY2VwdD0xLCBsaW5ldHlwZT0nZGFzaGVkJykKYGBgCgpNYWtlIGFsc28gcHJpb3Igc2Vuc2l0aXZpdHkgYW5hbHlzaXMgd2l0aCBmb2N1cyBvbiBvZGRzcmF0aW8uCgpgYGB7cn0KcG93ZXJzY2FsZV9zZW5zaXRpdml0eShmaXRfYmluMiwgcHJlZGljdGlvbiA9IFwoeCwgLi4uKSBvZGRzcmF0aW8sIG51bV9hcmdzPWxpc3QoZGlnaXRzPTIpCiAgICAgICAgICAgICAgICAgICAgICAgKSRzZW5zaXRpdml0eSB8PgogICAgICAgICAgICAgICAgICAgICAgICAgZmlsdGVyKHZhcmlhYmxlPT0nb2Rkc3JhdGlvJykgfD4KICAgICAgICAgICAgICAgICAgICAgICAgIG11dGF0ZShhY3Jvc3Mod2hlcmUoaXMuZG91YmxlKSwgIH5udW0oLngsIGRpZ2l0cz0yKSkpCmBgYAoKIyBMaW5lYXIgR2F1c3NpYW4gbW9kZWwKClVzZSB0aGUgS2lscGlzasOkcnZpIHN1bW1lciBtb250aCB0ZW1wZXJhdHVyZXMgMTk1Mi0tMjAyMiBkYXRhIGZyb20gYGFhbHRvYmRhYCBwYWNrYWdlCgpgYGB7cn0KbG9hZCh1cmwoJ2h0dHBzOi8vZ2l0aHViLmNvbS9hdmVodGFyaS9CREFfY291cnNlX0FhbHRvL3Jhdy9tYXN0ZXIvcnBhY2thZ2UvZGF0YS9raWxwaXNqYXJ2aTIwMjIucmRhJykpCmRhdGFfbGluIDwtIGRhdGEuZnJhbWUoeWVhciA9IGtpbHBpc2phcnZpMjAyMiR5ZWFyLAogICAgICAgICAgICAgICAgICAgICAgIHRlbXAgPSBraWxwaXNqYXJ2aTIwMjIkdGVtcC5zdW1tZXIpCmBgYAoKUGxvdCB0aGUgZGF0YQoKYGBge3J9CmRhdGFfbGluIHw+CiAgZ2dwbG90KGFlcyh5ZWFyLCB0ZW1wKSkgKwogIGdlb21fcG9pbnQoY29sb3I9MikgKwogIGxhYnMoeD0gIlllYXIiLCB5ID0gJ1N1bW1lciB0ZW1wLiBAS2lscGlzasOkcnZpJykgKwogIGd1aWRlcyhsaW5ldHlwZSA9ICJub25lIikKYGBgCgpUbyBhbmFseXNlIGhhcyB0aGVyZSBiZWVuIGNoYW5nZSBpbiB0aGUgYXZlcmFnZSBzdW1tZXIgbW9udGgKdGVtcGVyYXR1cmUgd2UgdXNlIGEgbGluZWFyIG1vZGVsIHdpdGggR2F1c3NpYW4gbW9kZWwgZm9yIHRoZQp1bmV4cGxhaW5lZCB2YXJpYXRpb24uIEJ5IGRlZmF1bHQgYnJtcyB1c2VzIHVuaWZvcm0gcHJpb3IgZm9yIHRoZQpjb2VmZmljaWVudHMuCgpGb3JtdWxhIGB0ZW1wIH4geWVhcmAgY29ycmVzcG9uZHMgdG8gbW9kZWwgJFxtYXRocm17dGVtcH0gfgpcbWF0aHJte25vcm1hbH0oXGFscGhhICsgXGJldGEgXHRpbWVzIFxtYXRocm17dGVtcH0sIFxzaWdtYSkuICBUaGUKbW9kZWwgY291bGQgYWxzbyBiZSBkZWZpbmVkIGFzIGB0ZW1wIH4gMSArIHllYXJgIHdoaWNoIGV4cGxpY2l0bHkKc2hvd3MgdGhlIGludGVyY2VwdCAoJFxhbHBoYSQpIHBhcnQuIFVzaW5nIHRoZSB2YXJpYWJsZSBuYW1lcwpgYnJtc2AgdXNlcyB0aGUgbW9kZWwgY2FuIGJlIHd
yaXR0ZW4gYWxzbyBhcyBgdGVtcCB+Cm5vcm1hbChiX0ludGVyY2VwdCoxICsgYl95ZWFyKnllYXIsIHNpZ21hKWAuIFdlIHN0YXJ0IHdpdGggdGhlCmRlZmF1bHQgcHJpb3JzIHRvIHNlZSBzb21lIHRyaWNrcyB0aGF0IGBicm1zYCBkb2VzIGJlaGluZCB0aGUKY3VydGFpbi4KCmBgYHtyfQpmaXRfbGluIDwtIGJybSh0ZW1wIH4geWVhciwgZGF0YSA9IGRhdGFfbGluLCBmYW1pbHkgPSBnYXVzc2lhbigpLAogICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuCgpgYGB7cn0KZml0X2xpbgpgYGAKCkNvbnZlcmdlbmNlIGRpYWdub3N0aWNzIGxvb2sgZ29vZC4gV2Ugc2VlIHRoYXQgcG9zdGVyaW9yIG1lYW4gb2YKYEludGVyY2VwdGAgaXMgLTM0LjcsIHdoaWNoIG1heSBzb3VuZCBzdHJhbmdlLCBidXQgdGhhdCBpcyB0aGUKaW50ZXJjZXB0IGF0IHllYXIgMCwgdGhhdCBpcywgdmVyeSBmYXIgZnJvbSB0aGUgZGF0YSByYW5nZSwgYW5kCnRodXMgZG9lc24ndCBoYXZlIG1lYW5pbmdmdWwgaW50ZXJwcmV0YXRpb24gZGlyZWN0bHkuIFRoZSBwb3N0ZXJpb3IKbWVhbiBvZiBgeWVhcmAgY29lZmZpY2llbnQgaXMgMC4wMiwgdGhhdCBpcywgd2UgZXN0aW1hdGUgdGhhdCB0aGUKc3VtbWVyIHRlbXBlcmF0dXJlIGlzIGluY3JlYXNpbmcgMC4wMsKwQyBwZXIgeWVhciAod2hpY2ggd291bGQgbWFrZQoxwrBDIGluIDUwIHllYXJzKS4KCldlIGNhbiBjaGVjayAkUl4yJCB3aGljaCBjb3JyZXNwb25kcyB0byB0aGUgcHJvcG9yaW9uIG9mIHZhcmlhbmNlCmV4cGxhaW5lZCBieSB0aGUgbW9kZWwuIFRoZSBsaW5lYXIgbW9kZWwgZXhwbGFpbnMgMC4xNj0xNiUgb2YgdGhlCnRvdGFsIGRhdGEgdmFyaWFuY2UuCgpgYGB7cn0KYmF5ZXNfUjIoZml0X2xpbikgfD4gcm91bmQoMikKYGBgCgpXZSBjYW4gY2hlY2sgdGhlIGFsbCB0aGUgcHJpb3JzIHVzZWQuIAoKYGBge3J9CnByaW9yX3N1bW1hcnkoZml0X2xpbikKYGBgCgpXZSBzZWUgdGhhdCBgY2xhc3M9YmAgYW5kIGBjb2VmPXllYXJgIGhhdmUgYGZsYXRgLCB0aGF0IGlzLAppbXByb3BlciB1bmlmb3JtIHByaW9yLCBgSW50ZXJjZXB0YCBoYXMgYHN0dWRlbnRfdCgzLCA5LjUsIDIuNSlgLAphbmQgYHNpZ21hYCBoYXMgYHN0dWRlbnRfdCgzLCAwLCAyLjUpYCBwcmlvci4gIEluIGdlbmVyYWwgaXQgaXMKZ29vZCB0byB1c2UgcHJvcGVyIHByaW9ycywgYnV0IHNvbWV0aW1lcyBmbGF0IHByaW9ycyBhcmUgZmluZSBhbmQKcHJvZHVjZSBwcm9wZXIgcG9zdGVyaW9yIChsaWtlIGluIHRoaXMgY2FzZSkuIEltcG9ydGFudCBwYXJ0IGhlcmUKaXMgdGhhdCBieSBkZWZhdWx0LCBgYnJtc2Agc2V0cyB0aGUgcHJpb3Igb24gSW50ZXJjZXB0IGFmdGVyCmNlbnRlcmluZyB0aGUgY292YXJpYXRlIHZhbHVlcyAoZGVzaWduIG1hdHJpeCkuIEluIHRoaXMgY2FzZSwKYGJybXNgIHVzZXMgYHRlbXAgLSB
tZWFuKHRlbXApID0gdGVtcCAtIDE5ODdgIGluc3RlYWQgb2Ygb3JpZ2luYWwKeWVhcnMuIFRoaXMgaW4gZ2VuZXJhbCBpbXByb3ZlcyB0aGUgc2FtcGxpbmcgZWZmaWNpZW5jeS4gQXMgdGhlCmBJbnRlcmNlcHRgIGlzIG5vdyBkZWZpbmVkIGF0IHRoZSBtaWRkbGUgb2YgdGhlIGRhdGEsIHRoZSBkZWZhdWx0CmBJbnRlcmNlcHRgIHByaW9yIGlzIGNlbnRlcmVkIG9uIG1lZGlhbiBvZiB0aGUgdGFyZ2V0IChoZXJlIHRhcmdldAppcyBgeWVhcmApLiBJZiB3ZSB3b3VsZCBsaWtlIHRvIHNldCBpbmZvcm1hdGl2ZSBwcmlvcnMsIHdlIG5lZWQgdG8Kc2V0IHRoZSBpbmZvcm1hdGl2ZSBwcmlvciBvbiBgSW50ZXJjZXB0YCBnaXZlbiB0aGUgY2VudGVyZWQKY292YXJpYXRlIHZhbHVlcy4gV2UgY2FuIHR1cm4gb2YgdGhlIGNlbnRlcmluZyBieSBzZXR0aW5nIGFyZ3VtZW50CmBjZW50ZXI9RkFMU0VgLCBhbmQgd2UgY2FuIHNldCB0aGUgcHJpb3Igb24gb3JpZ2luYWwgaW50ZXJjZXB0IGJ5CnVzaW5nIGEgZm9ybXVsYSBgdGVtcCB+IDAgKyBJbnRlcmNlcHQgKyB5ZWFyYC4gSW4gdGhpcyBjYXNlLCB3ZSBhcmUKaGFwcHkgd2l0aCB0aGUgZGVmYXVsdCBwcmlvciBmb3IgdGhlIGludGVyY2VwdC4gSW4gdGhpcyBzcGVjaWZpYwpjYXNzZSwgdGhlIGZsYXQgcHJpb3Igb24gY29lZmZpY2llbnQgaXMgYWxzbyBmaW5lLCBidXQgd2UgYWRkIGFuCndlYWtseSBpbmZvcm1hdGl2ZSBwcmlvciBqdXN0IGZvciB0aGUgaWxsdXN0cmF0aW9uLiBMZXQncyBhc3N1bWUgd2UKZXhwZWN0IHRoZSB0ZW1wZXJhdHVyZSB0byBjaGFuZ2UgbGVzcyB0aGFuIDHCsEMgaW4gMTAgeWVhcnMuIFdpdGgKYHN0dWRlbnRfdCgzLCAwLCAwLjAzKWAgYWJvdXQgOTUlIHByaW9yIG1hc3MgaGFzIGxlc3MgdGhhbiAwLjHCsEMKY2hhbmdlIGluIHllYXIsIGFuZCB3aXRoIGxvdyBkZWdyZWVzIG9mIGZyZWVkb20gKDMpIHdlIGhhdmUgdGhpY2sKdGFpbHMgbWFraW5nIHRoZSBsaWtlbGlob29kIGRvbWluYXRlIGluIGNhc2Ugb2YgcHJpb3ItZGF0YQpjb25mbGljdC4gSW4gcmVhbCBsaWZlLCB3ZSBkbyBoYXZlIG11Y2ggbW9yZSBpbmZvcm1hdGlvbiBhYm91dCB0aGUKdGVtcGVyYXR1cmUgY2hhbmdlLCBhbmQgbmF0dXJhbGx5IGEgaGllcmFyY2hpY2FsIHNwYXRpby10ZW1wb3JhbAptb2RlbCB3aXRoIGFsbCB0ZW1wZXJhdHVyZSBtZWFzdXJlbWVudCBsb2NhdGlvbnMgd291bGQgYmUgZXZlbgpiZXR0ZXIuCgpgYGB7cn0KZml0X2xpbiA8LSBicm0odGVtcCB+IHllYXIsIGRhdGEgPSBkYXRhX2xpbiwgZmFtaWx5ID0gZ2F1c3NpYW4oKSwKICAgICAgICAgICAgICAgcHJpb3IgPSBwcmlvcihzdHVkZW50X3QoMywgMCwgMC4wMyksIGNsYXNzPSdiJyksCiAgICAgICAgICAgICAgIHNlZWQgPSBTRUVELCByZWZyZXNoID0gMCkKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZQoKYGBge3J9CmZpdF9saW4KYGBgCgpNYWtlIHByaW9yIHNlbnN
pdGl2aXR5IGFuYWx5c2lzIGJ5IHBvd2Vyc2NhbGluZyBib3RoIHByaW9yIGFuZCBsaWtlbGlob29kLgoKYGBge3J9CnBvd2Vyc2NhbGVfc2Vuc2l0aXZpdHkoZml0X2xpbikkc2Vuc2l0aXZpdHkgfD4KICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICBtdXRhdGUoYWNyb3NzKHdoZXJlKGlzLmRvdWJsZSksICB+bnVtKC54LCBkaWdpdHM9MikpKQpgYGAKCk91ciB3ZWFrbHkgaW5mb3JtYXRpdmUgcHJvcGVyIHByaW9yIGhhcyBuZWdsaWdpYmxlIHNlbnNpdGl2aXR5LCBhbmQKdGhlIGxpa2VsaWhvb2QgaXMgaW5mb3JtYXRpdmUuCkV4dHJhY3QgdGhlIHBvc3RlcmlvciBkcmF3cyBhbmQgY2hlY2sgdGhlIHN1bW1hcmllcwoKYGBge3J9CmRyYXdzX2xpbiA8LSBhc19kcmF3c19kZihmaXRfbGluKSAKZHJhd3NfbGluIHw+IHN1bW1hcmlzZV9kcmF3cygpCmBgYAoKSWYgb25lIG9mIHRoZSBjb2x1bW5zIGlzIGhpZGRlbiB3ZSBjYW4gZm9yY2UgcHJpbnRpbmcgYWxsIGNvbHVtbnMKCmBgYHtyfQpkcmF3c19saW4gfD4gc3VtbWFyaXNlX2RyYXdzKCkgfD4gcHJpbnQod2lkdGg9SW5mKQpgYGAKCkhpc3RvZ3JhbSBvZiBiX3llYXIKCmBgYHtyfQpkcmF3c19saW4gfD4KICBtY21jX2hpc3QocGFycz0nYl95ZWFyJykgKwogIHhsYWIoJ0F2ZXJhZ2UgdGVtcGVyYXR1cmUgaW5jcmVhc2UgcGVyIHllYXInKQpgYGAKClByb2JhYmlsaXR5IHRoYXQgdGhlIGNvZWZmaWNpZW50IGJfeWVhciA+IDAgYW5kIHRoZSBjb3JyZXNwb25kaW5nIE1DU0UKCmBgYHtyfQpkcmF3c19saW4gfD4KICBtdXRhdGUoSV9iX3llYXJfZ3RfMCA9IGJfeWVhcj4wKSB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nSV9iX3llYXJfZ3RfMCcpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKG1lYW4sIG1jc2VfbWVhbikKYGBgCgpBbGwgcG9zdGVyaW9yIGRyYXdzIGhhdmUgYGJfeWVhcj4wYCwgdGhlIHByb2JhYmlsaXR5IGdldHMgcm91bmRlZAp0byAxLCBhbmQgTUNTRSBpcyBub3QgYXZhaWxhYmxlIGFzIHRoZSBvYnNlcmV2ZCBwb3N0ZXJpb3IgdmFyaWFuY2UKaXMgMC4KCjk1JSBwb3N0ZXJpb3IgaW50ZXJ2YWwgZm9yIHRlbXBlcmF0dXJlIGluY3JlYXNlIHBlciAxMDAgeWVhcnMKCmBgYHtyfQpkcmF3c19saW4gfD4KICBtdXRhdGUoYl95ZWFyXzEwMCA9IGJfeWVhcioxMDApIHw+CiAgc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSdiX3llYXJfMTAwJykgfD4KICBzdW1tYXJpc2VfZHJhd3MofnF1YW50aWxlKC54LCBwcm9icyA9IGMoMC4wMjUsIDAuOTc1KSksCiAgICAgICAgICAgICAgICAgIH5tY3NlX3F1YW50aWxlKC54LCBwcm9icyA9IGMoMC4wMjUsIDAuOTc1KSksCiAgICAgICAgICAgICAgICAgIC5udW1fYXJncyA9IGxpc3QoZGlnaXRzID0gMiwgbm90YXRpb24gPSAiZGVjIikpCmBgYAoKUGxvdCBwb3N0ZXJpb3IgZHJhd3Mgb2YgdGhlIGxpbmVhciBmdW5jdGlvbiB2YWx1ZXMgYXQgZWFjaCB5ZWFyLgpgYWRkX2xpbnByZWRfZHJhd3MoKWAgdGFrZXMgdGhlIHllYXJzIGZ
yb20gdGhlIGRhdGEgYW5kIHVzZXMgYGZpdF9saW5gIHRvIG1ha2UKdGhlIHByZWRpY3Rpb25zLgoKYGBge3J9CmRhdGFfbGluIHw+CiAgYWRkX2xpbnByZWRfZHJhd3MoZml0X2xpbikgfD4KICAjIHBsb3QgZGF0YQogIGdncGxvdChhZXMoeD15ZWFyLCB5PXRlbXApKSArCiAgZ2VvbV9wb2ludChjb2xvcj0yKSArCiAgIyBwbG90IGxpbmVyaWJib24gZm9yIHRoZSBsaW5lYXIgbW9kZWwKICBzdGF0X2xpbmVyaWJib24oYWVzKHkgPSAubGlucHJlZCksIC53aWR0aCA9IGMoLjk1KSwgYWxwaGEgPSAxLzIsIGNvbG9yPWJyZXdlci5wYWwoNSwgIkJsdWVzIilbWzVdXSkgKwogICMgZGVjb3JhdGlvbgogIHNjYWxlX2ZpbGxfYnJld2VyKCkrCiAgbGFicyh4PSAiWWVhciIsIHkgPSAnU3VtbWVyIHRlbXAuIEBLaWxwaXNqw6RydmknKSArCiAgdGhlbWUobGVnZW5kLnBvc2l0aW9uPSJub25lIikrCiAgc2NhbGVfeF9jb250aW51b3VzKGJyZWFrcz1zZXEoMTk1MCwyMDIwLGJ5PTEwKSkKYGBgCgpBbHRlcm5hdGl2ZWxseSBwbG90IGEgc3BhZ2hldHRpIHBsb3QgZm9yIDEwMCBkcmF3cwoKYGBge3J9CmRhdGFfbGluIHw+CiAgYWRkX2xpbnByZWRfZHJhd3MoZml0X2xpbiwgbmRyYXdzPTEwMCkgfD4KICAjIHBsb3QgZGF0YQogIGdncGxvdChhZXMoeD15ZWFyLCB5PXRlbXApKSArCiAgZ2VvbV9wb2ludChjb2xvcj0yKSArCiAgIyBwbG90IGEgbGluZSBmb3IgZWFjaCBwb3N0ZXJpb3IgZHJhdwogIGdlb21fbGluZShhZXMoeT0ubGlucHJlZCwgZ3JvdXA9LmRyYXcpLCBhbHBoYSA9IDEvMiwgY29sb3IgPSBicmV3ZXIucGFsKDUsICJCbHVlcyIpW1szXV0pKwogICMgZGVjb3JhdGlvbgogIHNjYWxlX2ZpbGxfYnJld2VyKCkrCiAgbGFicyh4PSAiWWVhciIsIHkgPSAnU3VtbWVyIHRlbXAuIEBLaWxwaXNqw6RydmknKSArCiAgdGhlbWUobGVnZW5kLnBvc2l0aW9uPSJub25lIikrCiAgc2NhbGVfeF9jb250aW51b3VzKGJyZWFrcz1zZXEoMTk1MCwyMDIwLGJ5PTEwKSkKYGBgCgpQbG90IHBvc3RlcmlvciBwcmVkaWN0aXZlIGRpc3RyaWJ1dGlvbiBhdCBlYWNoIHllYXIgdW50aWwgMjAzMApgYWRkX3ByZWRpY3RlZF9kcmF3cygpYCB0YWtlcyB0aGUgeWVhcnMgZnJvbSB0aGUgZGF0YSBhbmQgdXNlcwpgZml0X2xpbmAgdG8gbWFrZSB0aGUgcHJlZGljdGlvbnMuCgpgYGB7cn0KZGF0YV9saW4gfD4KICBhZGRfcm93KHllYXI9MjAyMzoyMDMwKSB8PgogIGFkZF9wcmVkaWN0ZWRfZHJhd3MoZml0X2xpbikgfD4KICAjIHBsb3QgZGF0YQogIGdncGxvdChhZXMoeD15ZWFyLCB5PXRlbXApKSArCiAgZ2VvbV9wb2ludChjb2xvcj0yKSArCiAgIyBwbG90IGxpbmVyaWJib24gZm9yIHRoZSBsaW5lYXIgbW9kZWwKICBzdGF0X2xpbmVyaWJib24oYWVzKHkgPSAucHJlZGljdGlvbiksIC53aWR0aCA9IGMoLjk1KSwgYWxwaGEgPSAxLzIsIGNvbG9yPWJyZXdlci5wYWwoNSwgIkJsdWVzIilbWzVdXSkgKwogICMgZGVjb3JhdGlvbgogIHNjYWxlX2ZpbGxfYnJld2VyKCk
rCiAgbGFicyh4PSAiWWVhciIsIHkgPSAnU3VtbWVyIHRlbXAuIEBLaWxwaXNqw6RydmknKSArCiAgdGhlbWUobGVnZW5kLnBvc2l0aW9uPSJub25lIikrCiAgc2NhbGVfeF9jb250aW51b3VzKGJyZWFrcz1zZXEoMTk1MCwyMDMwLGJ5PTEwKSkKYGBgCgojIExpbmVhciBTdHVkZW50J3MgJHQkIG1vZGVsCgpUaGUgdGVtcGVyYXR1cmVzIHVzZWQgaW4gdGhlIGFib3ZlIGFuYWx5c2VzIGFyZSBhdmVyYWdlcyBvdmVyIHRocmVlCm1vbnRocywgd2hpY2ggbWFrZXMgaXQgbW9yZSBsaWtlbHkgdGhhdCB0aGV5IGFyZSBub3JtYWxseQpkaXN0cmlidXRlZCwgYnV0IHRoZXJlIGNhbiBiZSBleHRyZW1lIGV2ZW50cyBpbiB0aGUgZmVhdGhlciBhbmQgd2UKY2FuIGNoZWNrIHdoZXRoZXIgbW9yZSByb2J1c3QgU3R1ZGVudCdzICR0JCBvYnNlcnZhdGlvbiBtb2RlbCB3b3VsZApnaXZlIGRpZmZlcmVudCByZXN1bHRzLgoKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfbGluX3QgPC0gYnJtKHRlbXAgfiB5ZWFyLCBkYXRhID0gZGF0YV9saW4sIGZhbWlseSA9IHN0dWRlbnQoKSwKICAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKHN0dWRlbnRfdCgzLCAwLCAwLjAzKSwgY2xhc3M9J2InKSwKICAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIFRoZSBiX3llYXIKcG9zdGVyaW9yIGxvb2tzIHNpbWlsYXIgYXMgYmVmb3JlIGFuZCB0aGUgcG9zdGVyaW9yIGZvciBkZWdyZWVzIG9mCmZyZWVkb20gYG51YCBoYXMgbW9zdCBvZiB0aGUgcG9zdGVyaW9yIG1hc3MgZm9yIHF1aXRlIGxhcmdlIHZhbHVlcwppbmRpY2F0aW5nIHRoZXJlIGlzIG5vIHN0cm9uZyBzdXBwb3J0IGZvciB0aGljayB0YWlsZWQgdmFyaWF0aW9uIGluCmF2ZXJhZ2Ugc3VtbWVyIHRlbXBlcmF0dXJlcy4KCmBgYHtyfQpmaXRfbGluX3QKYGBgCgojIFBhcmV0by1zbW9vdGhlZCBpbXBvcnRhbmNlLXNhbXBsaW5nIGxlYXZlLW9uZS1vdXQgY3Jvc3MtdmFsaWRhdGlvbiAoUFNJUy1MT08pCgpXZSBjYW4gdXNlIGxlYXZlLW9uZS1vdXQgY3Jvc3MtdmFsaWRhdGlvbiB0byBjb21wYXJlIHRoZSBleHBlY3RlZCBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlLgoKTE9PIGNvbXBhcmlzb24gc2hvd3Mgbm9ybWFsIGFuZCBTdHVkZW50J3MgJHQkIG1vZGVsIGhhdmUgc2ltaWxhciBwZXJmb3JtYW5jZS4KCmBgYHtyfQpsb29fY29tcGFyZShsb28oZml0X2xpbiksIGxvbyhmaXRfbGluX3QpKQpgYGAKCiMgSGV0ZXJvc2tlZGFzdGljIGxpbmVhciBtb2RlbAoKSGV0ZXJvc2tlZGFzdGljaXR5IGFzc3VtZXMgdGhhdCB0aGUgdmFyaWF0aW9uIGFyb3VuZCB0aGUgbGluZWFyCm1lYW4gY2FuIGFsc28gdmFyeS4gV2UgY2FuIGFsbG93IHNpZ21hIHRvIGRlcGVuZCBvbiB5ZWFyLCB0b28uCkFsdGhvdWdoIHRoZSBhZGRpdGlvbmFsIGNvbXBvbmVudCBpcyB3cml0dGVuIGFzIGBzaWdtYSB
+IHllYXJgLCB0aGUKbG9nIGxpbmsgZnVuY3Rpb24gaXMgdXNlZCBhbmQgdGhlIG1vZGVsIGlzIGZvciBsb2coc2lnbWEpLiBgYmYoKWAgYWxsb3dzCmxpc3Rpbmcgc2V2ZXJhbCBmb3JtdWxhcy4KCgpgYGB7ciByZXN1bHRzPSdoaWRlJ30KZml0X2xpbl9oIDwtIGJybShiZih0ZW1wIH4geWVhciwKICAgICAgICAgICAgICAgICAgICBzaWdtYSB+IHllYXIpLAogICAgICAgICAgICAgICAgIGRhdGEgPSBkYXRhX2xpbiwgZmFtaWx5ID0gZ2F1c3NpYW4oKSwKICAgICAgICAgICAgICAgICBwcmlvciA9IHByaW9yKHN0dWRlbnRfdCgzLCAwLCAwLjAzKSwgY2xhc3M9J2InKSwKICAgICAgICAgICAgICAgICBzZWVkID0gU0VFRCwgcmVmcmVzaCA9IDApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuIFRoZSBiX3llYXIKcG9zdGVyaW9yIGxvb2tzIHNpbWlsYXIgYXMgYmVmb3JlLiBUaGUgcG9zdGVyaW9yIGZvciBzaWdtYV95ZWFyCmxvb2tzIGxpa2UgaGF2aW5nIG1vc3N0IG9mIHRoZSBtYSBmb3IgbmVnYXRpdmUgdmFsdWVzLCBpbmRpY2F0aW5nCmRlY3JlYXNlIGluIHRlbXBlcmF0dXJlIHZhcmlhdGlvbiBhcm91bmQgdGhlIG1lYW4uCgpgYGB7cn0KZml0X2xpbl9oCmBgYAoKSGlzdG9ncmFtIG9mIGJfeWVhciBhbmQgYl9zaWdtYV95ZWFyCgpgYGB7cn0KYXNfZHJhd3NfZGYoZml0X2xpbl9oKSB8PgogIG1jbWNfYXJlYXMocGFycz1jKCdiX3llYXInLCAnYl9zaWdtYV95ZWFyJykpCmBgYAoKQXMgbG9nKHgpIGlzIGFsbW9zdCBsaW5lYXIgd2hlbiB4IGlzIGNsb3NlIHRvIHplcm8sIHdlIGNhbiBzZWUgdGhhdCB0aGUKc2lnbWEgaXMgZGVjcmVhc2luZyBhYm91dCAxJSBwZXIgeWVhciAoOTUlIGludGVydmFsIGZyb20gMCUgdG8gMiUpLgoKUGxvdCBwb3N0ZXJpb3IgcHJlZGljdGl2ZSBkaXN0cmlidXRpb24gYXQgZWFjaCB5ZWFyIHVudGlsIDIwMzAKYGFkZF9wcmVkaWN0ZWRfZHJhd3MoKWAgdGFrZXMgdGhlIHllYXJzIGZyb20gdGhlIGRhdGEgYW5kIHVzZXMKYGZpdF9saW5faGAgdG8gbWFrZSB0aGUgcHJlZGljdGlvbnMuCgpgYGB7cn0KZGF0YV9saW4gfD4KICBhZGRfcm93KHllYXI9MjAyMzoyMDMwKSB8PgogIGFkZF9wcmVkaWN0ZWRfZHJhd3MoZml0X2xpbl9oKSB8PgogICMgcGxvdCBkYXRhCiAgZ2dwbG90KGFlcyh4PXllYXIsIHk9dGVtcCkpICsKICBnZW9tX3BvaW50KGNvbG9yPTIpICsKICAjIHBsb3QgbGluZXJpYmJvbiBmb3IgdGhlIGxpbmVhciBtb2RlbAogIHN0YXRfbGluZXJpYmJvbihhZXMoeSA9IC5wcmVkaWN0aW9uKSwgLndpZHRoID0gYyguOTUpLCBhbHBoYSA9IDEvMiwgY29sb3I9YnJld2VyLnBhbCg1LCAiQmx1ZXMiKVtbNV1dKSArCiAgIyBkZWNvcmF0aW9uCiAgc2NhbGVfZmlsbF9icmV3ZXIoKSsKICBsYWJzKHg9ICJZZWFyIiwgeSA9ICdTdW1tZXIgdGVtcC4gQEtpbHBpc2rDpHJ2aScpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSsKICBzY2F
sZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgxOTUwLDIwMzAsYnk9MTApKQpgYGAKCk1ha2UgcHJpb3Igc2Vuc2l0aXZpdHkgYW5hbHlzaXMgYnkgcG93ZXJzY2FsaW5nIGJvdGggcHJpb3IgYW5kIGxpa2VsaWhvb2QuCgpgYGB7cn0KcG93ZXJzY2FsZV9zZW5zaXRpdml0eShmaXRfbGluX2gpJHNlbnNpdGl2aXR5IHw+CiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgbXV0YXRlKGFjcm9zcyh3aGVyZShpcy5kb3VibGUpLCAgfm51bSgueCwgZGlnaXRzPTIpKSkKYGBgCgpXZSBjYW4gdXNlIGxlYXZlLW9uZS1vdXQgY3Jvc3MtdmFsaWRhdGlvbiB0byBjb21wYXJlIHRoZSBleHBlY3RlZCBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlLgoKTE9PIGNvbXBhcmlzb24gc2hvd3MgaG9tb3NrZWRhc3RpYyBub3JtYWwgYW5kIGhldGVyb3NrZWRhc3RpYwpub3JtYWwgbW9kZWxzIGhhdmUgc2ltaWxhciBwZXJmb3JtYW5jZXMuCgpgYGB7cn0KbG9vX2NvbXBhcmUobG9vKGZpdF9saW4pLCBsb28oZml0X2xpbl9oKSkKYGBgCgojIEhldGVyb3NrZWRhc3RpYyBub24tbGluZWFyIG1vZGVsCgpXZSBjYW4gdGVzdCB0aGUgbGluZWFyaXR5IGFzc3VtcHRpb24gYnkgdXNpbmcgbm9uLWxpbmVhciBzcGxpbmUKZnVuY3Rpb25zLCBieSB1aW5nIGBzKHllYXIpYCB0ZXJtcy4gU2FtcGxpbmcgaXMgc2xvd2VyIGFzIHRoZQpwb3N0ZXJpb3IgZ2V0cyBtb3JlIGNvbXBsZXguCgoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9zcGxpbmVfaCA8LSBicm0oYmYodGVtcCB+IHMoeWVhciksCiAgICAgICAgICAgICAgICAgICAgIHNpZ21hIH4gcyh5ZWFyKSksCiAgICAgICAgICAgICAgICAgIGRhdGEgPSBkYXRhX2xpbiwgZmFtaWx5ID0gZ2F1c3NpYW4oKSwKICAgICAgICAgICAgICAgICAgc2VlZCA9IFNFRUQsIHJlZnJlc2ggPSAwKQpgYGAKCldlIGdldCB3YXJuaW5ncyBhYm91dCBkaXZlcmdlbmNlcywgYW5kIHRyeSByZXJ1bm5pbmcgd2l0aCBoaWdoZXIKYWRhcHRfZGVsdGEsIHdoaWNoIGxlYWRzIHRvIHVzaW5nIHNtYWxsZXIgc3RlcCBzaXplcy4gT2Z0ZW4KYGFkYXB0X2RlbHRhPTAuOTk5YCBsZWFkcyB0byB2ZXJ5IHNsb3cgc2FtcGxpbmcsIGJ1dCB3aXRoIHRoaXMKc21hbGwgZGF0YSwgdGhpcyBpcyBub3QgYW4gaXNzdWUuCgpgYGB7cn0KZml0X3NwbGluZV9oIDwtIHVwZGF0ZShmaXRfc3BsaW5lX2gsIGNvbnRyb2wgPSBsaXN0KGFkYXB0X2RlbHRhPTAuOTk5KSkKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZS4gV2UncmUgbm90CmFueW1vcmUgYWJsZSB0byBtYWtlIGludGVycHJldGF0aW9uIG9mIHRoZSB0ZW1wZXJhdHVyZSBpbmNyZWFzZQpiYXNlZCBvbiB0aGlzIHN1bW1hcnkuIEZvciBzcGxpbmVzLCB3ZSBzZWUgcHJpb3Igc2NhbGVzIGBzZHNgIGZvcgp0aGUgc3BsaW5lIGNvZWZmaWNpZW50cy4KCmBgYHtyfQpmaXRfc3BsaW5lX2gKYGBgCgpXZSBjYW4gc3RpbGwgcGxvdCBwb3N0ZXJpb3IgcHJ
lZGljdGl2ZSBkaXN0cmlidXRpb24gYXQgZWFjaCB5ZWFyCnVudGlsIDIwMzAgYGFkZF9wcmVkaWN0ZWRfZHJhd3MoKWAgdGFrZXMgdGhlIHllYXJzIGZyb20gdGhlIGRhdGEKYW5kIHVzZXMgYGZpdF9saW5faGAgdG8gbWFrZSB0aGUgcHJlZGljdGlvbnMuCgpgYGB7cn0KZGF0YV9saW4gfD4KICBhZGRfcm93KHllYXI9MjAyMzoyMDMwKSB8PgogIGFkZF9wcmVkaWN0ZWRfZHJhd3MoZml0X3NwbGluZV9oKSB8PgogICMgcGxvdCBkYXRhCiAgZ2dwbG90KGFlcyh4PXllYXIsIHk9dGVtcCkpICsKICBnZW9tX3BvaW50KGNvbG9yPTIpICsKICAjIHBsb3QgbGluZXJpYmJvbiBmb3IgdGhlIGxpbmVhciBtb2RlbAogIHN0YXRfbGluZXJpYmJvbihhZXMoeSA9IC5wcmVkaWN0aW9uKSwgLndpZHRoID0gYyguOTUpLCBhbHBoYSA9IDEvMiwgY29sb3I9YnJld2VyLnBhbCg1LCAiQmx1ZXMiKVtbNV1dKSArCiAgIyBkZWNvcmF0aW9uCiAgc2NhbGVfZmlsbF9icmV3ZXIoKSsKICBsYWJzKHg9ICJZZWFyIiwgeSA9ICdTdW1tZXIgdGVtcC4gQEtpbHBpc2rDpHJ2aScpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgxOTUwLDIwMzAsYnk9MTApKQpgYGAKCkFuZCB3ZSBjYW4gdXNlIGxlYXZlLW9uZS1vdXQgY3Jvc3MtdmFsaWRhdGlvbiB0byBjb21wYXJlIHRoZQpleHBlY3RlZCBwcmVkaWN0aXZlIHBlcmZvcm1hbmNlLgoKTE9PIGNvbXBhcmlzb24gc2hvd3MgaG9tb3NrZWRhc3RpYyBub3JtYWwgbGluZWFyIGFuZApoZXRlcm9za2VkYXN0aWMgbm9ybWFsIHNwbGluZSBtb2RlbHMgaGF2ZSBzaW1pbGFyCnBlcmZvcm1hbmNlcy4gVGhlcmUgYXJlIG5vdCBlbm91Z2ggb2JzZXJ2YXRpb25zIHRvIG1ha2UgY2xlYXIKZGlmZmVyZW5jZSBiZXR3ZWVuIHRoZSBtb2RlbHMuCgpgYGB7cn0KbG9vX2NvbXBhcmUobG9vKGZpdF9saW4pLCBsb28oZml0X3NwbGluZV9oKSkKYGBgCgpGb3Igc3BsaW5lIGFuZCBvdGhlciBub24tcGFyYW1ldHJpYyBtb2RlbHMsIHdlIGNhbiB1c2UgcHJlZGljdGl2ZQplc3RpbWF0ZXMgYW5kIHByZWRpY3Rpb25zIHRvIGdldCBpbnRlcnByZXRhYmxlIHF1YW50aXRpZXMuIExldCdzCmV4YW1pbmUgdGhlIGRpZmZlcmVuY2Ugb2YgZXN0aW1hdGVkIGF2ZXJhZ2UgdGVtcGVyYXR1cmUgaW4geWVhcnMKMTk1MiBhbmQgMjAyMi4KCmBgYHtyfQp0ZW1wX2RpZmYgPC0gcG9zdGVyaW9yX2VwcmVkKGZpdF9zcGxpbmVfaCwgbmV3ZGF0YT1maWx0ZXIoZGF0YV9saW4seWVhcj09MTk1Mnx5ZWFyPT0yMDIyKSkgfD4KICBydmFyKCkgfD4KICBkaWZmKCkgfD4KICBhc19kcmF3c19kZigpIHw+CiAgc2V0X3ZhcmlhYmxlcygndGVtcF9kaWZmJykKCnRlbXBfZGlmZiA8LSBkYXRhX2xpbiB8PgogIGZpbHRlcih5ZWFyPT0xOTUyfHllYXI9PTIwMjIpIHw+CiAgYWRkX2VwcmVkX2RyYXdzKGZpdF9zcGxpbmVfaCkgfD4KICBwaXZvdF93aWRlcihpZF9jb2xzPS5kcmF3LCBuYW1lc19mcm9
tID0geWVhciwgdmFsdWVzX2Zyb20gPSAuZXByZWQpIHw+CiAgbXV0YXRlKHRlbXBfZGlmZiA9IGAyMDIyYC1gMTk1MmAsCiAgICAgICAgIC5jaGFpbiA9ICguZHJhdyAtIDEpICUvJSAxMDAwICsgMSwKICAgICAgICAgLml0ZXJhdGlvbiA9ICguZHJhdyAtIDEpICUlIDEwMDAgKyAxKSB8PgogIGFzX2RyYXdzX2RmKCkgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3RlbXBfZGlmZicpCmBgYAoKUG9zdGVyaW9yIGRpc3RyaWJ1dGlvbiBmb3IgYXZlcmFnZSBzdW1tZXIgdGVtcGVyYXR1cmUgaW5jcmVhc2UgZnJvbSAxOTUyIHRvIDIwMjIKCmBgYHtyfQp0ZW1wX2RpZmYgfD4KICBtY21jX2hpc3QoKQpgYGAKCjk1JSBwb3N0ZXJpb3IgaW50ZXJ2YWwgZm9yIGF2ZXJhZ2Ugc3VtbWVyIHRlbXBlcmF0dXJlIGluY3JlYXNlIGZyb20gMTk1MiB0byAyMDIyCgpgYGB7cn0KdGVtcF9kaWZmIHw+CiAgc3VtbWFyaXNlX2RyYXdzKH5xdWFudGlsZSgueCwgcHJvYnMgPSBjKDAuMDI1LCAwLjk3NSkpLAogICAgICAgICAgICAgICAgICB+bWNzZV9xdWFudGlsZSgueCwgcHJvYnMgPSBjKDAuMDI1LCAwLjk3NSkpLAogICAgICAgICAgICAgICAgICAubnVtX2FyZ3MgPSBsaXN0KGRpZ2l0cyA9IDIsIG5vdGF0aW9uID0gImRlYyIpKQpgYGAKCk1ha2UgcHJpb3Igc2Vuc2l0aXZpdHkgYW5hbHlzaXMgYnkgcG93ZXJzY2FsaW5nIGJvdGggcHJpb3IgYW5kCmxpa2VsaWhvb2Qgd2l0aCBmb2N1cyBvbiBhdmVyYWdlIHN1bW1lciB0ZW1wZXJhdHVyZSBpbmNyZWFzZSBmcm9tCjE5NTIgdG8gMjAyMi4KCmBgYHtyfQpwb3dlcnNjYWxlX3NlbnNpdGl2aXR5KGZpdF9zcGxpbmVfaCwgcHJlZGljdGlvbiA9IFwoeCwgLi4uKSB0ZW1wX2RpZmYsIG51bV9hcmdzPWxpc3QoZGlnaXRzPTIpCiAgICAgICAgICAgICAgICAgICAgICAgKSRzZW5zaXRpdml0eSB8PgogICAgICAgICAgICAgICAgICAgICAgICAgZmlsdGVyKHZhcmlhYmxlPT0ndGVtcF9kaWZmJykgfD4KICAgICAgICAgICAgICAgICAgICAgICAgIG11dGF0ZShhY3Jvc3Mod2hlcmUoaXMuZG91YmxlKSwgIH5udW0oLngsIGRpZ2l0cz0yKSkpCmBgYAoKUHJvYmFiaWxpdHkgdGhhdCB0aGUgYXZlcmFnZSBzdW1tZXIgdGVtcGVyYXR1cmUgaGFzIGluY3JlYXNlZCBmcm9tCjE5NTIgdG8gMjAyMiBpcyA5OS41JS4KCmBgYHtyfQp0ZW1wX2RpZmYgfD4KICBtdXRhdGUoSV90ZW1wX2RpZmZfZ3RfMCA9IHRlbXBfZGlmZj4wLAogICAgICAgICB0ZW1wX2RpZmYgPSBOVUxMKSB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nSV90ZW1wX2RpZmZfZ3RfMCcpIHw+CiAgc3VtbWFyaXNlX2RyYXdzKG1lYW4sIG1jc2VfbWVhbikKYGBgCgoKIyBDb21wYXJpc29uIG9mIGsgZ3JvdXBzIHdpdGggaGllcmFyY2hpY2FsIG5vcm1hbCBtb2RlbHMKCkxvYWQgZmFjdG9yeSBkYXRhLCB3aGljaCBjb250YWluIDUgcXVhbGl0eSBtZWFzdXJlbWVudHMgZm9yIGVhY2ggb2YKNiBtYWNoaW5lcy4gV2UncmUgaW50ZXJlc3RlZCBpbiBhbmF
seXNpbmcgYXJlIHRoZSBxdWFsaXR5IGRpZmZlcmVuY2VzCmJldHdlZW4gdGhlIG1hY2hpbmVzLgoKYGBge3J9CmZhY3RvcnkgPC0gcmVhZC50YWJsZSh1cmwoJ2h0dHBzOi8vcmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbS9hdmVodGFyaS9CREFfY291cnNlX0FhbHRvL21hc3Rlci9ycGFja2FnZS9kYXRhLXJhdy9mYWN0b3J5LnR4dCcpKQpjb2xuYW1lcyhmYWN0b3J5KSA8LSAxOjYKZmFjdG9yeQpgYGAKCldlIHBpdm90IHRoZSBkYXRhIHRvIGxvbmcgZm9ybWF0CgpgYGB7cn0KZmFjdG9yeSA8LSBmYWN0b3J5IHw+CiAgcGl2b3RfbG9uZ2VyKGNvbHMgPSBldmVyeXRoaW5nKCksCiAgICAgICAgICAgICAgIG5hbWVzX3RvID0gJ21hY2hpbmUnLAogICAgICAgICAgICAgICB2YWx1ZXNfdG8gPSAncXVhbGl0eScpCmZhY3RvcnkKYGBgCgojIyBQb29sZWQgbW9kZWwKCkFzIGNvbXBhcmlzb24gbWFrZSBhbHNvIHBvb2xlZCBtb2RlbAoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9wb29sZWQgPC0gYnJtKHF1YWxpdHkgfiAxLCBkYXRhID0gZmFjdG9yeSwgcmVmcmVzaD0wKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlLgoKYGBge3J9CmZpdF9wb29sZWQKYGBgCgojIyBTZXBhcmF0ZSBtb2RlbAoKQXMgY29tcGFyaXNvbiBtYWtlIGFsc28gc2VwcmF0ZSBtb2RlbC4gVG8gbWFrZSBpdCBjb21wbGV0ZWx5CnNlcGFyYXRlIHdlIG5lZWQgdG8gaGF2ZSBkaWZmZXJlbnQgc2lnbWEgZm9yIGVhY2ggbWFjaGluZSwgdG9vLgoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9zZXBhcmF0ZSA8LSBicm0oYmYocXVhbGl0eSB+IDAgKyBtYWNoaW5lLAogICAgICAgICAgICAgICAgICAgICAgIHNpZ21hIH4gMCArIG1hY2hpbmUpLAogICAgICAgICAgICAgICAgICAgIGRhdGEgPSBmYWN0b3J5LCByZWZyZXNoPTApCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuCgpgYGB7cn0KZml0X3NlcGFyYXRlCmBgYAoKIyBDb21tb24gdmFyaWFuY2UgaGllcmFyY2hpY2FsIG1vZGVsIChBTk9WQSkKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfaGllciA8LSBicm0ocXVhbGl0eSB+IDEgKyAoMSB8IG1hY2hpbmUpLAogICAgICAgICAgICAgICAgZGF0YSA9IGZhY3RvcnksIHJlZnJlc2ggPSAwKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlLgoKYGBge3J9CmZpdF9oaWVyCmBgYAoKTE9PIGNvbXBhcmlzb24gc2hvd3MgdGhlIGhpZXJhcmNoaWNhbCBtb2RlbCBpcyB0aGUgYmVzdC4gVGhlCmRpZmZlcmVuY2VzIGFyZSBzbWFsbCBhcyB0aGUgbnVtYmVyIG9mIG9ic2VydmF0aW9ucyBpcyBzbWFsbCBhbmQKdGhlcmUgaXMgYSBjb25zaWRlcmFibGUgcHJlZGljdGlvbiAoYWxlYXRvcmljKSB1bmNlcnRhaW50eS4KCmBgYHtyfQpsb29fY29tcGFyZShsb28oZml0X3Bvb2xlZCksIGxvbyhmaXRfc2VwYXJhdGUpLCB
sb28oZml0X2hpZXIpKQpgYGAKCkRpZmZlcmVudCBtb2RlbCBwb3N0ZXJpb3IgZGlzdHJpYnV0aW9ucyBmb3IgdGhlIG1lYW4KcXVhbGl0eS4gUG9vbGVkIG1vZGVsIGlnbm9yZXMgdGhlIHZhcml0aW9uIGJldHdlZW4KbWFjaGluZXMuIFNlcGFyYXRlIG1vZGVsIGRvZXNuJ3QgdGFrZSBiZW5lZml0IGZyb20gdGhlIHNpbWlsYXJpeSBvZgp0aGUgbWFjaGluZXMgYW5kIGhhcyBoaWdoZXIgdW5jZXJ0YWludHkuCgpgYGB7cn0KcGggPC0gZml0X2hpZXIgfD4KICBzcHJlYWRfcnZhcnMoYl9JbnRlcmNlcHQsIHJfbWFjaGluZVttYWNoaW5lLF0pIHw+CiAgbXV0YXRlKG1hY2hpbmVfbWVhbiA9IGJfSW50ZXJjZXB0ICsgcl9tYWNoaW5lKSB8PgogIGdncGxvdChhZXMoeGRpc3Q9bWFjaGluZV9tZWFuLCB5PW1hY2hpbmUpKSArCiAgc3RhdF9oYWxmZXllKCkgKwogIHNjYWxlX3lfY29udGludW91cyhicmVha3M9MTo2KSArCiAgbGFicyh4PSdRdWFsaXR5JywgeT0nTWFjaGluZScsIHRpdGxlPSdIaWVyYXJjaGljYWwnKQoKcHMgPC0gZml0X3NlcGFyYXRlIHw+CiAgYXNfZHJhd3NfZGYoKSB8PgogIHN1YnNldF9kcmF3cyh2YXJpYWJsZT0nYl9tYWNoaW5lJywgcmVnZXg9VFJVRSkgfD4KICBzZXRfdmFyaWFibGVzKHBhc3RlMCgnYl9tYWNoaW5lWycsIDE6NiwgJ10nKSkgfD4KICBhc19kcmF3c19ydmFycygpIHw+CiAgc3ByZWFkX3J2YXJzKGJfbWFjaGluZVttYWNoaW5lXSkgfD4KICBtdXRhdGUobWFjaGluZV9tZWFuID0gYl9tYWNoaW5lKSB8PgogIGdncGxvdChhZXMoeGRpc3Q9bWFjaGluZV9tZWFuLCB5PW1hY2hpbmUpKSArCiAgc3RhdF9oYWxmZXllKCkgKwogIHNjYWxlX3lfY29udGludW91cyhicmVha3M9MTo2KSArCiAgbGFicyh4PSdRdWFsaXR5JywgeT0nTWFjaGluZScsIHRpdGxlPSdTZXBhcmF0ZScpCgpwcCA8LSBmaXRfcG9vbGVkIHw+CiAgc3ByZWFkX3J2YXJzKGJfSW50ZXJjZXB0KSB8PgogIG11dGF0ZShtYWNoaW5lX21lYW4gPSBiX0ludGVyY2VwdCkgfD4KICBnZ3Bsb3QoYWVzKHhkaXN0PW1hY2hpbmVfbWVhbiwgeT0wKSkgKwogIHN0YXRfaGFsZmV5ZSgpICsKICBzY2FsZV95X2NvbnRpbnVvdXMoYnJlYWtzPU5VTEwpICsKICBsYWJzKHg9J1F1YWxpdHknLCB5PSdBbGwgbWFjaGluZXMnLCB0aXRsZT0nUG9vbGVkJykKCihwcCAvIHBzIC8gcGgpICogeGxpbShjKDUwLDE0MCkpCmBgYAoKTWFrZSBwcmlvciBzZW5zaXRpdml0eSBhbmFseXNpcyBieSBwb3dlcnNjYWxpbmcgYm90aCBwcmlvciBhbmQKbGlrZWxpaG9vZCB3aXRoIGZvY3VzIG9uIG1lYW4gcXVhbGl0eSBvZiBlYWNoIG1hY2hpbmUuIFdlIHNlZSBubwpwcmlvciBzZW5zaXRpdml0eS4KCmBgYHtyfQptYWNoaW5lX21lYW4gPC0gZml0X2hpZXIgfD4KICBhc19kcmF3c19kZigpIHw+CiAgbXV0YXRlKGFjcm9zcyhtYXRjaGVzKCdyX21hY2hpbmUnKSwgfiAueCAtIGJfSW50ZXJjZXB0KSkgfD4KICBzdWJzZXRfZHJhd3ModmFyaWFibGU9J3JfbWFjaGluZScsIHJlZ2V
4PVRSVUUpIHw+CiAgc2V0X3ZhcmlhYmxlcyhwYXN0ZTAoJ21hY2hpbmVfbWVhblsnLCAxOjYsICddJykpCnBvd2Vyc2NhbGVfc2Vuc2l0aXZpdHkoZml0X2hpZXIsIHByZWRpY3Rpb24gPSBcKHgsIC4uLikgbWFjaGluZV9tZWFuLCBudW1fYXJncz1saXN0KGRpZ2l0cz0yKQogICAgICAgICAgICAgICAgICAgICAgICkkc2Vuc2l0aXZpdHkgfD4KICAgICAgICAgICAgICAgICAgICAgICAgIGZpbHRlcihzdHJfZGV0ZWN0KHZhcmlhYmxlLCdtYWNoaW5lX21lYW4nKSkgfD4KICAgICAgICAgICAgICAgICAgICAgICAgIG11dGF0ZShhY3Jvc3Mod2hlcmUoaXMuZG91YmxlKSwgIH5udW0oLngsIGRpZ2l0cz0yKSkpCmBgYAoKCiMgSGllcmFyY2hpY2FsIGJpbm9taWFsIG1vZGVsCgpbU29yYWZlbmliIFRveGljaXR5IERhdGFzZXQgaW4gYG1ldGFkYXRgIFIgcGFja2FnZV0oaHR0cHM6Ly93dmllY2h0Yi5naXRodWIuaW8vbWV0YWRhdC9yZWZlcmVuY2UvZGF0LnVyc2lubzIwMjEuaHRtbCkKaW5jbHVkZXMgcmVzdWx0cyBmcm0gMTMgc3R1ZGllcyBpbnZlc3RpZ2F0aW5nIHRoZSBvY2N1cnJlbmNlIG9mCmRvc2UgbGltaXRpbmcgdG94aWNpdGllcyAoRExUcykgYXQgZGlmZmVyZW50IGRvc2VzIG9mIFNvcmFmZW5pYi4KCkxvYWQgZGF0YQoKYGBge3J9CmxvYWQodXJsKCdodHRwczovL2dpdGh1Yi5jb20vd3ZpZWNodGIvbWV0YWRhdC9yYXcvbWFzdGVyL2RhdGEvZGF0LnVyc2lubzIwMjEucmRhJykpCmhlYWQoZGF0LnVyc2lubzIwMjEpCmBgYAoKUG9vbGVkIG1vZGVsIGFzc3VtZXMgYWxsIHN0dWRpZXMgaGF2ZSB0aGUgc2FtZSBkb3NlIGVmZmVjdAoocmVtaW5kZXI6IGB+IGRvc2VgIGlzIGVxdWl2YWxlbnQgdG8gYH4gMSArIGRvc2VgKQoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9wb29sZWQgPC0gYnJtKGV2ZW50cyB8IHRyaWFscyh0b3RhbCkgfiBkb3NlLAogICAgICAgICAgICAgICAgICBwcmlvciA9IGMocHJpb3Ioc3R1ZGVudF90KDcsIDAsIDEuNSksIGNsYXNzPSdJbnRlcmNlcHQnKSwKICAgICAgICAgICAgICAgICAgICAgICAgICAgIHByaW9yKG5vcm1hbCgwLCAxKSwgY2xhc3M9J2InKSksCiAgICAgICAgICAgICAgICAgIGZhbWlseT1iaW5vbWlhbCgpLCBkYXRhPWRhdC51cnNpbm8yMDIxKQpgYGAKCkNoZWNrIHRoZSBzdW1tYXJ5IG9mIHRoZSBwb3N0ZXJpb3IgYW5kIGNvbnZlcmdlbmNlCgpgYGB7cn0KZml0X3Bvb2xlZApgYGAKCkRvc2UgY29lZmZpY2llbnQgc2VlbXMgdG8gYmUgdmVyeSBzbWFsbC4gTG9va2luZyBhdCB0aGUgcG9zdGVyaW9yLAp3ZSBzZWUgdGhhdCBpdCBpcyBwb3NpdGl2ZSB3aXRoIGhpZ2ggcHJvYmFiaWxpdHkuIAoKYGBge3J9CmZpdF9wb29sZWQgfD4KICBhc19kcmF3cygpIHw+CiAgc3Vic2V0X2RyYXdzKHZhcmlhYmxlPSdiX2Rvc2UnKSB8PgogIHN1bW1hcmlzZV9kcmF3cyh+cXVhbnRpbGUoLngsIHByb2JzID0gYygwLjAyNSwgMC45NzUpKSwgfm1jc2VfcXVhbnRpbGUoLngsIHByb2JzID0gYygwLjA
yNSwgMC45NzUpKSkKYGBgCgpUaGUgZG9zZSB3YXMgcmVwb3J0ZWQgaW4gbWcsIGFuZCBtb3N0IHZhbHVlcyBhcmUgaW4gaHVuZHJlZHMuIEl0IGlzCm9mdGVuIHNlbnNpYmxlIHRvIHN3aXRjaCB0byBhIHNjYWxlIGluIHdoaWNoIHRoZSByYW5nZSBvZiB2YWx1ZXMgaXMKY2xvc2VyIHRvIHVuaXQgcmFuZ2UuIEluIHRoaXMgY2FzZSBpdCBpcyBuYXR1cmFsIHRvIHVzZSBnIGluc3RlYWQKb2YgbWcuCgpgYGB7cn0KZGF0LnVyc2lubzIwMjEgPC0gZGF0LnVyc2lubzIwMjEgfD4KICBtdXRhdGUoZG9zZWcgPSBkb3NlLzEwMCkKYGBgCgpGaXQgdGhlIHBvb2xlZCBtb2RlbCBhZ2FpbiB1aW5nIGBkb3NlZ2AKCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfcG9vbGVkIDwtIGJybShldmVudHMgfCB0cmlhbHModG90YWwpIH4gZG9zZWcsCiAgICAgICAgICAgICAgICAgIHByaW9yID0gYyhwcmlvcihzdHVkZW50X3QoNywgMCwgMS41KSwgY2xhc3M9J0ludGVyY2VwdCcpLAogICAgICAgICAgICAgICAgICAgICAgICAgICAgcHJpb3Iobm9ybWFsKDAsIDEpLCBjbGFzcz0nYicpKSwKICAgICAgICAgICAgICAgICAgZmFtaWx5PWJpbm9taWFsKCksIGRhdGE9ZGF0LnVyc2lubzIwMjEpCmBgYAoKQ2hlY2sgdGhlIHN1bW1hcnkgb2YgdGhlIHBvc3RlcmlvciBhbmQgY29udmVyZ2VuY2UuCgpgYGB7cn0KZml0X3Bvb2xlZApgYGAKCk5vdyBpdCBpcyBlYXNpZXIgdG8gaW50ZXJwcmV0IHRoZSBwcmVzZW50ZWQgdmFsdWVzLiAKU2VwYXJhdGUgbW9kZWwgYXNzdW1lcyBhbGwgc3R1ZGllcyBoYXZlIGRpZmZlcmVudCBkb3NlIGVmZmVjdC4KSXQgd291bGQgYmUgYSBiaXQgY29tcGxpY2F0ZWQgdG8gc2V0IGEgZGlmZmVyZW50IHByaW9yIG9uIHN0dWR5IHNwZWNpZmljCmludGVyY2VwdHMgYW5kIG90aGVyIGNvZWZmaWNpZW50cywgc28gd2UgdXNlIHRoZSBhbWUgcHJpb3IgZm9yIGFsbC4KCmBgYHtyIHJlc3VsdHM9J2hpZGUnfQpmaXRfc2VwYXJhdGUgPC0gYnJtKGV2ZW50cyB8IHRyaWFscyh0b3RhbCkgfiAwICsgc3R1ZHkgKyBkb3NlZzpzdHVkeSwKICAgICAgICAgICAgICAgICAgICBwcmlvcj1wcmlvcihzdHVkZW50X3QoNywgMCwgMS41KSwgY2xhc3M9J2InKSwKICAgICAgICAgICAgICAgICAgICBmYW1pbHk9Ymlub21pYWwoKSwgZGF0YT1kYXQudXJzaW5vMjAyMSkKYGBgCgpDaGVjayB0aGUgc3VtbWFyeSBvZiB0aGUgcG9zdGVyaW9yIGFuZCBjb252ZXJnZW5jZS4KCmBgYHtyfQpmaXRfc2VwYXJhdGUKYGBgCgpIaWVyYXJjaGljYWwgbW9kZWwgYXNzdW1lcyBjb21tb24gbWVhbiBlZmZlY3QgYW5kIHZhcmlhdGlvbiBhcm91bmQgd2l0aCBub3JtYWwgcG9wdWxhdGlvbiBwcmlvcgoocmVtaW5kZXI6IGB+IGRvc2VnICsgKGRvc2VnIHwgc3R1ZHkpYCBpcyBlcXVpdmFsZW50IHRvIGB+IDEgKyBkb3NlZyArICgxICsgZG9zZWcgfCBzdHVkeSlgKQoKYGBge3IgcmVzdWx0cz0naGlkZSd9CmZpdF9oaWVyIDwtIGJybShldmVudHMgfCB0cmlhbHModG90YWw
pIH4gZG9zZWcgKyAoZG9zZWcgfCBzdHVkeSksCiAgICAgICAgICAgICAgICAgICAgcHJpb3I9YyhwcmlvcihzdHVkZW50X3QoNywgMCwgMS41KSwgY2xhc3M9J0ludGVyY2VwdCcpLAogICAgICAgICAgICAgICAgICAgICAgICAgICAgcHJpb3Iobm9ybWFsKDAsIDEpLCBjbGFzcz0nYicpKSwKICAgICAgICAgICAgICAgIGZhbWlseT1iaW5vbWlhbCgpLCBkYXRhPWRhdC51cnNpbm8yMDIxKQpgYGAKCldlIHNlZW0gc29tZSBkaXZlcmdlbmNlcyBhbmQgcmVwZWF0IHdpdGggaGlnaGVyIGFkYXB0X2RlbHRhCgpgYGB7ciByZXN1bHRzPSdoaWRlJ30KZml0X2hpZXIgPC0gdXBkYXRlKGZpdF9oaWVyLCBjb250cm9sPWxpc3QoYWRhcHRfZGVsdGE9MC45OSkpCmBgYAoKTE9PLUNWIGNvbXBhcmlzb24KCmBgYHtyfQpsb29fY29tcGFyZShsb28oZml0X3Bvb2xlZCksIGxvbyhmaXRfc2VwYXJhdGUpLCBsb28oZml0X2hpZXIpKQpgYGAKCldlIGdldCB3YXJuaW5ncyBhYm91dCBQYXJldG8gaydzID4gMC43IGluIFBTSVMtTE9PIGZvciBzZXBhcmF0ZQptb2RlbCwgYnV0IGFzIGluIHRoYXQgY2FzZSB0aGUgTE9PLUNWIGVzdGltYXRlIGlzIHVzdWFsbHkKb3Zlcm9wdGltaXN0aWMgYW5kIHRoZSBzZXBhcmF0ZSBtb2RlbCBpcyB0aGUgd29yc3QsIHRoZXJlIGlzIG5vCm5lZWQgdG8gdXNlIG1vcmUgYWNjdXJhdGUgY29tcHV0YXRpb24uCgpIaWVyYXJjaGljYWwgbW9kZWwgaGFzIGJldHRlciBlbHBkIHRoYW4gdGhlIHBvb2xlZCwgYnV0IGRpZmZlcmVuY2UKaXMgbmVnbGlnaWJsZS4gSG93ZXZlciwgd2hlbiB3ZSBsb29rIGF0IHRoZSBzdHVkeSBzcGVjaWZpYwpwYXJhbWV0ZXJzLCB3ZSBzZWUgdGhhdCB0aGUgTWlsbGVyIHN0dWR5IGhhcyBoaWdoZXIgaW50ZXJjZXB0IChtb3JlCmV2ZW50cykuCgpgYGB7cn0KbWNtY19hcmVhcyhhc19kcmF3c19kZihmaXRfaGllciksIHJlZ2V4X3BhcnM9J3Jfc3R1ZHlcXFsuKkludGVyY2VwdCcpCmBgYAoKClRoZXJlIGFyZSBubyBkaWZmZXJlbmNlcyBpbiBzbG9wZXMuCgpgYGB7cn0KbWNtY19hcmVhcyhhc19kcmF3c19kZihmaXRfaGllciksIHJlZ2V4X3BhcnM9J3Jfc3R1ZHlcXFsuKmRvc2VnJykKYGBgCgoKVGhlIHBvcHVsYXRpb24gbGV2ZWwgY29lZmZpY2llbnQgZm9yIHRoZSBkb3NlIGlzIGNsZWFybHkgbGFyZ2VyIHRoYW4gMAoKYGBge3J9Cm1jbWNfYXJlYXMoYXNfZHJhd3NfZGYoZml0X2hpZXIpLCByZWdleF9wYXJzPSdiX2Rvc2VnJykgKwogIGdlb21fdmxpbmUoeGludGVyY2VwdD0wLCBsaW5ldHlwZT0nZGFzaGVkJykgKwogIHhsaW0oYygwLDEpKQpgYGAKCk1ha2UgcHJpb3Igc2Vuc2l0aXZpdHkgYW5hbHlzaXMgYnkgcG93ZXJzY2FsaW5nIGJvdGggcHJpb3IgYW5kIGxpa2VsaWhvb2QuCgpgYGB7cn0KcG93ZXJzY2FsZV9zZW5zaXRpdml0eShmaXRfaGllciwgdmFyaWFibGU9J2JfZG9zZWcnCiAgICAgICAgICAgICAgICAgICAgICAgKSRzZW5zaXRpdml0eSB8PgogICAgICAgICAgICAgICAgICAgICA
gICAgbXV0YXRlKGFjcm9zcyh3aGVyZShpcy5kb3VibGUpLCAgfm51bSgueCwgZGlnaXRzPTIpKSkKYGBgCgpUaGUgcG9zdGVyaW9yIGZvciB0aGUgcHJvYmFiaWxpdHkgb2YgZXZlbnQgZ2l2ZW4gY2VydGFpbiBkb3NlIGFuZCBhIG5ldyBzdHVkeS4KCmBgYHtyfQpkYXRhLmZyYW1lKHN0dWR5PSduZXcnLAogICAgICAgICAgIGRvc2VnPXNlcSgwLjEsMSxieT0wLjEpLAogICAgICAgICAgIHRvdGFsPTEpIHw+CiAgYWRkX2xpbnByZWRfZHJhd3MoZml0X2hpZXIsIHRyYW5zZm9ybT1UUlVFLCBhbGxvd19uZXdfbGV2ZWxzPVRSVUUpIHw+CiAgZ2dwbG90KGFlcyh4PWRvc2VnLCB5PS5saW5wcmVkKSkgKwogIHN0YXRfbGluZXJpYmJvbigud2lkdGggPSBjKC45NSksIGFscGhhID0gMS8yLCBjb2xvcj1icmV3ZXIucGFsKDUsICJCbHVlcyIpW1s1XV0pICsKICBzY2FsZV9maWxsX2JyZXdlcigpKwogIGxhYnMoeD0gIkRvc2UgKGcpIiwgeSA9ICdQcm9iYWJpbGl0eSBvZiBldmVudCcpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSArCiAgZ2VvbV9obGluZSh5aW50ZXJjZXB0PTApICsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgwLjEsMSxieT0wLjEpKQpgYGAKCklmIHBsb3QgaW5kaXZpZHVhbCBwb3N0ZXJpb3IgZHJhd3MsIHdlIHNlZSB0aGF0IHRoZXJlIGlzIGEgbG90IG9mCnVuY2VydGFpbnR5IGFib3V0IHRoZSBvdmVyYWxsIHByb2JhYmlsaXR5IChleHBsYWluZWQgYnkgdGhlCnZhcmlhdGlvbiBpbiBJbnRlcmNlcHQgaW4gZGlmZmVyZW50IHN0dWRpZXMpLCBidXQgbGVzcyB1bmNlcnRhaW50eQphYm91dCB0aGUgc2xvcGUuCgpgYGB7cn0KZGF0YS5mcmFtZShzdHVkeT0nbmV3JywKICAgICAgICAgICBkb3NlZz1zZXEoMC4xLDEsYnk9MC4xKSwKICAgICAgICAgICB0b3RhbD0xKSB8PgogIGFkZF9saW5wcmVkX2RyYXdzKGZpdF9oaWVyLCB0cmFuc2Zvcm09VFJVRSwgYWxsb3dfbmV3X2xldmVscz1UUlVFLCBuZHJhd3M9MTAwKSB8PgogIGdncGxvdChhZXMoeD1kb3NlZywgeT0ubGlucHJlZCkpICsKICBnZW9tX2xpbmUoYWVzKGdyb3VwPS5kcmF3KSwgYWxwaGEgPSAxLzIsIGNvbG9yID0gYnJld2VyLnBhbCg1LCAiQmx1ZXMiKVtbM11dKSsKICBzY2FsZV9maWxsX2JyZXdlcigpKwogIGxhYnMoeD0gIkRvc2UgKGcpIiwgeSA9ICdQcm9iYWJpbGl0eSBvZiBldmVudCcpICsKICB0aGVtZShsZWdlbmQucG9zaXRpb249Im5vbmUiKSArCiAgZ2VvbV9obGluZSh5aW50ZXJjZXB0PTApICsKICBzY2FsZV94X2NvbnRpbnVvdXMoYnJlYWtzPXNlcSgwLjEsMSxieT0wLjEpKQpgYGAKClBvc3RlcmlvciBwcmVkaWN0aXZlIGNoZWNraW5nIHNob3dpbmcgdGhlIG9ic2VydmVkIGFuZCBwcmVkaWN0ZWQgbnVtYmVyIG9mIGV2ZW50cy4KCmBgYHtyfQpwcF9jaGVjayhmaXRfaGllciwgdHlwZSA9ICJyaWJib25fZ3JvdXBlZCIsIGdyb3VwPSJzdHVkeSIpCmBgYAoKPGJyIC8+CgojIExpY2Vuc2VzIHsudW5udW1iZXJlZH0KCio
gQ29kZSAmY29weTsgMjAxNy0yMDI0LCBBa2kgVmVodGFyaSwgbGljZW5zZWQgdW5kZXIgQlNELTMuCiogVGV4dCAmY29weTsgMjAxNy0yMDI0LCBBa2kgVmVodGFyaSwgbGljZW5zZWQgdW5kZXIgQ0MtQlktTkMgNC4wLgo=