diff --git a/R/RcppExports.R b/R/RcppExports.R index 23369eb..84fd44d 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -3,7 +3,7 @@ #' Financial options using a Milstein discretisation #' -#' Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, using a Milstein discretisation. +#' Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, Giles (2008), using a Milstein discretisation. #' #' This function is based on GPL-2 C++ code by Mike Giles. #' @@ -26,7 +26,7 @@ #' @author Mike Giles #' #' @references -#' M.B. Giles. 'Improved multilevel Monte Carlo convergence using the Milstein scheme', p.343-358 in \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}, Springer, 2007. +#' Giles, M. (2008) 'Improved Multilevel Monte Carlo Convergence using the Milstein Scheme', in A. Keller, S. Heinrich, and H. Niederreiter (eds) \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}. Berlin, Heidelberg: Springer, pp. 343–358. Available at: \url{https://doi.org/10.1007/978-3-540-74496-2_20}. #' #' @examples #' \dontrun{ @@ -39,41 +39,40 @@ #' # -- different random number generation #' # -- switch to S_0=100 #' -#' M <- 2 # refinement cost factor #' N0 <- 200 # initial samples on coarse levels #' Lmin <- 2 # minimum refinement level #' Lmax <- 10 # maximum refinement level #' #' test.res <- list() #' for(option in 1:5) { -#' if(option==1) { +#' if(option == 1) { #' cat("\n ---- Computing European call ---- \n") #' N <- 20000 # samples for convergence tests #' L <- 8 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -#' } else if(option==2) { +#' } else if(option == 2) { #' cat("\n ---- Computing Asian call ---- \n") #' N <- 20000 # samples for convergence tests #' L <- 8 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -#' } else if(option==3) { +#' } else if(option == 3) { #' cat("\n ---- Computing lookback call ---- \n") #' N <- 20000 # samples for convergence tests #' L <- 10 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -#' } else if(option==4) { +#' } else if(option == 4) { #' cat("\n ---- Computing digital call ---- \n") #' N <- 200000 # samples for convergence tests #' L <- 8 # levels for convergence tests #' Eps <- c(0.01, 0.02, 0.05, 0.1, 0.2) -#' } else if(option==5) { +#' } else if(option == 5) { #' cat("\n ---- Computing barrier call ---- \n") #' N <- 200000 # samples for convergence tests #' L <- 8 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) #' } #' -#' test.res[[option]] <- mlmc.test(mcqmc06_l, M, N, L, N0, Eps, Lmin, Lmax, option=option) +#' test.res[[option]] <- mlmc.test(mcqmc06_l, N, L, N0, Eps, Lmin, Lmax, option = option) #' #' # plot results #' plot(test.res[[option]]) @@ -81,7 +80,7 @@ #' } #' #' # The level sampler can be called directly to retrieve the relevant level sums: -#' mcqmc06_l(l=7, N=10, option=1) +#' mcqmc06_l(l = 7, N = 10, option = 1) #' #' @export mcqmc06_l <- function(l, N, option) { diff --git a/R/hex.R b/R/hex.R index 308e0a7..b92e026 100644 --- a/R/hex.R +++ b/R/hex.R @@ -14,7 +14,7 @@ make_hex <- function() { p_size = 5, h_fill = "#EDEDED", h_color = "#002147", - url = "www.louisaslett.com", + url = "mlmc.louisaslett.com", u_size = 1.5, u_family = "mono", white_around_sticker = TRUE, diff --git a/R/mlmc.R b/R/mlmc.R index 6537590..a618bb4 100644 --- a/R/mlmc.R +++ b/R/mlmc.R @@ -10,8 +10,9 @@ #' Consider a sequence \eqn{P_0, P_1, \ldots}, which approximates \eqn{P_L} 
with increasing accuracy, but also increasing cost, we have the simple identity #' \deqn{E[P_L] = E[P_0] + \sum_{l=1}^L E[P_l-P_{l-1}],} #' and therefore we can use the following unbiased estimator for \eqn{E[P_L]}, -#' \deqn{N_0^{-1} \sum_{n=1}^{N_0} P_0^{(0,n)} + \sum_{l=1}^L \{ N_l^{-1} \sum_{n=1}^{N_l} (P_l^{(l,n)} - P_{l-1}^{(l,n)}) \}} -#' with the inclusion of the level \eqn{l} in the superscript \eqn{(l,n)} indicating that the samples used at each level of correction are independent. +#' \deqn{N_0^{-1} \sum_{n=1}^{N_0} P_0^{(0,n)} + \sum_{l=1}^L \left\{ N_l^{-1} \sum_{n=1}^{N_l} \left(P_l^{(l,n)} - P_{l-1}^{(l,n)}\right) \right\}} +#' where \eqn{N_l} samples are produced at level \eqn{l}. +#' The inclusion of the level \eqn{l} in the superscript \eqn{(l,n)} indicates that the samples used at each level of correction are independent. #' #' Set \eqn{C_0}, and \eqn{V_0} to be the cost and variance of one sample of \eqn{P_0}, and \eqn{C_l, V_l} to be the cost and variance of one sample of \eqn{P_l - P_{l-1}}, then the overall cost and variance of the multilevel estimator is \eqn{\sum_{l=0}^L N_l C_l} and \eqn{\sum_{l=0}^L N_l^{-1} V_l}, respectively. #' @@ -26,11 +27,11 @@ #' @author Tigran Nagapetyan #' #' @references -#' M.B. Giles. Multilevel Monte Carlo path simulation. \emph{Operations Research}, 56(3):607-617, 2008. +#' Giles, M.B. (2008) 'Multilevel Monte Carlo Path Simulation', \emph{Operations Research}, 56(3), pp. 607–617. Available at: \url{https://doi.org/10.1287/opre.1070.0496}. #' -#' M.B. Giles. Multilevel Monte Carlo methods. \emph{Acta Numerica}, 24:259-328, 2015. +#' Giles, M.B. (2015) 'Multilevel Monte Carlo methods', \emph{Acta Numerica}, 24, pp. 259–328. Available at: \url{https://doi.org/10.1017/S096249291500001X}. #' -#' S. Heinrich. Monte Carlo complexity of global solution of integral equations. \emph{Journal of Complexity}, 14(2):151-175, 1998. +#' Heinrich, S. (1998) 'Monte Carlo Complexity of Global Solution of Integral Equations', \emph{Journal of Complexity}, 14(2), pp. 151–175. Available at: \url{https://doi.org/10.1006/jcom.1998.0471}. #' #' @param Lmin #' the minimum level of refinement. Must be \eqn{\ge 2}. @@ -49,11 +50,12 @@ #' #' The user supplied function should return a named list containing one element named \code{sums} and second named \code{cost}, where: #' \describe{ -#' \item{\code{sums}}{is a vector of length two \eqn{(\sum Y_i, \sum Y_i^2)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.} -#' \item{\code{cost}}{is a scalar with the cost of the number of paths simulated.} +#' \item{\code{sums}}{is a vector of length two \eqn{\left(\sum Y_i, \sum Y_i^2\right)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.} +#' \item{\code{cost}}{is a scalar with the total cost of the paths simulated. +#' For example, in the financial options samplers included in this package, this is calculated as \eqn{NM^l}, where \eqn{N} is the number of paths requested in the call to the user function \code{mlmc_l}, \eqn{M} is the refinement cost factor (\eqn{M=2} for \code{\link[=mcqmc06_l]{mcqmc06_l()}} and \eqn{M=4} for \code{\link[=opre_l]{opre_l()}}), and \eqn{l} is the level being sampled.} #' } #' -#' See the function (and source code of) \code{\link[=opre_l]{opre_l()}} in this package for an example of a user supplied level sampler. 
+#' See the function (and source code of) \code{\link[=opre_l]{opre_l()}} and \code{\link[=mcqmc06_l]{mcqmc06_l()}} in this package for an example of user supplied level samplers.
 #' @param alpha
 #' the weak error, \eqn{O(2^{-\alpha l})}.
 #' Must be \eqn{> 0} if specified.
@@ -74,13 +76,13 @@
 #' @return A list containing: \describe{
 #' \item{\code{P}}{The MLMC estimate;}
 #' \item{\code{Nl}}{A vector of the number of samples performed on each level;}
-#' \item{\code{Cl}}{Cost of samples at each level.}
+#' \item{\code{Cl}}{Per sample cost at each level.}
 #' }
 #'
 #' @examples
-#' mlmc(2, 6, 1000, 0.01, opre_l, gamma=1, option=1)
+#' mlmc(2, 6, 1000, 0.01, opre_l, option = 1)
 #'
-#' mlmc(2, 10, 1000, 0.01, mcqmc06_l, gamma=1, option=1)
+#' mlmc(2, 10, 1000, 0.01, mcqmc06_l, option = 1)
 #'
 #' @importFrom parallel mcmapply
 #' @importFrom stats lm
@@ -93,8 +95,8 @@ mlmc <- function(Lmin, Lmax, N0, eps, mlmc_l, alpha = NA, beta = NA, gamma = NA,
   if(Lmax<Lmin) {
     stop("Lmax must be >= Lmin.")
   }
-  if(N0<=0 || eps<=0 || gamma <= 0){
-    stop("N0, eps and gamma must all be greater than zero.")
+  if(N0<=0 || eps<=0){
+    stop("N0 and eps must be greater than zero.")
   }
   if(!is.na(alpha) && alpha<=0) {
     stop("if specified, alpha must be greater than zero. Set alpha to NA to automatically estimate.")
@@ -102,6 +104,9 @@ mlmc <- function(Lmin, Lmax, N0, eps, mlmc_l, alpha = NA, beta = NA, gamma = NA,
   if(!is.na(beta) && beta<=0) {
     stop("if specified, beta must be greater than zero. Set beta to NA to automatically estimate.")
   }
+  if(!is.na(gamma) && gamma<=0) {
+    stop("if specified, gamma must be greater than zero. Set gamma to NA to automatically estimate.")
+  }

   # initialise the MLMC run
   est.alpha <- is.na(alpha)
diff --git a/R/mlmc.test.R b/R/mlmc.test.R
index 3aba0b1..8e953f7 100644
--- a/R/mlmc.test.R
+++ b/R/mlmc.test.R
@@ -10,7 +10,18 @@
 #' This function is based on GPL-2 'Matlab' code by Mike Giles.
 #'
 #' @param mlmc_l
-#' a user supplied function which provides the estimate for level l
+#' a user supplied function which provides the estimate for level \eqn{l}.
+#' It must take at least two arguments, the first is the level number to be simulated and the second the number of paths.
+#' Additional arguments can be taken if desired: all additional \code{...} arguments to this function are forwarded to the user defined \code{mlmc_l} function.
+#'
+#' The user supplied function should return a named list containing one element named \code{sums} and second named \code{cost}, where:
+#' \describe{
+#' \item{\code{sums}}{is a vector of length two \eqn{\left(\sum Y_i, \sum Y_i^2\right)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.}
+#' \item{\code{cost}}{is a scalar with the total cost of the paths simulated.
+#' For example, in the financial options samplers included in this package, this is calculated as \eqn{NM^l}, where \eqn{N} is the number of paths requested in the call to the user function \code{mlmc_l}, \eqn{M} is the refinement cost factor (\eqn{M=2} for \code{\link[=mcqmc06_l]{mcqmc06_l()}} and \eqn{M=4} for \code{\link[=opre_l]{opre_l()}}), and \eqn{l} is the level being sampled.}
+#' }
+#'
+#' See the function (and source code of) \code{\link[=opre_l]{opre_l()}} and \code{\link[=mcqmc06_l]{mcqmc06_l()}} in this package for an example of user supplied level samplers.
#' @param N #' number of samples to use in the tests #' @param L @@ -19,18 +30,20 @@ #' initial number of samples which are used for the first 3 levels and for any subsequent levels which are automatically added. #' Must be \eqn{> 0}. #' @param eps.v -#' a vector of all the target accuracies in the tests. +#' a vector of one or more target accuracies for the tests. #' Must all be \eqn{> 0}. #' @param Lmin #' the minimum level of refinement. #' Must be \eqn{\ge 2}. #' @param Lmax #' the maximum level of refinement. -#' Must be \eqn{\ge} Lmin. +#' Must be \eqn{\ge} \code{Lmin}. #' @param silent #' set to TRUE to supress running output (identical output can still be printed by printing the return result) #' @param parallel -#' if an integer is supplied, R will fork \code{parallel} parallel processes an compute each level estimate in parallel. +#' if an integer is supplied, R will fork \code{parallel} parallel processes. +#' This is done for the convergence tests section by splitting the \code{N} samples as evenly as possible across cores when sampling each level. +#' This is also done for the MLMC complexity tests by passing the \code{parallel} argument on to the \code{\link[=mlmc]{mlmc()}} driver when targeting each accuracy level in \code{eps}. #' @param ... #' additional arguments which are passed on when the user supplied \code{mlmc_l} function is called #' @@ -44,31 +57,37 @@ #' @examples #' \dontrun{ #' # Example calls with realistic arguments -#' tst <- mlmc.test(opre_l, N=2000000, -#' L=5, N0=1000, -#' eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), -#' Lmin=2, Lmax=6, option=1) +#' # Financial options using an Euler-Maruyama discretisation +#' tst <- mlmc.test(opre_l, N = 2000000, +#' L = 5, N0 = 1000, +#' eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), +#' Lmin = 2, Lmax = 6, +#' option = 1) #' tst #' plot(tst) #' -#' tst <- mlmc.test(mcqmc06_l, N=20000, -#' L=8, N0=200, -#' eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), -#' Lmin=2, Lmax=10, option=1) +#' # Financial options using a Milstein discretisation +#' tst <- mlmc.test(mcqmc06_l, N = 20000, +#' L = 8, N0 = 200, +#' eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), +#' Lmin = 2, Lmax = 10, +#' option = 1) #' tst #' plot(tst) #' } #' #' # Toy versions for CRAN tests -#' tst <- mlmc.test(opre_l, N=10000, -#' L=5, N0=1000, -#' eps.v=c(0.025, 0.1), -#' Lmin=2, Lmax=6, option=1) +#' tst <- mlmc.test(opre_l, N = 10000, +#' L = 5, N0 = 1000, +#' eps.v = c(0.025, 0.1), +#' Lmin = 2, Lmax = 6, +#' option = 1) #' -#' tst <- mlmc.test(mcqmc06_l, N=10000, -#' L=8, N0=1000, -#' eps.v=c(0.025, 0.1), -#' Lmin=2, Lmax=10, option=1) +#' tst <- mlmc.test(mcqmc06_l, N = 10000, +#' L = 8, N0 = 1000, +#' eps.v = c(0.025, 0.1), +#' Lmin = 2, Lmax = 10, +#' option = 1) #' #' @importFrom stats lm #' @export @@ -122,6 +141,7 @@ mlmc.test <- function(mlmc_l, N, L, N0, eps.v, Lmin, Lmax, parallel = NA, silent del1 <- c(del1, sums[1]) del2 <- c(del2, sums[5]) var1 <- c(var1, sums[2]-sums[1]^2) + var1 <- pmax(var1, 1e-10) # fix for cases with var=0 var2 <- c(var2, sums[6]-sums[5]^2) var2 <- pmax(var2, 1e-10) # fix for cases with var=0 kur1 <- c(kur1, kurt) diff --git a/R/opre_l.R b/R/opre_l.R index 388341d..4baf712 100644 --- a/R/opre_l.R +++ b/R/opre_l.R @@ -4,18 +4,18 @@ sig_dW <- function(x, dW, h) { dW[2,] <- -0.5*dW[1,] + sqrt(0.75)*dW[2,] - c(sqrt(pmax(0,x[2,]))*x[1,]*dW[1,], - exp(-5*h)*0.25*sqrt(pmax(0,x[2,]))*dW[2,]); + rbind(sqrt(pmax(0,x[2,]))*x[1,]*dW[1,], + exp(-5*h)*0.25*sqrt(pmax(0,x[2,]))*dW[2,]); } mu <- function(x, h) { - m <- c(0.05*x[1,], - 
((1-exp(-5*h))/h)*(0.04-x[2,])) + rbind(0.05*x[1,], + ((1-exp(-5*h))/h)*(0.04-x[2,])) } #' Financial options using an Euler-Maruyama discretisation #' -#' Financial options based on scalar geometric Brownian motion and Heston models, similar to Mike Giles' original 2008 Operations Research paper, using an Euler-Maruyama discretisation +#' Financial options based on scalar geometric Brownian motion and Heston models, similar to Mike Giles' original 2008 Operations Research paper, Giles (2008), using an Euler-Maruyama discretisation #' #' This function is based on GPL-2 'Matlab' code by Mike Giles. #' @@ -39,7 +39,7 @@ mu <- function(x, h) { #' @author Tigran Nagapetyan #' #' @references -#' M.B. Giles. Multilevel Monte Carlo path simulation. \emph{Operations Research}, 56(3):607-617, 2008. +#' Giles, M.B. (2008) 'Multilevel Monte Carlo Path Simulation', \emph{Operations Research}, 56(3), pp. 607–617. Available at: \url{https://doi.org/10.1287/opre.1070.0496}. #' #' @examples #' \dontrun{ @@ -53,41 +53,40 @@ mu <- function(x, h) { #' # -- the new MLMC driver is a little different #' # -- switch to X_0=100 instead of X_0=1 #' -#' M <- 4 # refinement cost factor #' N0 <- 1000 # initial samples on coarse levels #' Lmin <- 2 # minimum refinement level #' Lmax <- 6 # maximum refinement level #' #' test.res <- list() #' for(option in 1:5) { -#' if(option==1) { +#' if(option == 1) { #' cat("\n ---- Computing European call ---- \n") -#' N <- 2000000 # samples for convergence tests +#' N <- 1000000 # samples for convergence tests #' L <- 5 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -#' } else if(option==2) { +#' } else if(option == 2) { #' cat("\n ---- Computing Asian call ---- \n") -#' N <- 2000000 # samples for convergence tests +#' N <- 1000000 # samples for convergence tests #' L <- 5 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -#' } else if(option==3) { +#' } else if(option == 3) { #' cat("\n ---- Computing lookback call ---- \n") -#' N <- 2000000 # samples for convergence tests +#' N <- 1000000 # samples for convergence tests #' L <- 5 # levels for convergence tests #' Eps <- c(0.01, 0.02, 0.05, 0.1, 0.2) -#' } else if(option==4) { +#' } else if(option == 4) { #' cat("\n ---- Computing digital call ---- \n") -#' N <- 3000000 # samples for convergence tests +#' N <- 4000000 # samples for convergence tests #' L <- 5 # levels for convergence tests #' Eps <- c(0.02, 0.05, 0.1, 0.2, 0.5) -#' } else if(option==5) { +#' } else if(option == 5) { #' cat("\n ---- Computing Heston model ---- \n") #' N <- 2000000 # samples for convergence tests #' L <- 5 # levels for convergence tests #' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) #' } #' -#' test.res[[option]] <- mlmc.test(opre_l, M, N, L, N0, Eps, Lmin, Lmax, option=option) +#' test.res[[option]] <- mlmc.test(opre_l, N, L, N0, Eps, Lmin, Lmax, option = option) #' #' # print exact analytic value, based on S0=K #' T <- 1 @@ -95,18 +94,25 @@ mu <- function(x, h) { #' sig <- 0.2 #' K <- 100 #' +#' k <- 0.5*sig^2/r; #' d1 <- (r+0.5*sig^2)*T / (sig*sqrt(T)) #' d2 <- (r-0.5*sig^2)*T / (sig*sqrt(T)) #' -#' if(option==1) { +#' if(option == 1) { #' val <- K*( pnorm(d1) - exp(-r*T)*pnorm(d2) ) -#' cat(sprintf("\n Exact value: %f, MLMC value: %f \n", val, test.res[[option]]$P[1])) -#' } else if(option==3) { -#' k <- 0.5*sig^2/r +#' } else if(option == 2) { +#' val <- NA +#' } else if(option == 3) { #' val <- K*( pnorm(d1) - pnorm(-d1)*k - exp(-r*T)*(pnorm(d2) - pnorm(d2)*k) ) -#' cat(sprintf("\n Exact value: %f, 
MLMC value: %f \n", val, test.res[[option]]$P[1])) -#' } else if(option==4) { +#' } else if(option == 4) { #' val <- K*exp(-r*T)*pnorm(d2) +#' } else if(option == 5) { +#' val <- NA +#' } +#' +#' if(is.na(val)) { +#' cat(sprintf("\n Exact value unknown, MLMC value: %f \n", test.res[[option]]$P[1])) +#' } else { #' cat(sprintf("\n Exact value: %f, MLMC value: %f \n", val, test.res[[option]]$P[1])) #' } #' @@ -116,7 +122,7 @@ mu <- function(x, h) { #' } #' #' # The level sampler can be called directly to retrieve the relevant level sums: -#' opre_l(l=7, N=10, option=1) +#' opre_l(l = 7, N = 10, option = 1) #' #' @importFrom stats rnorm #' @export @@ -158,7 +164,7 @@ opre_l <- function(l, N, option) { dWf <- sqrt(hf)*rnorm(N2) Xf <- Xf + r*Xf*hf + sig*Xf*dWf Af <- Af + 0.5*hf*Xf - Mf <- min(Mf,Xf) + Mf <- pmin(Mf,Xf) } else { for (n in 1:nc){ dWc <- rep(0, N2) @@ -199,13 +205,13 @@ opre_l <- function(l, N, option) { Xc <- Xf if(l==0) { - dWf <- sqrt(hf)*rnorm(N2) + dWf <- sqrt(hf)*matrix(rnorm(2*N2), nrow=2, ncol=N2) Xf <- Xf + mu(Xf,hf)*hf + sig_dW(Xf,dWf,hf) } else { for(n in 1:nc) { dWc <- matrix(0, nrow=2, ncol=N2) for(m in 1:M) { - dWf <- sqrt(hf)*rnorm(N2) + dWf <- sqrt(hf)*matrix(rnorm(2*N2), nrow=2, ncol=N2) dWc <- dWc + dWf Xf <- Xf + mu(Xf,hf)*hf + sig_dW(Xf,dWf,hf) } diff --git a/R/plot.mlmc.test.R b/R/plot.mlmc.test.R index dff66e2..ac21a4e 100644 --- a/R/plot.mlmc.test.R +++ b/R/plot.mlmc.test.R @@ -2,26 +2,39 @@ #' #' Produces diagnostic plots on the result of an \code{\link{mlmc.test}} function call. #' -#' @param x an \code{mlmc.test} object as produced by a call to the \code{\link{mlmc.test}} function. -#' @param which a vector of strings specifying which plots to produce, or \code{"all"} to do all diagnostic plots. The options are: \describe{ -#' \item{\code{"var"} = \eqn{log_2} of variance against level;}{} -#' \item{\code{"mean"} = \eqn{log_2} of mean against level;}{} -#' \item{\code{"consis"} = consistency against level;}{} -#' \item{\code{"kurt"} = kurtosis against level;}{} -#' \item{\code{"Nl"} = \eqn{log_2} of number of samples against level;}{} -#' \item{\code{"cost"} = \eqn{log_10} of cost against \eqn{log_10} of epsilon (accuracy).}{} -#' } -#' @param cols the number of columns across to plot to override the default value. -#' @param ... additional arguments which are passed on to plotting functions. +#' Most of the plots produced are relatively self-explanatory. +#' However, the consistency and kurtosis plots in particular may require some background. +#' It is highly recommended to refer to Section 3.3 of Giles (2015), where the rationale for these diagnostic plots is addressed in full detail. +#' +#' @param x +#' an \code{mlmc.test} object as produced by a call to the \code{\link{mlmc.test}} function. +#' @param which +#' a vector of strings specifying which plots to produce, or \code{"all"} to do all diagnostic plots +#' The options are: \describe{ +#' \item{\code{"var"} = \eqn{\log_2} of variance against level;}{} +#' \item{\code{"mean"} = \eqn{\log_2} of the absolute value of the mean against level;}{} +#' \item{\code{"consis"} = consistency against level;}{} +#' \item{\code{"kurt"} = kurtosis against level;}{} +#' \item{\code{"Nl"} = \eqn{\log_2} of number of samples against level;}{} +#' \item{\code{"cost"} = \eqn{\log_{10}} of cost against \eqn{\log_{10}} of epsilon (accuracy).}{} +#' } +#' @param cols +#' the number of columns across to plot to override the default value. +#' @param ... +#' additional arguments which are passed on to plotting functions. 
#' #' @author Louis Aslett #' +#' @references +#' Giles, M.B. (2015) 'Multilevel Monte Carlo methods', \emph{Acta Numerica}, 24, pp. 259–328. Available at: \url{https://doi.org/10.1017/S096249291500001X}. +#' #' @examples #' \dontrun{ -#' tst <- mlmc.test(opre_l, N=2000000, -#' L=5, N0=1000, -#' eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), -#' Lmin=2, Lmax=6, option=1) +#' tst <- mlmc.test(opre_l, N = 2000000, +#' L = 5, N0 = 1000, +#' eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), +#' Lmin = 2, Lmax = 6, +#' option = 1) #' tst #' plot(tst) #' } @@ -47,12 +60,12 @@ plot.mlmc.test <- function(x, which="all", cols=NA, ...) { if("mean" %in% which) { p <- c(p, list( ggplot(data.frame(l=rep(0:x$L, 2), - mean=c(log2(x$del1), log2(x$del2)), + mean=c(log2(abs(x$del1)), log2(abs(x$del2))), Method=c(rep("MLMC", x$L+1), rep("MC", x$L+1)))) + geom_point(aes_string(x="l", y="mean", colour="Method")) + geom_line(aes_string(x="l", y="mean", colour="Method", linetype="Method")) + xlab("Level") + - ylab(expression(log[2](Mean))) + ylab(expression(log[2](group('|', phantom(), '')*Mean*group('|', phantom(), '')))) )) } if("consis" %in% which) { diff --git a/_pkgdown.yml b/_pkgdown.yml index 5a61829..f611dbe 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -1,6 +1,11 @@ url: https://mlmc.louisaslett.com/ template: bootstrap: 5 + includes: + in_header: | + + + navbar: structure: left: [intro, reference, articles, tutorials, news, giles] diff --git a/inst/mlmc_hex.png b/inst/mlmc_hex.png index 006cb8d..26291ac 100644 Binary files a/inst/mlmc_hex.png and b/inst/mlmc_hex.png differ diff --git a/man/figures/logo.png b/man/figures/logo.png index 8ff46e4..572b771 100644 Binary files a/man/figures/logo.png and b/man/figures/logo.png differ diff --git a/man/mcqmc06_l.Rd b/man/mcqmc06_l.Rd index 85bdc48..67a8496 100644 --- a/man/mcqmc06_l.Rd +++ b/man/mcqmc06_l.Rd @@ -22,7 +22,7 @@ The options are: }} } \description{ -Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, using a Milstein discretisation. +Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, Giles (2008), using a Milstein discretisation. } \details{ This function is based on GPL-2 C++ code by Mike Giles. @@ -38,41 +38,40 @@ This function is based on GPL-2 C++ code by Mike Giles. 
# -- different random number generation # -- switch to S_0=100 -M <- 2 # refinement cost factor N0 <- 200 # initial samples on coarse levels Lmin <- 2 # minimum refinement level Lmax <- 10 # maximum refinement level test.res <- list() for(option in 1:5) { - if(option==1) { + if(option == 1) { cat("\n ---- Computing European call ---- \n") N <- 20000 # samples for convergence tests L <- 8 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) - } else if(option==2) { + } else if(option == 2) { cat("\n ---- Computing Asian call ---- \n") N <- 20000 # samples for convergence tests L <- 8 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) - } else if(option==3) { + } else if(option == 3) { cat("\n ---- Computing lookback call ---- \n") N <- 20000 # samples for convergence tests L <- 10 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) - } else if(option==4) { + } else if(option == 4) { cat("\n ---- Computing digital call ---- \n") N <- 200000 # samples for convergence tests L <- 8 # levels for convergence tests Eps <- c(0.01, 0.02, 0.05, 0.1, 0.2) - } else if(option==5) { + } else if(option == 5) { cat("\n ---- Computing barrier call ---- \n") N <- 200000 # samples for convergence tests L <- 8 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) } - test.res[[option]] <- mlmc.test(mcqmc06_l, M, N, L, N0, Eps, Lmin, Lmax, option=option) + test.res[[option]] <- mlmc.test(mcqmc06_l, N, L, N0, Eps, Lmin, Lmax, option = option) # plot results plot(test.res[[option]]) @@ -80,11 +79,11 @@ for(option in 1:5) { } # The level sampler can be called directly to retrieve the relevant level sums: -mcqmc06_l(l=7, N=10, option=1) +mcqmc06_l(l = 7, N = 10, option = 1) } \references{ -M.B. Giles. 'Improved multilevel Monte Carlo convergence using the Milstein scheme', p.343-358 in \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}, Springer, 2007. +Giles, M. (2008) 'Improved Multilevel Monte Carlo Convergence using the Milstein Scheme', in A. Keller, S. Heinrich, and H. Niederreiter (eds) \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}. Berlin, Heidelberg: Springer, pp. 343–358. Available at: \url{https://doi.org/10.1007/978-3-540-74496-2_20}. } \author{ Louis Aslett diff --git a/man/mlmc.Rd b/man/mlmc.Rd index 6672b39..fc38e9e 100644 --- a/man/mlmc.Rd +++ b/man/mlmc.Rd @@ -34,11 +34,12 @@ Must be \eqn{> 0}.} The user supplied function should return a named list containing one element named \code{sums} and second named \code{cost}, where: \describe{ - \item{\code{sums}}{is a vector of length two \eqn{(\sum Y_i, \sum Y_i^2)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.} - \item{\code{cost}}{is a scalar with the cost of the number of paths simulated.} + \item{\code{sums}}{is a vector of length two \eqn{\left(\sum Y_i, \sum Y_i^2\right)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.} + \item{\code{cost}}{is a scalar with the total cost of the paths simulated. 
+ For example, in the financial options samplers included in this package, this is calculated as \eqn{NM^l}, where \eqn{N} is the number of paths requested in the call to the user function \code{mlmc_l}, \eqn{M} is the refinement cost factor (\eqn{M=2} for \code{\link[=mcqmc06_l]{mcqmc06_l()}} and \eqn{M=4} for \code{\link[=opre_l]{opre_l()}}), and \eqn{l} is the level being sampled.} } - See the function (and source code of) \code{\link[=opre_l]{opre_l()}} in this package for an example of a user supplied level sampler.} + See the function (and source code of) \code{\link[=opre_l]{opre_l()}} and \code{\link[=mcqmc06_l]{mcqmc06_l()}} in this package for an example of user supplied level samplers.} \item{alpha}{the weak error, \eqn{O(2^{-\alpha l})}. Must be \eqn{> 0} if specified. @@ -60,7 +61,7 @@ If \code{NA} then \code{gamma} will be estimated.} A list containing: \describe{ \item{\code{P}}{The MLMC estimate;} \item{\code{Nl}}{A vector of the number of samples performed on each level;} - \item{\code{Cl}}{Cost of samples at each level.} + \item{\code{Cl}}{Per sample cost at each level.} } } \description{ @@ -72,8 +73,9 @@ The Multilevel Monte Carlo Method method originated in the works Giles (2008) an Consider a sequence \eqn{P_0, P_1, \ldots}, which approximates \eqn{P_L} with increasing accuracy, but also increasing cost, we have the simple identity \deqn{E[P_L] = E[P_0] + \sum_{l=1}^L E[P_l-P_{l-1}],} and therefore we can use the following unbiased estimator for \eqn{E[P_L]}, -\deqn{N_0^{-1} \sum_{n=1}^{N_0} P_0^{(0,n)} + \sum_{l=1}^L \{ N_l^{-1} \sum_{n=1}^{N_l} (P_l^{(l,n)} - P_{l-1}^{(l,n)}) \}} -with the inclusion of the level \eqn{l} in the superscript \eqn{(l,n)} indicating that the samples used at each level of correction are independent. +\deqn{N_0^{-1} \sum_{n=1}^{N_0} P_0^{(0,n)} + \sum_{l=1}^L \left\{ N_l^{-1} \sum_{n=1}^{N_l} \left(P_l^{(l,n)} - P_{l-1}^{(l,n)}\right) \right\}} +where \eqn{N_l} samples are produced at level \eqn{l}. +The inclusion of the level \eqn{l} in the superscript \eqn{(l,n)} indicates that the samples used at each level of correction are independent. Set \eqn{C_0}, and \eqn{V_0} to be the cost and variance of one sample of \eqn{P_0}, and \eqn{C_l, V_l} to be the cost and variance of one sample of \eqn{P_l - P_{l-1}}, then the overall cost and variance of the multilevel estimator is \eqn{\sum_{l=0}^L N_l C_l} and \eqn{\sum_{l=0}^L N_l^{-1} V_l}, respectively. @@ -84,17 +86,17 @@ For further information on multilevel Monte Carlo methods, see the webpage \url{ This function is based on GPL-2 'Matlab' code by Mike Giles. } \examples{ -mlmc(2, 6, 1000, 0.01, opre_l, gamma=1, option=1) +mlmc(2, 6, 1000, 0.01, opre_l, option = 1) -mlmc(2, 10, 1000, 0.01, mcqmc06_l, gamma=1, option=1) +mlmc(2, 10, 1000, 0.01, mcqmc06_l, option = 1) } \references{ -M.B. Giles. Multilevel Monte Carlo path simulation. \emph{Operations Research}, 56(3):607-617, 2008. +Giles, M.B. (2008) 'Multilevel Monte Carlo Path Simulation', \emph{Operations Research}, 56(3), pp. 607–617. Available at: \url{https://doi.org/10.1287/opre.1070.0496}. -M.B. Giles. Multilevel Monte Carlo methods. \emph{Acta Numerica}, 24:259-328, 2015. +Giles, M.B. (2015) 'Multilevel Monte Carlo methods', \emph{Acta Numerica}, 24, pp. 259–328. Available at: \url{https://doi.org/10.1017/S096249291500001X}. -S. Heinrich. Monte Carlo complexity of global solution of integral equations. \emph{Journal of Complexity}, 14(2):151-175, 1998. +Heinrich, S. 
(1998) 'Monte Carlo Complexity of Global Solution of Integral Equations', \emph{Journal of Complexity}, 14(2), pp. 151–175. Available at: \url{https://doi.org/10.1006/jcom.1998.0471}. } \author{ Louis Aslett diff --git a/man/mlmc.test.Rd b/man/mlmc.test.Rd index b9faf09..d438f17 100644 --- a/man/mlmc.test.Rd +++ b/man/mlmc.test.Rd @@ -18,7 +18,18 @@ mlmc.test( ) } \arguments{ -\item{mlmc_l}{a user supplied function which provides the estimate for level l} +\item{mlmc_l}{a user supplied function which provides the estimate for level \eqn{l}. + It must take at least two arguments, the first is the level number to be simulated and the second the number of paths. + Additional arguments can be taken if desired: all additional \code{...} arguments to this function are forwarded to the user defined \code{mlmc_l} function. + + The user supplied function should return a named list containing one element named \code{sums} and second named \code{cost}, where: + \describe{ + \item{\code{sums}}{is a vector of length two \eqn{\left(\sum Y_i, \sum Y_i^2\right)} where \eqn{Y_i} are iid simulations with expectation \eqn{E[P_0]} when \eqn{l=0} and expectation \eqn{E[P_l-P_{l-1}]} when \eqn{l>0}.} + \item{\code{cost}}{is a scalar with the total cost of the paths simulated. + For example, in the financial options samplers included in this package, this is calculated as \eqn{NM^l}, where \eqn{N} is the number of paths requested in the call to the user function \code{mlmc_l}, \eqn{M} is the refinement cost factor (\eqn{M=2} for \code{\link[=mcqmc06_l]{mcqmc06_l()}} and \eqn{M=4} for \code{\link[=opre_l]{opre_l()}}), and \eqn{l} is the level being sampled.} + } + + See the function (and source code of) \code{\link[=opre_l]{opre_l()}} and \code{\link[=mcqmc06_l]{mcqmc06_l()}} in this package for an example of user supplied level samplers.} \item{N}{number of samples to use in the tests} @@ -27,16 +38,18 @@ mlmc.test( \item{N0}{initial number of samples which are used for the first 3 levels and for any subsequent levels which are automatically added. Must be \eqn{> 0}.} -\item{eps.v}{a vector of all the target accuracies in the tests. +\item{eps.v}{a vector of one or more target accuracies for the tests. Must all be \eqn{> 0}.} \item{Lmin}{the minimum level of refinement. Must be \eqn{\ge 2}.} \item{Lmax}{the maximum level of refinement. -Must be \eqn{\ge} Lmin.} +Must be \eqn{\ge} \code{Lmin}.} -\item{parallel}{if an integer is supplied, R will fork \code{parallel} parallel processes an compute each level estimate in parallel.} +\item{parallel}{if an integer is supplied, R will fork \code{parallel} parallel processes. +This is done for the convergence tests section by splitting the \code{N} samples as evenly as possible across cores when sampling each level. +This is also done for the MLMC complexity tests by passing the \code{parallel} argument on to the \code{\link[=mlmc]{mlmc()}} driver when targeting each accuracy level in \code{eps}.} \item{silent}{set to TRUE to supress running output (identical output can still be printed by printing the return result)} @@ -57,31 +70,37 @@ This function is based on GPL-2 'Matlab' code by Mike Giles. 
\examples{ \dontrun{ # Example calls with realistic arguments -tst <- mlmc.test(opre_l, N=2000000, - L=5, N0=1000, - eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), - Lmin=2, Lmax=6, option=1) +# Financial options using an Euler-Maruyama discretisation +tst <- mlmc.test(opre_l, N = 2000000, + L = 5, N0 = 1000, + eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), + Lmin = 2, Lmax = 6, + option = 1) tst plot(tst) -tst <- mlmc.test(mcqmc06_l, N=20000, - L=8, N0=200, - eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), - Lmin=2, Lmax=10, option=1) +# Financial options using a Milstein discretisation +tst <- mlmc.test(mcqmc06_l, N = 20000, + L = 8, N0 = 200, + eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), + Lmin = 2, Lmax = 10, + option = 1) tst plot(tst) } # Toy versions for CRAN tests -tst <- mlmc.test(opre_l, N=10000, - L=5, N0=1000, - eps.v=c(0.025, 0.1), - Lmin=2, Lmax=6, option=1) - -tst <- mlmc.test(mcqmc06_l, N=10000, - L=8, N0=1000, - eps.v=c(0.025, 0.1), - Lmin=2, Lmax=10, option=1) +tst <- mlmc.test(opre_l, N = 10000, + L = 5, N0 = 1000, + eps.v = c(0.025, 0.1), + Lmin = 2, Lmax = 6, + option = 1) + +tst <- mlmc.test(mcqmc06_l, N = 10000, + L = 8, N0 = 1000, + eps.v = c(0.025, 0.1), + Lmin = 2, Lmax = 10, + option = 1) } \author{ diff --git a/man/opre_l.Rd b/man/opre_l.Rd index c9b2093..00d1f78 100644 --- a/man/opre_l.Rd +++ b/man/opre_l.Rd @@ -22,7 +22,7 @@ The options are: }} } \description{ -Financial options based on scalar geometric Brownian motion and Heston models, similar to Mike Giles' original 2008 Operations Research paper, using an Euler-Maruyama discretisation +Financial options based on scalar geometric Brownian motion and Heston models, similar to Mike Giles' original 2008 Operations Research paper, Giles (2008), using an Euler-Maruyama discretisation } \details{ This function is based on GPL-2 'Matlab' code by Mike Giles. @@ -39,41 +39,40 @@ This function is based on GPL-2 'Matlab' code by Mike Giles. 
# -- the new MLMC driver is a little different # -- switch to X_0=100 instead of X_0=1 -M <- 4 # refinement cost factor N0 <- 1000 # initial samples on coarse levels Lmin <- 2 # minimum refinement level Lmax <- 6 # maximum refinement level test.res <- list() for(option in 1:5) { - if(option==1) { + if(option == 1) { cat("\n ---- Computing European call ---- \n") - N <- 2000000 # samples for convergence tests + N <- 1000000 # samples for convergence tests L <- 5 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) - } else if(option==2) { + } else if(option == 2) { cat("\n ---- Computing Asian call ---- \n") - N <- 2000000 # samples for convergence tests + N <- 1000000 # samples for convergence tests L <- 5 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) - } else if(option==3) { + } else if(option == 3) { cat("\n ---- Computing lookback call ---- \n") - N <- 2000000 # samples for convergence tests + N <- 1000000 # samples for convergence tests L <- 5 # levels for convergence tests Eps <- c(0.01, 0.02, 0.05, 0.1, 0.2) - } else if(option==4) { + } else if(option == 4) { cat("\n ---- Computing digital call ---- \n") - N <- 3000000 # samples for convergence tests + N <- 4000000 # samples for convergence tests L <- 5 # levels for convergence tests Eps <- c(0.02, 0.05, 0.1, 0.2, 0.5) - } else if(option==5) { + } else if(option == 5) { cat("\n ---- Computing Heston model ---- \n") N <- 2000000 # samples for convergence tests L <- 5 # levels for convergence tests Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) } - test.res[[option]] <- mlmc.test(opre_l, M, N, L, N0, Eps, Lmin, Lmax, option=option) + test.res[[option]] <- mlmc.test(opre_l, N, L, N0, Eps, Lmin, Lmax, option = option) # print exact analytic value, based on S0=K T <- 1 @@ -81,18 +80,25 @@ for(option in 1:5) { sig <- 0.2 K <- 100 + k <- 0.5*sig^2/r; d1 <- (r+0.5*sig^2)*T / (sig*sqrt(T)) d2 <- (r-0.5*sig^2)*T / (sig*sqrt(T)) - if(option==1) { + if(option == 1) { val <- K*( pnorm(d1) - exp(-r*T)*pnorm(d2) ) - cat(sprintf("\n Exact value: \%f, MLMC value: \%f \n", val, test.res[[option]]$P[1])) - } else if(option==3) { - k <- 0.5*sig^2/r + } else if(option == 2) { + val <- NA + } else if(option == 3) { val <- K*( pnorm(d1) - pnorm(-d1)*k - exp(-r*T)*(pnorm(d2) - pnorm(d2)*k) ) - cat(sprintf("\n Exact value: \%f, MLMC value: \%f \n", val, test.res[[option]]$P[1])) - } else if(option==4) { + } else if(option == 4) { val <- K*exp(-r*T)*pnorm(d2) + } else if(option == 5) { + val <- NA + } + + if(is.na(val)) { + cat(sprintf("\n Exact value unknown, MLMC value: \%f \n", test.res[[option]]$P[1])) + } else { cat(sprintf("\n Exact value: \%f, MLMC value: \%f \n", val, test.res[[option]]$P[1])) } @@ -102,11 +108,11 @@ for(option in 1:5) { } # The level sampler can be called directly to retrieve the relevant level sums: -opre_l(l=7, N=10, option=1) +opre_l(l = 7, N = 10, option = 1) } \references{ -M.B. Giles. Multilevel Monte Carlo path simulation. \emph{Operations Research}, 56(3):607-617, 2008. +Giles, M.B. (2008) 'Multilevel Monte Carlo Path Simulation', \emph{Operations Research}, 56(3), pp. 607–617. Available at: \url{https://doi.org/10.1287/opre.1070.0496}. 
} \author{ Louis Aslett diff --git a/man/plot.mlmc.test.Rd b/man/plot.mlmc.test.Rd index 241bc83..f87a39d 100644 --- a/man/plot.mlmc.test.Rd +++ b/man/plot.mlmc.test.Rd @@ -9,13 +9,14 @@ \arguments{ \item{x}{an \code{mlmc.test} object as produced by a call to the \code{\link{mlmc.test}} function.} -\item{which}{a vector of strings specifying which plots to produce, or \code{"all"} to do all diagnostic plots. The options are: \describe{ - \item{\code{"var"} = \eqn{log_2} of variance against level;}{} - \item{\code{"mean"} = \eqn{log_2} of mean against level;}{} +\item{which}{a vector of strings specifying which plots to produce, or \code{"all"} to do all diagnostic plots +The options are: \describe{ + \item{\code{"var"} = \eqn{\log_2} of variance against level;}{} + \item{\code{"mean"} = \eqn{\log_2} of the absolute value of the mean against level;}{} \item{\code{"consis"} = consistency against level;}{} \item{\code{"kurt"} = kurtosis against level;}{} - \item{\code{"Nl"} = \eqn{log_2} of number of samples against level;}{} - \item{\code{"cost"} = \eqn{log_10} of cost against \eqn{log_10} of epsilon (accuracy).}{} + \item{\code{"Nl"} = \eqn{\log_2} of number of samples against level;}{} + \item{\code{"cost"} = \eqn{\log_{10}} of cost against \eqn{\log_{10}} of epsilon (accuracy).}{} }} \item{cols}{the number of columns across to plot to override the default value.} @@ -25,16 +26,25 @@ \description{ Produces diagnostic plots on the result of an \code{\link{mlmc.test}} function call. } +\details{ +Most of the plots produced are relatively self-explanatory. +However, the consistency and kurtosis plots in particular may require some background. +It is highly recommended to refer to Section 3.3 of Giles (2015), where the rationale for these diagnostic plots is addressed in full detail. +} \examples{ \dontrun{ -tst <- mlmc.test(opre_l, N=2000000, - L=5, N0=1000, - eps.v=c(0.005, 0.01, 0.02, 0.05, 0.1), - Lmin=2, Lmax=6, option=1) +tst <- mlmc.test(opre_l, N = 2000000, + L = 5, N0 = 1000, + eps.v = c(0.005, 0.01, 0.02, 0.05, 0.1), + Lmin = 2, Lmax = 6, + option = 1) tst plot(tst) } +} +\references{ +Giles, M.B. (2015) 'Multilevel Monte Carlo methods', \emph{Acta Numerica}, 24, pp. 259–328. Available at: \url{https://doi.org/10.1017/S096249291500001X}. } \author{ Louis Aslett diff --git a/src/mcqmc06_l.cpp b/src/mcqmc06_l.cpp index 083437b..7a6ab66 100644 --- a/src/mcqmc06_l.cpp +++ b/src/mcqmc06_l.cpp @@ -8,7 +8,7 @@ using namespace Rcpp; //' Financial options using a Milstein discretisation //' -//' Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, using a Milstein discretisation. +//' Financial options based on scalar geometric Brownian motion, similar to Mike Giles' MCQMC06 paper, Giles (2008), using a Milstein discretisation. //' //' This function is based on GPL-2 C++ code by Mike Giles. //' @@ -31,7 +31,7 @@ using namespace Rcpp; //' @author Mike Giles //' //' @references -//' M.B. Giles. 'Improved multilevel Monte Carlo convergence using the Milstein scheme', p.343-358 in \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}, Springer, 2007. +//' Giles, M. (2008) 'Improved Multilevel Monte Carlo Convergence using the Milstein Scheme', in A. Keller, S. Heinrich, and H. Niederreiter (eds) \emph{Monte Carlo and Quasi-Monte Carlo Methods 2006}. Berlin, Heidelberg: Springer, pp. 343–358. Available at: \url{https://doi.org/10.1007/978-3-540-74496-2_20}. 
//' //' @examples //' \dontrun{ @@ -44,41 +44,40 @@ using namespace Rcpp; //' # -- different random number generation //' # -- switch to S_0=100 //' -//' M <- 2 # refinement cost factor //' N0 <- 200 # initial samples on coarse levels //' Lmin <- 2 # minimum refinement level //' Lmax <- 10 # maximum refinement level //' //' test.res <- list() //' for(option in 1:5) { -//' if(option==1) { +//' if(option == 1) { //' cat("\n ---- Computing European call ---- \n") //' N <- 20000 # samples for convergence tests //' L <- 8 # levels for convergence tests //' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -//' } else if(option==2) { +//' } else if(option == 2) { //' cat("\n ---- Computing Asian call ---- \n") //' N <- 20000 # samples for convergence tests //' L <- 8 # levels for convergence tests //' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -//' } else if(option==3) { +//' } else if(option == 3) { //' cat("\n ---- Computing lookback call ---- \n") //' N <- 20000 # samples for convergence tests //' L <- 10 # levels for convergence tests //' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) -//' } else if(option==4) { +//' } else if(option == 4) { //' cat("\n ---- Computing digital call ---- \n") //' N <- 200000 # samples for convergence tests //' L <- 8 # levels for convergence tests //' Eps <- c(0.01, 0.02, 0.05, 0.1, 0.2) -//' } else if(option==5) { +//' } else if(option == 5) { //' cat("\n ---- Computing barrier call ---- \n") //' N <- 200000 # samples for convergence tests //' L <- 8 # levels for convergence tests //' Eps <- c(0.005, 0.01, 0.02, 0.05, 0.1) //' } //' -//' test.res[[option]] <- mlmc.test(mcqmc06_l, M, N, L, N0, Eps, Lmin, Lmax, option=option) +//' test.res[[option]] <- mlmc.test(mcqmc06_l, N, L, N0, Eps, Lmin, Lmax, option = option) //' //' # plot results //' plot(test.res[[option]]) @@ -86,7 +85,7 @@ using namespace Rcpp; //' } //' //' # The level sampler can be called directly to retrieve the relevant level sums: -//' mcqmc06_l(l=7, N=10, option=1) +//' mcqmc06_l(l = 7, N = 10, option = 1) //' //' @export // [[Rcpp::export]]
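For reference, the user supplied level sampler contract documented in the patch above (a named list with a two element sums vector and a scalar cost) can be illustrated with a minimal sketch. The gbm_l function below is hypothetical and not part of the package: it couples an Euler-Maruyama discretisation of scalar geometric Brownian motion across levels with an assumed refinement factor M = 2 and uses X_T itself as the payoff. It is sufficient for the mlmc() driver; note that mlmc.test() additionally reads sums[5] and sums[6] in its convergence tests (see the diff of R/mlmc.test.R), which is why the bundled opre_l() and mcqmc06_l() samplers return a longer sums vector.

# Hypothetical example (not part of the package): a minimal user supplied
# level sampler following the documented mlmc_l contract for mlmc().
# Level l discretises scalar GBM dX = r*X*dt + sig*X*dW by Euler-Maruyama
# with M^l time steps, coupling fine and coarse paths through shared noise.
gbm_l <- function(l, N, X0 = 100, r = 0.05, sig = 0.2, T = 1) {
  M  <- 2           # refinement cost factor assumed for this sketch
  nf <- M^l         # number of fine time steps on level l
  hf <- T/nf        # fine step size

  Xf <- rep(X0, N)  # fine paths
  Xc <- rep(X0, N)  # coarse paths (only advanced when l > 0)

  if(l == 0) {
    dWf <- sqrt(hf)*rnorm(N)
    Xf <- Xf + r*Xf*hf + sig*Xf*dWf
  } else {
    nc <- nf/M      # number of coarse time steps
    hc <- T/nc      # coarse step size
    for(n in 1:nc) {
      dWc <- rep(0, N)
      for(m in 1:M) {
        dWf <- sqrt(hf)*rnorm(N)
        dWc <- dWc + dWf                 # coarse increment reuses the fine noise
        Xf <- Xf + r*Xf*hf + sig*Xf*dWf
      }
      Xc <- Xc + r*Xc*hc + sig*Xc*dWc
    }
  }

  # Y_i is P_0 on level 0 and P_l - P_{l-1} (same driving noise) on level l > 0;
  # here the "payoff" is simply X_T, so E[P] = X0*exp(r*T).
  Y <- if(l == 0) Xf else Xf - Xc

  list(sums = c(sum(Y), sum(Y^2)),  # the two sums required by mlmc()
       cost = N*nf)                 # total cost N*M^l, as in the documentation
}

# It can then be passed to the driver in the usual way, e.g.
# mlmc(2, 10, 1000, 0.1, gbm_l)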