Small bugfix, better documentation and style.
mhauru committed Jan 24, 2020
1 parent adf7097 commit 15a170d
Showing 7 changed files with 255 additions and 152 deletions.
18 changes: 11 additions & 7 deletions demo/demo.jl
@@ -1,3 +1,6 @@
+# A script that optimizes a MERA for either the Ising or XXZ model, and computes some
+# physical quantities from it. The MERAs built are stored on disk.
+
 using ArgParse
 using LinearAlgebra
 using TensorKit
@@ -9,14 +12,14 @@ function parse_pars()
     settings = ArgParseSettings(autofix_names=true)
     @add_arg_table(settings
         , "--model", arg_type=String, default="Ising"
-        , "--meratype", arg_type=String, default="binary"
-        , "--threads", arg_type=Int, default=1
-        , "--chis", arg_type=Vector{Int}, default=collect(2:4)
+        , "--meratype", arg_type=String, default="ternary"
+        , "--threads", arg_type=Int, default=1  # For BLAS parallelization
+        , "--chis", arg_type=Vector{Int}, default=collect(2:3)  # Bond dimensions
         , "--layers", arg_type=Int, default=3
-        , "--symmetry", arg_type=String, default="none"
-        , "--block_size", arg_type=Int, default=2
-        , "--h", arg_type=Float64, default=1.0
-        , "--Delta", arg_type=Float64, default=-0.5
+        , "--symmetry", arg_type=String, default="none"  # "none" or "group"
+        , "--block_size", arg_type=Int, default=2  # Block two sites to start
+        , "--h", arg_type=Float64, default=1.0  # External field of Ising
+        , "--Delta", arg_type=Float64, default=-0.5  # Isotropicity in XXZ
         , "--datafolder", arg_type=String, default="JLMdata"
     )
     pars = parse_args(ARGS, settings; as_symbols=true)
@@ -37,6 +40,7 @@ function main()
     end
     block_size = pars[:block_size]

+    # Three sets of parameters are used when optimizing the MERA:
     # Used when determining which sector to give bond dimension to.
    pars[:initial_opt_pars] = Dict(:densitymatrix_delta => 1e-5,
                                   :maxiter => 10,
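For orientation, here is a minimal sketch of how an ArgParse table like the one above behaves. It is hypothetical and trimmed to two options; the actual script parses ARGS:

    using ArgParse

    settings = ArgParseSettings(autofix_names=true)
    @add_arg_table(settings
        , "--model", arg_type=String, default="Ising"
        , "--block_size", arg_type=Int, default=2
    )
    # Parse an explicit argument vector instead of ARGS, for illustration.
    pars = parse_args(["--model", "XXZ"], settings; as_symbols=true)
    pars[:model]       # "XXZ"
    pars[:block_size]  # 2, the default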
15 changes: 9 additions & 6 deletions demo/demo_refine.jl
@@ -1,3 +1,6 @@
+# Script that loads a previously optimized MERA from disk, and optimizes it further
+# ("refines" it).
+
 using ArgParse
 using LinearAlgebra
 using TensorKit
@@ -10,14 +13,14 @@ function parse_pars()
     @add_arg_table(settings
         , "--model", arg_type=String, default="Ising"
         , "--meratype", arg_type=String, default="binary"
-        , "--threads", arg_type=Int, default=1
-        , "--chi", arg_type=Int, default=3
+        , "--threads", arg_type=Int, default=1  # For BLAS parallelization
+        , "--chi", arg_type=Int, default=3  # Bond dimension
         , "--layers", arg_type=Int, default=3
-        , "--symmetry", arg_type=String, default="none"
-        , "--block_size", arg_type=Int, default=2
+        , "--symmetry", arg_type=String, default="none"  # "none" or "group"
+        , "--block_size", arg_type=Int, default=2  # Block two sites to start
         , "--reps", arg_type=Int, default=1000
-        , "--h", arg_type=Float64, default=1.0
-        , "--Delta", arg_type=Float64, default=-0.5
+        , "--h", arg_type=Float64, default=1.0  # External field of Ising
+        , "--Delta", arg_type=Float64, default=-0.5  # Isotropicity in XXZ
         , "--datafolder", arg_type=String, default="JLMdata"
     )
     pars = parse_args(ARGS, settings; as_symbols=true)
29 changes: 16 additions & 13 deletions demo/demo_tools.jl
@@ -1,3 +1,5 @@
+# Utilities for demo.jl and demo_refine.jl.
+
 module DemoTools

 using LinearAlgebra
@@ -15,7 +17,8 @@ export get_optimized_mera
 """
 Take a two-site operator `op` that defines the local term of a global operator, and block
 sites together to form a new two-site operator for which each site corresponds to
-`num_sites` sites of the original operator. `num_sites` should be a power of 2.
+`num_sites` sites of the original operator, and which sums up to the same global operator.
+`num_sites` should be a power of 2.
 """
 function block_op(op::SquareTensorMap{2}, num_sites)
     while num_sites > 1
@@ -43,7 +46,8 @@ end
 """
 Take a one-site operator `op` that defines the local term of a global operator, and block
 sites together to form a new one-site operator for which each site corresponds to
-`num_sites` sites of the original operator. `num_sites` should be a power of 2.
+`num_sites` sites of the original operator, and which sums up to the same global operator.
+`num_sites` should be a power of 2.
 """
 function block_op(op::SquareTensorMap{1}, num_sites)
     while num_sites > 1
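The blocking described in these docstrings can be sketched with plain matrices and `kron`, ignoring TensorKit and boundary terms; `block_once` and its arguments are illustrative names, not part of this repository. For a two-site term on C^d ⊗ C^d, one blocking step keeps the cross-boundary term whole and splits each intra-block term between the two blocked terms that contain it, so the global sum is unchanged:

    using LinearAlgebra

    # One blocking step for a two-site term h, dense-matrix version.
    function block_once(h::AbstractMatrix, d::Int)
        id = Matrix{eltype(h)}(I, d, d)
        cross = kron(id, h, id)                    # h on original sites 2,3: crosses the block boundary
        intra = kron(h, id, id) + kron(id, id, h)  # h on sites 1,2 and on sites 3,4
        return cross + intra/2                     # each intra-block term is shared by two blocked terms
    end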
@@ -71,10 +75,10 @@ semidefinite. Return the normalized operator and the constant that was subtracted.
 """
 function normalize_H(H)
     # TODO Switch to using an eigendecomposition?
-    D_max = norm(H)
-    bigeye = TensorMap(I, codomain(H) ← domain(H))
-    H = H - bigeye*D_max
-    return H, D_max
+    c = norm(H)
+    eye = TensorMap(I, codomain(H) ← domain(H))
+    H = H - eye*c
+    return H, c
 end

 """
@@ -107,8 +111,8 @@ function build_H_XXZ(Delta=0.0; symmetry="none", block_size=1)
     end
     H = -(XXplusYY + Delta*ZZ)
     H = block_op(H, block_size)
-    H, D_max = normalize_H(H)
-    return H, D_max
+    H, c = normalize_H(H)
+    return H, c
 end

 """
@@ -155,13 +159,12 @@ function build_H_Ising(h=1.0; symmetry="none", block_size=1)
         error("Unknown symmetry $symmetry")
     end
     H = block_op(H, block_size)
-    H, D_max = normalize_H(H)
-    return H, D_max
+    H, c = normalize_H(H)
+    return H, c
 end

 """
-Return the magnetization operator for the Ising model, possibly blocked over `block_size`
-sites.
+Return the magnetization operator for the Ising model, blocked over `block_size` sites.
 """
 function build_magop(;block_size=1)
     V = ℂ^2
@@ -176,7 +179,7 @@ end
 Given the normalization and block_size constants used in creating a Hamiltonian, and the
 expectation value of the normalized and blocked Hamiltonian, return the actual energy.
 """
-normalize_energy(energy, D_max, block_size) = (energy + D_max)/block_size
+normalize_energy(energy, c, block_size) = (energy + c)/block_size

 # # # Functions for reading and writing to disk.

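As a concrete check of normalize_energy, with made-up numbers: if normalize_H subtracted c = 2.5 and two sites were blocked into one (block_size = 2), then a measured blocked energy of -3.0 gives normalize_energy(-3.0, 2.5, 2) = (-3.0 + 2.5)/2 = -0.25 per original lattice site.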
96 changes: 60 additions & 36 deletions src/binarylayer.jl
@@ -1,6 +1,23 @@
 # BinaryLayer and BinaryMERA types, and methods thereof.
 # To be `included` in MERA.jl.

+# # # The core stuff
+
+# Index numbering convention is as follows, where the physical indices are at the bottom:
+# Disentangler:
+#  3|   4|
+#  +------+
+#  |  u   |
+#  +------+
+#  1|   2|
+#
+# Isometry:
+#     3|
+#  +------+
+#  |  w   |
+#  +------+
+#  1|   2|
+
 # TODO We could parametrise this as BinaryLayer{T1, T2}, disentangler::T1, isometry::T2.
 # Would this be good, because it would increase type stability, or bad because it would
 # cause unnecessary recompilation?
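For reference, the parametrisation that TODO contemplates would look something like the following sketch (not part of the commit). Concrete type parameters would let the compiler specialize on the exact TensorMap types, at the price of recompiling for each new combination:

    struct BinaryLayer{T1, T2}
        disentangler::T1
        isometry::T2
    end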
@@ -13,7 +30,7 @@ BinaryMERA = GenericMERA{BinaryLayer}

 Base.convert(::Type{BinaryLayer}, tuple::Tuple) = BinaryLayer(tuple...)

-# Implement the iteration and indexing interfaces.
+# Implement the iteration and indexing interfaces. Allows things like `u, w = layer`.
 Base.iterate(layer::BinaryLayer) = (layer.disentangler, 1)
 Base.iterate(layer::BinaryLayer, state) = state == 1 ? (layer.isometry, 2) : nothing
 Base.eltype(::Type{BinaryLayer}) = TensorMap
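A small usage sketch of what these interfaces enable, assuming `u` and `w` are a disentangler and an isometry built elsewhere:

    layer = BinaryLayer(u, w)
    u2, w2 = layer  # destructuring, via the Base.iterate methods above
    w3 = layer[2]   # indexing, via the Base.getindex method below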
@@ -27,14 +44,14 @@ function Base.getindex(layer::BinaryLayer, i)
 end

 Base.eltype(layer::BinaryLayer) = reduce(promote_type, map(eltype, layer))
+Base.copy(layer::BinaryLayer) = BinaryLayer(map(deepcopy, layer)...)

 """
 The ratio by which the number of sites changes when going down through this layer.
 """
 scalefactor(::Type{BinaryMERA}) = 2

 get_disentangler(m::BinaryMERA, depth) = get_layer(m, depth).disentangler
-
 get_isometry(m::BinaryMERA, depth) = get_layer(m, depth).isometry

 function set_disentangler!(m::BinaryMERA, u, depth; kwargs...)
@@ -49,37 +66,12 @@ end

 causal_cone_width(::Type{BinaryLayer}) = 3

-"""
-Check the compatibility of the legs connecting the disentanglers and the isometries.
-Return true/false.
-"""
-function space_invar_intralayer(layer::BinaryLayer)
-    u, w = layer
-    matching_bonds = [(space(u, 3)', space(w, 2)),
-                      (space(u, 4)', space(w, 1))]
-    allmatch = all([==(pair...) for pair in matching_bonds])
-    return allmatch
-end
-
-"""
-Check the compatibility of the legs connecting the isometries of the first layer to the
-disentanglers of the layer above it. Return true/false.
-"""
-function space_invar_interlayer(layer::BinaryLayer, next_layer::BinaryLayer)
-    u, w = layer.disentangler, layer.isometry
-    unext, wnext = next_layer.disentangler, next_layer.isometry
-    matching_bonds = [(space(w, 3)', space(unext, 1)),
-                      (space(w, 3)', space(unext, 2))]
-    allmatch = all([==(pair...) for pair in matching_bonds])
-    return allmatch
-end
-
 outputspace(layer::BinaryLayer) = space(layer.disentangler, 1)
 inputspace(layer::BinaryLayer) = space(layer.isometry, 3)'

 """
-Return a new layer where the isometries have been padded with zeros to change the top vector
-space to be V_new.
+Return a new layer where the isometries have been padded with zeros to change the input
+(top) vector space to be V_new.
 """
 function expand_inputspace(layer::BinaryLayer, V_new)
     u, w = layer
@@ -89,7 +81,7 @@ end

 """
 Return a new layer where the disentanglers and isometries have been padded with zeros to
-change the bottom vector space to be V_new.
+change the output (bottom) vector space to be V_new.
 """
 function expand_outputspace(layer::BinaryLayer, V_new)
     u, w = layer
@@ -107,16 +99,42 @@ If `random_disentangler=true`, the disentangler is also a random unitary, if `false`
 (default), it is the identity.
 """
 function randomlayer(::Type{BinaryLayer}, Vin, Vout; random_disentangler=false)
-    u = (random_disentangler ?
-         randomisometry(Vout ⊗ Vout, Vout ⊗ Vout)
-         : identitytensor(Vout ⊗ Vout, Vout ⊗ Vout))
+    ufunc = random_disentangler ? randomisometry : identitytensor
+    u = ufunc(Vout ⊗ Vout, Vout ⊗ Vout)
     w = randomisometry(Vout ⊗ Vout, Vin)
     return BinaryLayer(u, w)
 end

 pseudoserialize(layer::BinaryLayer) = (repr(BinaryLayer), map(pseudoserialize, layer))
 depseudoserialize(::Type{BinaryLayer}, data) = BinaryLayer([depseudoserialize(d...)
-                                                            for d in data]...)
+                                                           for d in data]...)
+
+# # # Invariants
+
+"""
+Check the compatibility of the legs connecting the disentanglers and the isometries.
+Return true/false.
+"""
+function space_invar_intralayer(layer::BinaryLayer)
+    u, w = layer
+    matching_bonds = [(space(u, 3)', space(w, 2)),
+                      (space(u, 4)', space(w, 1))]
+    allmatch = all([==(pair...) for pair in matching_bonds])
+    return allmatch
+end
+
+"""
+Check the compatibility of the legs connecting the isometries of the first layer to the
+disentanglers of the layer above it. Return true/false.
+"""
+function space_invar_interlayer(layer::BinaryLayer, next_layer::BinaryLayer)
+    u, w = layer.disentangler, layer.isometry
+    unext, wnext = next_layer.disentangler, next_layer.isometry
+    matching_bonds = [(space(w, 3)', space(unext, 1)),
+                      (space(w, 3)', space(unext, 2))]
+    allmatch = all([==(pair...) for pair in matching_bonds])
+    return allmatch
+end

 # # # Ascending and descending superoperators
@@ -158,12 +176,13 @@ end

 # TODO Would there be a nice way of doing this where I wouldn't have to replicate all the
 # network contractions? @ncon could do it, but Jutho's testing says it's significantly
-# slower.
+# slower. This is only used for diagonalizing in charge sectors, so having tensors with
+# non-trivial charge would also solve this.
 """
 Ascend a threesite `op` with an extra free leg from the bottom of the given layer to the
 top.
 """
-function ascend(op::TensorMap{S1,3,4,S2,T1,T2,T3}, layer::BinaryLayer, pos=:avg) where {S1, S2, T1, T2, T3}
+function ascend(op::TensorMap{S1,3,4}, layer::BinaryLayer, pos=:avg) where {S1}
     u, w = layer
     u_dg = u'
     w_dg = w'
@@ -246,6 +265,11 @@ end
 """
 Loop over the tensors of the layer, optimizing each one in turn to minimize the expectation
 value of `h`. `rho` is the density matrix right above this layer.
+
+Three parameters are expected to be in the dictionary `pars`:
+    :layer_iters, for how many times to loop over the tensors within a layer,
+    :disentangler_iters, for how many times to loop over the disentangler,
+    :isometry_iters, for how many times to loop over the isometry.
 """
 function minimize_expectation_layer(h, layer::BinaryLayer, rho, pars; do_disentanglers=true)
     for i in 1:pars[:layer_iters]
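A sketch of a `pars` dictionary satisfying that contract, with made-up values:

    pars = Dict(:layer_iters => 1,         # sweeps over (u, w) per call
                :disentangler_iters => 3,  # u updates per sweep
                :isometry_iters => 3)      # w updates per sweep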