Support and tests for IsingAnyons
mhauru committed Sep 30, 2020
1 parent ba473a2 commit d355e80
Showing 4 changed files with 94 additions and 98 deletions.
src/binarylayer.jl (33 changes: 14 additions & 19 deletions)
@@ -235,6 +235,18 @@ end
const BinaryOperator{S} = AbstractTensorMap{S,3,3}
const ChargedBinaryOperator{S} = AbstractTensorMap{S,3,4}

+function ascend(op::Union{BinaryOperator, ChargedBinaryOperator}, layer::BinaryLayer)
+    l = ascend_left(op, layer)
+    r = ascend_right(op, layer)
+    scaled_op = (l+r)/2
+    return scaled_op
+end
+
+ascend(op::Union{SquareTensorMap{1}, SquareTensorMap{2}}, layer::BinaryLayer) =
+    ascend(expand_support(op, causal_cone_width(BinaryLayer)), layer)
+
+# TODO Think about how to best remove the code duplication of having the separate methods
+# for ordinary, charged, and double charged operators.
function ascend_left(op::ChargedBinaryOperator, layer::BinaryLayer)
    u, w = layer
    @tensor(
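The new top-level `ascend` simply averages the left- and right-moving causal-cone contributions. A minimal self-contained sketch of that dispatch shape (the toy_* names are illustrative stand-ins for the @tensor contractions, not package code):

# Stand-ins for ascend_left/ascend_right; only the (l + r) / 2 averaging matters here.
toy_ascend_left(op) = 2 .* op
toy_ascend_right(op) = 4 .* op
toy_ascend(op) = (toy_ascend_left(op) + toy_ascend_right(op)) / 2

toy_ascend([1.0 0.0; 0.0 1.0])  # == [3.0 0.0; 0.0 3.0]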
@@ -262,7 +274,7 @@ function ascend_right(op::ChargedBinaryOperator, layer::BinaryLayer)
end

# TODO Figure out how to deal with the extra charge legs in the case of anyonic tensors.
-function ascend_left(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::BinaryOperator, layer::BinaryLayer)
    u, w = layer
    @tensor(
        scaled_op[-100 -200 -300; -400 -500 -600] :=
@@ -275,7 +287,7 @@ function ascend_left(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
    return scaled_op
end

-function ascend_right(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::BinaryOperator, layer::BinaryLayer)
    u, w = layer
    @tensor(
        scaled_op[-100 -200 -300; -400 -500 -600] :=
@@ -288,23 +300,6 @@ function ascend_right(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
    return scaled_op
end

-function ascend(op::Union{BinaryOperator, ChargedBinaryOperator}, layer::BinaryLayer)
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    scaled_op = (l+r)/2
-    return scaled_op
-end
-
-# Turn any BinaryOperator into a ChargedBinaryOperator with trivial charge, and then back.
-ascend_left(op::BinaryOperator, layer::BinaryLayer) =
-    remove_dummy_index(ascend_left(append_dummy_index(op), layer))
-
-ascend_right(op::BinaryOperator, layer::BinaryLayer) =
-    remove_dummy_index(ascend_right(append_dummy_index(op), layer))
-
-ascend(op::Union{SquareTensorMap{1}, SquareTensorMap{2}}, layer::BinaryLayer) =
-    ascend(expand_support(op, causal_cone_width(BinaryLayer)), layer)
-
function descend_left(rho::BinaryOperator, layer::BinaryLayer)
    u, w = layer
    @tensor(
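The deleted block above shows the old strategy: uncharged operators were routed through `append_dummy_index`/`remove_dummy_index` so that only the charged methods needed @tensor definitions. A hedged sketch of the two operator shapes involved, written with plain TensorKit constructors (the Z2 spaces here are made up for illustration):

using TensorKit
V = Z2Space(ℤ₂(0) => 1, ℤ₂(1) => 1)
# An ordinary three-site operator, shape (3, 3) like BinaryOperator above:
op = TensorMap(randn, Float64, V ⊗ V ⊗ V ← V ⊗ V ⊗ V)
# Its charged cousin carries one extra domain leg, shape (3, 4) like
# ChargedBinaryOperator; the old code faked this with a trivial dummy leg.
C = Z2Space(ℤ₂(1) => 1)
op_charged = TensorMap(randn, Float64, V ⊗ V ⊗ V ← V ⊗ V ⊗ V ⊗ C)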
src/modifiedbinarylayer.jl (73 changes: 30 additions & 43 deletions)
@@ -262,6 +262,36 @@ end

# # # Ascending and descending superoperators


"""
Ascend a two-site `op` from the bottom of the given layer to the top.
"""
function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
) where {S1, T <: Union{SquareTensorMap{2},
AbstractTensorMap{S1,2,3}}}
l = ascend_left(op, layer)
r = ascend_right(op, layer)
m = ascend_mid(op, layer)
b = ascend_between(op, layer)
scaled_op_mid = (l+r+m) / 2
scaled_op_gap = b / 2
scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap)
return scaled_op
end

function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
) where {T <: SquareTensorMap{1}}
op = expand_support(op, causal_cone_width(ModifiedBinaryLayer))
return ascend(op, layer)
end

function ascend(op::AbstractTensorMap, layer::ModifiedBinaryLayer)
op = ModifiedBinaryOp(op)
return ascend(op, layer)
end

# TODO Think about how to best remove the code duplication of having the separate methods
# for ordinary, charged, and double charged operators.
function ascend_left(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
) where T <: SquareTensorMap{2}
u, wl, wr = layer
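The methods above rely on `ModifiedBinaryOp` behaving like a (mid, gap) pair that can be destructured, as in `op_mid, op_gap = op` further down. A self-contained toy mirroring that wrapper pattern (`PairOp` is illustrative, not the package's actual definition):

# One wrapper holding the two inequivalent operators of the modified binary MERA.
struct PairOp{T}
    mid::T
    gap::T
end
PairOp(x) = PairOp(x, x)  # same tensor used on both sublattices
Base.iterate(p::PairOp, s=1) = s == 1 ? (p.mid, 2) : s == 2 ? (p.gap, 3) : nothing

op_mid, op_gap = PairOp([0.0 1.0; 1.0 0.0])  # destructuring as in the code above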
@@ -324,21 +354,6 @@ function ascend_between(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
    return scaled_op
end

-"""
-Ascend a two-site `op` from the bottom of the given layer to the top.
-"""
-function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
-               ) where T <: SquareTensorMap{2}
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    m = ascend_mid(op, layer)
-    b = ascend_between(op, layer)
-    scaled_op_mid = (l+r+m) / 2.0
-    scaled_op_gap = b / 2.0
-    scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap)
-    return scaled_op
-end
-
function ascend_left(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
                    ) where {S1, T <: AbstractTensorMap{S1,2,3}}
    u, wl, wr = layer
@@ -401,34 +416,6 @@ function ascend_between(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
    return scaled_op
end

-function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
-               ) where {S1, T <: AbstractTensorMap{S1,2,3}}
-    u, wl, wr = layer
-    op_mid, op_gap = op
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    m = ascend_mid(op, layer)
-    b = ascend_between(op, layer)
-    scaled_op_mid = (l+r+m) / 2.0
-    scaled_op_gap = b / 2.0
-    scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap)
-    return scaled_op
-end
-
-function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer
-               ) where {T <: SquareTensorMap{1}}
-    op = expand_support(op, causal_cone_width(ModifiedBinaryLayer))
-    return ascend(op, layer)
-end
-
-function ascend(op::T, layer::ModifiedBinaryLayer
-               ) where {S1, T <: Union{SquareTensorMap{1},
-                                       SquareTensorMap{2},
-                                       AbstractTensorMap{S1,2,3}}}
-    op = ModifiedBinaryOp(op)
-    return ascend(op, layer)
-end
-
function descend_left(rho::ModifiedBinaryOp, layer::ModifiedBinaryLayer)
    u, wl, wr = layer
    rho_mid, rho_gap = rho
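Several methods above funnel one-site operators through `expand_support` before ascending. A hedged sketch of the idea in plain TensorKit: pad with identities up to the causal-cone width of 2. The exact padding convention is this package's, so the comment on the last line is an assumption:

using TensorKit
V = ℂ^2
op1 = TensorMap(randn, Float64, V ← V)  # a SquareTensorMap{1}
op2 = op1 ⊗ id(V)                       # widened to two sites, a SquareTensorMap{2}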
src/ternarylayer.jl (56 changes: 23 additions & 33 deletions)
@@ -242,6 +242,23 @@ const TernaryOperator{S} = AbstractTensorMap{S, 2, 2}
const ChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 3}
const DoubleChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 4}

+function ascend(op::Union{TernaryOperator,
+                          ChargedTernaryOperator,
+                          DoubleChargedTernaryOperator},
+                layer::TernaryLayer) where {S1}
+    u, w = layer
+    l = ascend_left(op, layer)
+    r = ascend_right(op, layer)
+    m = ascend_mid(op, layer)
+    scaled_op = (l+r+m)/3
+    return scaled_op
+end
+
+ascend(op::SquareTensorMap{1}, layer::TernaryLayer) =
+    ascend(expand_support(op, causal_cone_width(TernaryLayer)), layer)
+
+# TODO Think about how to best remove the code duplication of having the separate methods
+# for ordinary, charged, and double charged operators.
function ascend_left(op::ChargedTernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 2X^8 + 2X^7 + 2X^6
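The `# Cost:` comments annotate the leading contraction cost as a polynomial in the bond dimension X. As a quick numeric reading (the function below just evaluates the annotation):

cost(X) = 2X^8 + 2X^7 + 2X^6
cost(4)  # 172032; the X^8 term already dominates at modest bond dimension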
@@ -284,32 +301,8 @@ function ascend_mid(op::ChargedTernaryOperator, layer::TernaryLayer)
    return scaled_op
end

-function ascend(op::Union{TernaryOperator,
-                          ChargedTernaryOperator,
-                          DoubleChargedTernaryOperator},
-                layer::TernaryLayer) where {S1}
-    u, w = layer
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    m = ascend_mid(op, layer)
-    scaled_op = (l+r+m)/3
-    return scaled_op
-end
-
-ascend_left(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_left(append_dummy_index(op), layer))
-
-ascend_right(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_right(append_dummy_index(op), layer))
-
-ascend_mid(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_mid(append_dummy_index(op), layer))
-
-ascend(op::SquareTensorMap{1}, layer::TernaryLayer) =
-    ascend(expand_support(op, causal_cone_width(TernaryLayer)), layer)
-
# TODO Figure out how to deal with the extra charge legs in the case of anyonic tensors.
-function ascend_left(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::TernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 2X^8 + 2X^7 + 2X^6
    @tensor(
@@ -323,7 +316,7 @@ function ascend_left(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
    return scaled_op
end

-function ascend_right(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::TernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 2X^8 + 2X^7 + 2X^6
    @tensor(
@@ -337,7 +330,7 @@ function ascend_right(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
    return scaled_op
end

-function ascend_mid(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_mid(op::TernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 6X^6
    @tensor(
@@ -351,8 +344,7 @@ function ascend_mid(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
    return scaled_op
end

-function ascend_left(op::DoubleChargedTernaryOperator,
-                     layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 2X^8 + 2X^7 + 2X^6
    @tensor(
@@ -373,8 +365,7 @@ function ascend_left(op::DoubleChargedTernaryOperator,
    return scaled_op
end

-function ascend_right(op::DoubleChargedTernaryOperator,
-                      layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 2X^8 + 2X^7 + 2X^6
    @tensor(
@@ -395,8 +386,7 @@ function ascend_right(op::DoubleChargedTernaryOperator,
    return scaled_op
end

-function ascend_mid(op::DoubleChargedTernaryOperator,
-                    layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_mid(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
    u, w = layer
    # Cost: 6X^6
    @tensor(
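For reference, the three operator shapes named at the top of this file, written out with plain TensorKit constructors (the Z2 spaces are made up for illustration; each extra charge leg sits in the domain):

using TensorKit
V = Z2Space(ℤ₂(0) => 2, ℤ₂(1) => 2)
C = Z2Space(ℤ₂(1) => 1)
op   = TensorMap(randn, Float64, V ⊗ V ← V ⊗ V)          # TernaryOperator: (2, 2)
opc  = TensorMap(randn, Float64, V ⊗ V ← V ⊗ V ⊗ C)      # ChargedTernaryOperator: (2, 3)
opcc = TensorMap(randn, Float64, V ⊗ V ← V ⊗ V ⊗ C ⊗ C)  # DoubleChargedTernaryOperator: (2, 4)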
test/runtests.jl (30 changes: 27 additions & 3 deletions)
@@ -14,6 +14,17 @@ function particle_number_operator(::Type{GradedSpace[FibonacciAnyon]})
    return z
end

+function particle_number_operator(::Type{GradedSpace[IsingAnyon]})
+    V = GradedSpace[IsingAnyon](IsingAnyon(:I) => 1,
+                                IsingAnyon(:σ) => 1,
+                                IsingAnyon(:ψ) => 1)
+    z = TensorMap(zeros, Float64, V ← V)
+    z.data[IsingAnyon(:I)] .= 0.0
+    z.data[IsingAnyon(:σ)] .= 1.0
+    z.data[IsingAnyon(:ψ)] .= 0.0
+    return z
+end
+
function particle_number_operator(::Type{Z2Space})
    V = Z2Space(ℤ₂(0) => 1, ℤ₂(1) => 1)
    z = TensorMap(zeros, Float64, V ← V)
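A hedged spot-check of the new IsingAnyon method above (assumes TensorKit and this test file are loaded; the `.data` block indexing mirrors the function body):

z = particle_number_operator(GradedSpace[IsingAnyon])
z.data[IsingAnyon(:σ)] == ones(1, 1)   # true: only the σ block carries eigenvalue 1
z.data[IsingAnyon(:ψ)] == zeros(1, 1)  # true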
@@ -38,6 +49,17 @@ function random_space(::Type{GradedSpace[FibonacciAnyon]}, dlow=2, dhigh=6)
    return V
end

+function random_space(::Type{GradedSpace[IsingAnyon]}, dlow=3, dhigh=6)
+    dtotal = rand(dlow:dhigh)
+    d0 = rand(1:dtotal-2)
+    d1 = rand(1:(dtotal-d0-1))
+    d2 = dtotal - d0 - d1
+    V = GradedSpace[IsingAnyon](IsingAnyon(:I) => d0,
+                                IsingAnyon(:σ) => d1,
+                                IsingAnyon(:ψ) => d2)
+    return V
+end
+
function random_space(::Type{Z2Space}, dlow=2, dhigh=6)
    dtotal = rand(dlow:dhigh)
    d0 = rand(1:dtotal-1)
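The three-way split in the IsingAnyon `random_space` above keeps every sector at dimension at least 1 (hence `dlow=3`); a self-contained check of that invariant:

dtotal = rand(3:6)
d0 = rand(1:dtotal-2)       # leave room for d1 ≥ 1 and d2 ≥ 1
d1 = rand(1:(dtotal-d0-1))  # leave room for d2 ≥ 1
d2 = dtotal - d0 - d1
@assert d0 + d1 + d2 == dtotal && min(d0, d1, d2) ≥ 1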
@@ -520,7 +542,7 @@ function test_optimization(::Type{M}, ::Type{S}, method, precondition=false)
    expectation = expect(ham, m)
    @test abs(expectation + 1.0) < eps
    # TODO We used to check here that these entropies are small, but this doesn't hold for
-    # FibonacciAnyons anymore. Figure out what's a good check.
+    # anyons. Figure out what's a good check.
    entropies = densitymatrix_entropies(m)
end

@@ -613,7 +635,7 @@ end

Random.seed!(1) # For reproducing the same tests again and again.
meratypes = (ModifiedBinaryMERA, BinaryMERA, TernaryMERA)
-spacetypes = (ComplexSpace, Z2Space, GradedSpace[FibonacciAnyon])
+spacetypes = (ComplexSpace, Z2Space, GradedSpace[FibonacciAnyon], GradedSpace[IsingAnyon])

# Run the tests on different MERAs and vector spaces.
# Basics
@@ -645,7 +667,9 @@ end
end
@testset "Removing symmetry" begin
    # This doesn't make sense for anyons.
-    test_with_all_types(test_remove_symmetry, meratypes, (ComplexSpace, Z2Space))
+    nonanyonic_spacetypes = (ST for ST in spacetypes
+                             if BraidingStyle(sectortype(ST)) != Anyonic())
+    test_with_all_types(test_remove_symmetry, meratypes, nonanyonic_spacetypes)
end
@testset "Reset storage" begin
    test_with_all_types(test_reset_storage, meratypes, spacetypes)
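The filter in the "Removing symmetry" testset above keys off the braiding style of the sector type; a hedged illustration of what it evaluates to (plain TensorKit):

using TensorKit
BraidingStyle(sectortype(Z2Space))                  # Bosonic(): kept by the filter
BraidingStyle(sectortype(GradedSpace[IsingAnyon]))  # Anyonic(): filtered out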
