diff --git a/src/binarylayer.jl b/src/binarylayer.jl
index 2d11413..7d643ef 100644
--- a/src/binarylayer.jl
+++ b/src/binarylayer.jl
@@ -235,6 +235,18 @@ end
 const BinaryOperator{S} = AbstractTensorMap{S,3,3}
 const ChargedBinaryOperator{S} = AbstractTensorMap{S,3,4}
 
+function ascend(op::Union{BinaryOperator, ChargedBinaryOperator}, layer::BinaryLayer)
+    l = ascend_left(op, layer)
+    r = ascend_right(op, layer)
+    scaled_op = (l+r)/2
+    return scaled_op
+end
+
+ascend(op::Union{SquareTensorMap{1}, SquareTensorMap{2}}, layer::BinaryLayer) =
+    ascend(expand_support(op, causal_cone_width(BinaryLayer)), layer)
+
+# TODO Think about how to best remove the code duplication of having the separate methods
+# for ordinary, charged, and double charged operators.
 function ascend_left(op::ChargedBinaryOperator, layer::BinaryLayer)
     u, w = layer
     @tensor(
@@ -262,7 +274,7 @@ function ascend_right(op::ChargedBinaryOperator, layer::BinaryLayer)
 end
 
 # TODO Figure out how to deal with the extra charge legs in the case of anyonic tensors.
-function ascend_left(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::BinaryOperator, layer::BinaryLayer)
     u, w = layer
     @tensor(
         scaled_op[-100 -200 -300; -400 -500 -600] :=
@@ -275,7 +287,7 @@ function ascend_left(op::BinaryOperator, layer::BinaryLayer{GradedSpace[Fibonacc
     return scaled_op
 end
 
-function ascend_right(op::BinaryOperator, layer::BinaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::BinaryOperator, layer::BinaryLayer)
     u, w = layer
     @tensor(
         scaled_op[-100 -200 -300; -400 -500 -600] :=
@@ -288,23 +300,6 @@ function ascend_right(op::BinaryOperator, layer::BinaryLayer{GradedSpace[Fibonac
     return scaled_op
 end
 
-function ascend(op::Union{BinaryOperator, ChargedBinaryOperator}, layer::BinaryLayer)
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    scaled_op = (l+r)/2
-    return scaled_op
-end
-
-# Turn any BinaryOperator into a ChargedBinaryOperator with trivial charge, and then back.
-ascend_left(op::BinaryOperator, layer::BinaryLayer) =
-    remove_dummy_index(ascend_left(append_dummy_index(op), layer))
-
-ascend_right(op::BinaryOperator, layer::BinaryLayer) =
-    remove_dummy_index(ascend_right(append_dummy_index(op), layer))
-
-ascend(op::Union{SquareTensorMap{1}, SquareTensorMap{2}}, layer::BinaryLayer) =
-    ascend(expand_support(op, causal_cone_width(BinaryLayer)), layer)
-
 function descend_left(rho::BinaryOperator, layer::BinaryLayer)
     u, w = layer
     @tensor(
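Usage sketch for the relocated `ascend` entry point (illustrative only, not part of the patch; assumes a `BinaryLayer` called `layer` and its site space `V` are in scope, as in the test setup):

    h = TensorMap(randn, Float64, V ⊗ V ← V ⊗ V)  # a two-site SquareTensorMap{2}
    # Dispatch expands h to the width-3 causal cone and averages the two placements:
    h_up = ascend(h, layer)                       # (ascend_left + ascend_right)/2

Note that the direct `@tensor` methods for uncharged operators, previously specialised to `BinaryLayer{GradedSpace[FibonacciAnyon]}`, now serve every space type, replacing the dummy-charge-index round trip.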
+""" +function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer + ) where {S1, T <: Union{SquareTensorMap{2}, + AbstractTensorMap{S1,2,3}}} + l = ascend_left(op, layer) + r = ascend_right(op, layer) + m = ascend_mid(op, layer) + b = ascend_between(op, layer) + scaled_op_mid = (l+r+m) / 2 + scaled_op_gap = b / 2 + scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap) + return scaled_op +end + +function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer + ) where {T <: SquareTensorMap{1}} + op = expand_support(op, causal_cone_width(ModifiedBinaryLayer)) + return ascend(op, layer) +end + +function ascend(op::AbstractTensorMap, layer::ModifiedBinaryLayer) + op = ModifiedBinaryOp(op) + return ascend(op, layer) +end + +# TODO Think about how to best remove the code duplication of having the separate methods +# for ordinary, charged, and double charged operators. function ascend_left(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer ) where T <: SquareTensorMap{2} u, wl, wr = layer @@ -324,21 +354,6 @@ function ascend_between(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer return scaled_op end -""" -Ascend a two-site `op` from the bottom of the given layer to the top. -""" -function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer - ) where T <: SquareTensorMap{2} - l = ascend_left(op, layer) - r = ascend_right(op, layer) - m = ascend_mid(op, layer) - b = ascend_between(op, layer) - scaled_op_mid = (l+r+m) / 2.0 - scaled_op_gap = b / 2.0 - scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap) - return scaled_op -end - function ascend_left(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer ) where {S1, T <: AbstractTensorMap{S1,2,3}} u, wl, wr = layer @@ -401,34 +416,6 @@ function ascend_between(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer return scaled_op end -function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer - ) where {S1, T <: AbstractTensorMap{S1,2,3}} - u, wl, wr = layer - op_mid, op_gap = op - l = ascend_left(op, layer) - r = ascend_right(op, layer) - m = ascend_mid(op, layer) - b = ascend_between(op, layer) - scaled_op_mid = (l+r+m) / 2.0 - scaled_op_gap = b / 2.0 - scaled_op = ModifiedBinaryOp(scaled_op_mid, scaled_op_gap) - return scaled_op -end - -function ascend(op::ModifiedBinaryOp{T}, layer::ModifiedBinaryLayer - ) where {T <: SquareTensorMap{1}} - op = expand_support(op, causal_cone_width(ModifiedBinaryLayer)) - return ascend(op, layer) -end - -function ascend(op::T, layer::ModifiedBinaryLayer - ) where {S1, T <: Union{SquareTensorMap{1}, - SquareTensorMap{2}, - AbstractTensorMap{S1,2,3}}} - op = ModifiedBinaryOp(op) - return ascend(op, layer) -end - function descend_left(rho::ModifiedBinaryOp, layer::ModifiedBinaryLayer) u, wl, wr = layer rho_mid, rho_gap = rho diff --git a/src/ternarylayer.jl b/src/ternarylayer.jl index 0ea6bea..7f0a0a6 100644 --- a/src/ternarylayer.jl +++ b/src/ternarylayer.jl @@ -242,6 +242,23 @@ const TernaryOperator{S} = AbstractTensorMap{S, 2, 2} const ChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 3} const DoubleChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 4} +function ascend(op::Union{TernaryOperator, + ChargedTernaryOperator, + DoubleChargedTernaryOperator}, + layer::TernaryLayer) where {S1} + u, w = layer + l = ascend_left(op, layer) + r = ascend_right(op, layer) + m = ascend_mid(op, layer) + scaled_op = (l+r+m)/3 + return scaled_op +end + +ascend(op::SquareTensorMap{1}, layer::TernaryLayer) = + ascend(expand_support(op, causal_cone_width(TernaryLayer)), layer) + +# TODO 
diff --git a/src/ternarylayer.jl b/src/ternarylayer.jl
index 0ea6bea..7f0a0a6 100644
--- a/src/ternarylayer.jl
+++ b/src/ternarylayer.jl
@@ -242,6 +242,22 @@ const TernaryOperator{S} = AbstractTensorMap{S, 2, 2}
 const ChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 3}
 const DoubleChargedTernaryOperator{S} = AbstractTensorMap{S, 2, 4}
 
+function ascend(op::Union{TernaryOperator,
+                          ChargedTernaryOperator,
+                          DoubleChargedTernaryOperator},
+                layer::TernaryLayer)
+    l = ascend_left(op, layer)
+    r = ascend_right(op, layer)
+    m = ascend_mid(op, layer)
+    scaled_op = (l+r+m)/3
+    return scaled_op
+end
+
+ascend(op::SquareTensorMap{1}, layer::TernaryLayer) =
+    ascend(expand_support(op, causal_cone_width(TernaryLayer)), layer)
+
+# TODO Think about how to best remove the code duplication of having the separate methods
+# for ordinary, charged, and double charged operators.
 function ascend_left(op::ChargedTernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 2X^8 + 2X^7 + 2X^6
@@ -284,32 +300,8 @@ function ascend_mid(op::ChargedTernaryOperator, layer::TernaryLayer)
     return scaled_op
 end
 
-function ascend(op::Union{TernaryOperator,
-                          ChargedTernaryOperator,
-                          DoubleChargedTernaryOperator},
-                layer::TernaryLayer) where {S1}
-    u, w = layer
-    l = ascend_left(op, layer)
-    r = ascend_right(op, layer)
-    m = ascend_mid(op, layer)
-    scaled_op = (l+r+m)/3
-    return scaled_op
-end
-
-ascend_left(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_left(append_dummy_index(op), layer))
-
-ascend_right(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_right(append_dummy_index(op), layer))
-
-ascend_mid(op::TernaryOperator, layer::TernaryLayer) =
-    remove_dummy_index(ascend_mid(append_dummy_index(op), layer))
-
-ascend(op::SquareTensorMap{1}, layer::TernaryLayer) =
-    ascend(expand_support(op, causal_cone_width(TernaryLayer)), layer)
-
 # TODO Figure out how to deal with the extra charge legs in the case of anyonic tensors.
-function ascend_left(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::TernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 2X^8 + 2X^7 + 2X^6
     @tensor(
@@ -323,7 +315,7 @@ function ascend_left(op::TernaryOperator, layer::TernaryLayer{GradedSpace[Fibona
     return scaled_op
 end
 
-function ascend_right(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::TernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 2X^8 + 2X^7 + 2X^6
     @tensor(
@@ -337,7 +329,7 @@ function ascend_right(op::TernaryOperator, layer::TernaryLayer{GradedSpace[Fibon
     return scaled_op
 end
 
-function ascend_mid(op::TernaryOperator, layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_mid(op::TernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 6X^6
     @tensor(
@@ -351,8 +343,7 @@
     return scaled_op
 end
 
-function ascend_left(op::DoubleChargedTernaryOperator,
-                     layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_left(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 2X^8 + 2X^7 + 2X^6
     @tensor(
@@ -373,8 +364,7 @@
     return scaled_op
 end
 
-function ascend_right(op::DoubleChargedTernaryOperator,
-                      layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_right(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 2X^8 + 2X^7 + 2X^6
     @tensor(
@@ -395,8 +385,7 @@
     return scaled_op
 end
 
-function ascend_mid(op::DoubleChargedTernaryOperator,
-                    layer::TernaryLayer{GradedSpace[FibonacciAnyon]})
+function ascend_mid(op::DoubleChargedTernaryOperator, layer::TernaryLayer)
     u, w = layer
     # Cost: 6X^6
     @tensor(
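The three ternary superoperator placements are weighted equally, hence `(l+r+m)/3`. A sketch of ascending a one-site operator (illustrative; assumes `layer::TernaryLayer` over site space `V`):

    x = TensorMap(randn, Float64, V ← V)                     # a SquareTensorMap{1}
    x2 = expand_support(x, causal_cone_width(TernaryLayer))  # two-site TernaryOperator
    ascend(x, layer) ≈ ascend(x2, layer)                     # the one-site method expands first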
diff --git a/test/runtests.jl b/test/runtests.jl
index 6289df4..77930a5 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -14,6 +14,17 @@ function particle_number_operator(::Type{GradedSpace[FibonacciAnyon]})
     return z
 end
 
+function particle_number_operator(::Type{GradedSpace[IsingAnyon]})
+    V = GradedSpace[IsingAnyon](IsingAnyon(:I) => 1,
+                                IsingAnyon(:σ) => 1,
+                                IsingAnyon(:ψ) => 1)
+    z = TensorMap(zeros, Float64, V ← V)
+    z.data[IsingAnyon(:I)] .= 0.0
+    z.data[IsingAnyon(:σ)] .= 1.0
+    z.data[IsingAnyon(:ψ)] .= 0.0
+    return z
+end
+
 function particle_number_operator(::Type{Z2Space})
     V = Z2Space(ℤ₂(0) => 1, ℤ₂(1) => 1)
     z = TensorMap(zeros, Float64, V ← V)
@@ -38,6 +49,17 @@ function random_space(::Type{GradedSpace[FibonacciAnyon]}, dlow=2, dhigh=6)
     return V
 end
 
+function random_space(::Type{GradedSpace[IsingAnyon]}, dlow=3, dhigh=6)
+    dtotal = rand(dlow:dhigh)
+    d0 = rand(1:dtotal-2)
+    d1 = rand(1:(dtotal-d0-1))
+    d2 = dtotal - d0 - d1
+    V = GradedSpace[IsingAnyon](IsingAnyon(:I) => d0,
+                                IsingAnyon(:σ) => d1,
+                                IsingAnyon(:ψ) => d2)
+    return V
+end
+
 function random_space(::Type{Z2Space}, dlow=2, dhigh=6)
     dtotal = rand(dlow:dhigh)
     d0 = rand(1:dtotal-1)
@@ -520,7 +542,7 @@ function test_optimization(::Type{M}, ::Type{S}, method, precondition=false) whe
     expectation = expect(ham, m)
     @test abs(expectation + 1.0) < eps
     # TODO We used to check here that these entropies are small, but this doesn't hold for
-    # FibonacciAnyons anymore. Figure out what's a good check.
+    # anyons. Figure out what's a good check.
     entropies = densitymatrix_entropies(m)
 end
 
@@ -613,7 +635,7 @@ end
 Random.seed!(1) # For reproducing the same tests again and again.
 
 meratypes = (ModifiedBinaryMERA, BinaryMERA, TernaryMERA)
-spacetypes = (ComplexSpace, Z2Space, GradedSpace[FibonacciAnyon])
+spacetypes = (ComplexSpace, Z2Space, GradedSpace[FibonacciAnyon], GradedSpace[IsingAnyon])
 
 # Run the tests on different MERAs and vector spaces.
 # Basics
@@ -645,7 +667,9 @@ end
 @testset "Removing symmetry" begin
     # This doesn't make sense for anyons.
-    test_with_all_types(test_remove_symmetry, meratypes, (ComplexSpace, Z2Space))
+    nonanyonic_spacetypes = (ST for ST in spacetypes
+                             if BraidingStyle(sectortype(ST)) != Anyonic())
+    test_with_all_types(test_remove_symmetry, meratypes, nonanyonic_spacetypes)
 end
 @testset "Reset storage" begin
     test_with_all_types(test_reset_storage, meratypes, spacetypes)
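A quick way to see which space types the new `BraidingStyle` filter keeps (illustrative REPL check, assuming TensorKit is loaded):

    BraidingStyle(sectortype(ComplexSpace))             # Bosonic() -> kept
    BraidingStyle(sectortype(Z2Space))                  # Bosonic() -> kept
    BraidingStyle(sectortype(GradedSpace[IsingAnyon]))  # Anyonic() -> filtered out

Only the anyonic spaces are excluded from the symmetry-removal test, which is what the hard-coded `(ComplexSpace, Z2Space)` tuple achieved before `GradedSpace[IsingAnyon]` joined `spacetypes`.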