
Commit b94bd23

add support for grassmann optimization, refactor the sharedmps
1 parent 61cf8ef commit b94bd23

13 files changed

Lines changed: 246 additions & 67 deletions


Project.toml

Lines changed: 10 additions & 4 deletions
@@ -1,19 +1,25 @@
 name = "MPSKitParallel"
 uuid = "9e7a2049-d464-45e7-afa7-8ebdb5f1c98e"
-version = "1.0.0-DEV"
-authors = ["Andreas Feuerpfeil <andreas.feuerpfeil@uni-wuerzburg.de>"]
+version = "0.1.0"
+authors = ["Andreas Feuerpfeil <andreas.feuerpfeil@gmail.com>"]

 [deps]
+MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
 MPSKit = "bb1c41ca-d63c-52ed-829e-0820dda26502"
+MPSKitModels = "ca635005-6f8c-4cd1-b51d-8491250ef2ab"
 MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
+OhMyThreads = "67456a42-1dca-4109-a031-0a68de7e3ad5"
 TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
-TypedDelegation = "bc724e2e-2423-57c2-8f10-788c81380fc8"
+TensorKitManifolds = "11fa318c-39cb-4a83-b1ed-cdc7ba1e3684"

 [compat]
+MPI = "0.20.23"
 MPSKit = "0.13.8"
+MPSKitModels = "0.4.4"
 MacroTools = "0.5.16"
+OhMyThreads = "0.8.3"
 TensorKit = "0.15.2"
-TypedDelegation = "0.6.0"
+TensorKitManifolds = "0.7.3"
 julia = "1.6.7"

 [extras]

examples/Ising.jl

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+using MPSKit
+using MPSKitModels
+using TensorKit
+
+symmetry = TensorKit.SU2Irrep
+spin = 1
+J = 1
+chain = InfiniteChain(1)
+H = heisenberg_XXX(symmetry, chain; J, spin);
+physical_space = SU2Space(1 => 1);
+virtual_space_inf = Rep[SU₂](1 // 2 => 16, 3 // 2 => 16, 5 // 2 => 8, 7 // 2 => 4);
+ψ₀_inf = InfiniteMPS([physical_space], [virtual_space_inf]);
+ψ_inf, envs_inf, delta_inf = find_groundstate(ψ₀_inf, H; verbosity = 3);
+
+
+using MPSKitParallel
+using MPI
+H_mpi = MPIOperator(H);
+MPI.Init()
+ψ_infmpi, envs_infmpi, delta_infmpi = find_groundstate(ψ₀_inf, H_mpi; verbosity = 3);
+abs(dot(ψ_inf, ψ_infmpi))

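The example runs the same ground-state search twice, once with the plain Hamiltonian and once through the MPIOperator wrapper, and the final abs(dot(ψ_inf, ψ_infmpi)) checks that both results agree (overlap ≈ 1). To actually spread the work over several ranks, the script has to be launched with one Julia process per MPI rank; a minimal launch sketch, assuming 4 ranks and that MPI.mpiexec() resolves to the launcher of the MPI build configured for MPI.jl:

using MPI
# start 4 MPI ranks, each executing the example with the project environment active
run(`$(MPI.mpiexec()) -n 4 julia --project examples/Ising.jl`)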
src/MPIOperator/derivatives.jl

Lines changed: 26 additions & 2 deletions
@@ -1,3 +1,27 @@
-@forward_3_1_astype MPIOperator.parent MPSKit.C_hamiltonian, MPSKit.AC_hamiltonian, MPSKit.AC2_hamiltonian, MPSKit.C_projection, MPSKit.AC_projection, MPSKit.AC2_projection
+# This does not work, as this is not a more specific type than the generic constructors in MPSKit, so it will never be executed:
 
-## This takes ..._hamiltonian(site::Int, below, operator::MPIOperator, above, envs)=MPIOperator(MPSKit...._hamiltonian(site, below, parent(operator), above, envs))
+# @forward_3_1_astype MPIOperator.parent MPSKit.C_hamiltonian, MPSKit.AC_hamiltonian, MPSKit.AC2_hamiltonian, MPSKit.C_projection, MPSKit.AC_projection, MPSKit.AC2_projection
+
+function MPSKit.C_hamiltonian(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.C_hamiltonian(site, below, parent(operator), above, envs))
+end
+
+function MPSKit.AC_hamiltonian(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.AC_hamiltonian(site, below, parent(operator), above, envs))
+end
+
+function MPSKit.AC2_hamiltonian(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.AC2_hamiltonian(site, below, parent(operator), above, envs))
+end
+
+function MPSKit.C_projection(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.C_projection(site, below, parent(operator), above, envs))
+end
+
+function MPSKit.AC_projection(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.AC_projection(site, below, parent(operator), above, envs))
+end
+
+function MPSKit.AC2_projection(site::Int, below, operator::MPIOperator, above, envs)
+    return MPIOperator(MPSKit.AC2_projection(site, below, parent(operator), above, envs))
+end

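The comment kept at the top of this file records why the forwarding macro was dropped here: the methods it generated were never more specific than MPSKit's own generic methods, so dispatch never reached them. Spelling the methods out per wrapper type, as above, works because a method whose operator argument is annotated ::MPIOperator is strictly more specific than a fallback accepting any operator. A self-contained sketch of that dispatch rule (hypothetical names, not MPSKit API):

struct Wrapper{O}
    parent::O
end

# generic fallback, standing in for the method MPSKit already defines
hamiltonian(site, below, operator, above, envs) = :generic

# explicit method on the wrapper type: strictly more specific, so dispatch selects it
function hamiltonian(site, below, operator::Wrapper, above, envs)
    return Wrapper(hamiltonian(site, below, operator.parent, above, envs))
end

hamiltonian(1, nothing, Wrapper(:h), nothing, nothing)  # returns Wrapper(:generic), not :generic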
src/MPIOperator/mpioperator.jl

Lines changed: 14 additions & 6 deletions
@@ -1,15 +1,21 @@
 ## This shallow struct is used to indicate that each LazyMIPOperator should be evaluated on each rank and the result is to be reduced across all ranks using MPI.Allreduce
-struct MPIOperator{O,T}
+struct MPIOperator{O}
     parent::O
+    function MPIOperator(parent::O) where {O}
+        if !MPI.Initialized()
+            @warn "MPI is currently not initialized. Please initialize MPI by running \n `using MPI; MPI.Init()` \n before creating an MPIOperator." maxlog=1
+        end
+        return new{O}(parent)
+    end
 end
 
-function Base.parent(op::MPIOperator{O,T})::O where {O,T}
+function Base.parent(op::MPIOperator{O})::O where {O}
     return op.parent
 end
 
-function (Op::MPIOperator{O,T})(x::S) where {O,T,S}
-    y_per_rank = parent(x)
-    y = MPI.allreduce(y_per_rank, +, MPI.COMM_WORLD)
+function (Op::MPIOperator{O})(x::S) where {O,S}
+    y_per_rank = parent(Op)(x)
+    y = large_allreduce(y_per_rank, +, MPI.COMM_WORLD)
     return y
 end
 
@@ -24,4 +30,6 @@ Base.show(io::IO, op::MPIOperator) = show(convert(IOContext, io), op)
 function Base.show(io::IOContext, op::MPIOperator)
     print(io, "MPIOperator wrapping:\n")
     show(io, parent(op))
-end
+end
+
+@forward MPIOperator.parent Base.getindex

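Per the comment at the top of the file, the wrapper's role is organisational: each rank evaluates its own parent operator and the per-rank results are summed over the communicator (now via large_allreduce). A minimal sketch of the intended usage pattern; how the operator terms are split over ranks is up to the caller, and build_local_terms is a hypothetical helper, not part of this package:

using MPI, MPSKitParallel
MPI.Init()
rank = MPI.Comm_rank(MPI.COMM_WORLD)
nranks = MPI.Comm_size(MPI.COMM_WORLD)

H_local = build_local_terms(rank, nranks)  # hypothetical: this rank's share of the operator terms
H_mpi = MPIOperator(H_local)               # applying H_mpi to a state sums the per-rank results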
src/MPSKitParallel.jl

Lines changed: 6 additions & 3 deletions
@@ -5,7 +5,7 @@ module MPSKitParallel
 # utility:
 
 # MPOs
-export LazyMPISum
+export MPIOperator
 
 # Imports
 # -------
@@ -14,7 +14,10 @@ using MPSKit
 using MPI
 using MacroTools
 
-
-import MPSKit: environments
+import MPSKit: environments, AbstractMPSEnvironments, InfiniteEnvironments
+import MPSKit: C_hamiltonian, AC_hamiltonian, AC2_hamiltonian, C_projection, AC_projection, AC2_projection
 import MPSKit: C_hamiltonian, AC_hamiltonian, AC2_hamiltonian, C_projection, AC_projection, AC2_projection
+
+include("includes.jl")
+
 end

src/SharedMPS/mpipropertywrapper.jl

Lines changed: 0 additions & 21 deletions
This file was deleted.

src/SharedMPS/sharedmps.jl

Lines changed: 23 additions & 3 deletions
@@ -1,5 +1,25 @@
-function Base.getproperty(ψ::InfiniteMPS, prop::Symbol)
-    return MPIPropertyWrapper(getfield(ψ, prop))
+macro forward_setindex_sync(ex)
+    if !(isa(ex, Expr) && ex.head == :.)
+        error("Syntax: @forward_setindex_sync Type.field")
+    end
+    Texpr = ex.args[1]
+    field = ex.args[2]
+
+    # Remove the outer esc() and only escape what needs escaping
+    body = quote
+        function Base.setindex!(obj::$(esc(Texpr)), A, i::Int)
+            if MPI.Initialized()
+                A = MPI.bcast(A, MPI.COMM_WORLD) ## TODO: Write own chunked version
+                println("We have bcasted!")
+            end
+            return setindex!(getfield(obj, $(QuoteNode(field))), A, i)
+        end
+    end
+
+    return body # Don't wrap the whole thing in esc()
 end
 
-## Issue: Base.getproperty is already overloaded for MultilineMPS, QP, MultilineQP, WindowMPS, FiniteMPS
+@forward_setindex_sync InfiniteMPS.AL
+@forward_setindex_sync InfiniteMPS.AR
+@forward_setindex_sync InfiniteMPS.AC
+@forward_setindex_sync InfiniteMPS.C

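For reference, the method that @forward_setindex_sync InfiniteMPS.AL is intended to generate looks roughly as follows (a sketch of the expansion; the actual macro splices the field name from its argument):

function Base.setindex!(obj::InfiniteMPS, A, i::Int)
    if MPI.Initialized()
        # broadcast the new tensor from rank 0 so every rank stores identical data
        A = MPI.bcast(A, MPI.COMM_WORLD)
    end
    return setindex!(getfield(obj, :AL), A, i)
end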
src/algorithms/expval.jl

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+## TODO: It would be cleaner to instead clean up MPSKit.jl by using the correct constructors for AC_Hamiltonian etc...
+
+function MPSKit.expectation_value(ψ::InfiniteMPS, H::MPIOperator{<:InfiniteMPOHamiltonian},
+        envs::AbstractMPSEnvironments = environments(ψ, H))
+    res = MPSKit.expectation_value(ψ, parent(H), envs)
+    return MPI.Allreduce(res, +, MPI.COMM_WORLD)
+end

src/algorithms/grassmann.jl

Lines changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
+## Here, we overload some functions of the GrassmannMPS local module in MPSKit.jl
+
+using MPSKit: AbstractMPSEnvironments, InfiniteEnvironments, MultilineEnvironments, AC_hamiltonian, recalculate!
+using TensorKit
+using OhMyThreads
+import TensorKitManifolds.Grassmann
+import MPSKit.GrassmannMPS: rmul
+function MPSKit.GrassmannMPS.fg(
+        state::FiniteMPS, operator::MPIOperator{O},
+        envs::AbstractMPSEnvironments = environments(state, operator)
+    ) where {O <: FiniteMPOHamiltonian}
+    f = expectation_value(state, operator, envs)
+    isapprox(imag(f), 0; atol = eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"
+    gs = map(1:length(state)) do i
+        AC′ = AC_hamiltonian(i, state, operator, state, envs) * state.AC[i]
+        g = Grassmann.project(AC′, state.AL[i])
+        return rmul(g, state.C[i]')
+    end
+    return real(f), gs
+end
+function MPSKit.GrassmannMPS.fg(
+        state::InfiniteMPS, operator::MPIOperator{O},
+        envs::AbstractMPSEnvironments = environments(state, operator)
+    ) where {O <: InfiniteMPOHamiltonian}
+    recalculate!(envs, state, operator, state)
+    f = expectation_value(state, operator, envs)
+    isapprox(imag(f), 0; atol = eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"
+
+    A = Core.Compiler.return_type(Grassmann.project, Tuple{eltype(state), eltype(state)})
+    gs = Vector{A}(undef, length(state))
+    tmap!(gs, 1:length(state); scheduler = MPSKit.Defaults.scheduler[]) do i
+        AC′ = AC_hamiltonian(i, state, operator, state, envs) * state.AC[i]
+        g = Grassmann.project(AC′, state.AL[i])
+        return rmul(g, state.C[i]')
+    end
+    return real(f), gs
+end
+function MPSKit.GrassmannMPS.fg(
+        state::InfiniteMPS, operator::MPIOperator{O},
+        envs::AbstractMPSEnvironments = environments(state, operator)
+    ) where {O <: InfiniteMPO}
+    recalculate!(envs, state, operator, state)
+    f = expectation_value(state, operator, envs)
+    isapprox(imag(f), 0; atol = eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"
+
+    A = Core.Compiler.return_type(Grassmann.project, Tuple{eltype(state), eltype(state)})
+    gs = Vector{A}(undef, length(state))
+    tmap!(gs, eachindex(state); scheduler = MPSKit.Defaults.scheduler[]) do i
+        AC′ = AC_hamiltonian(i, state, operator, state, envs) * state.AC[i]
+        g = rmul!(Grassmann.project(AC′, state.AL[i]), -inv(f))
+        return rmul(g, state.C[i]')
+    end
+    return -log(real(f)), gs
+end
+# function MPSKit.GrassmannMPS.fg(
+#         state::MultilineMPS, operator::MultilineMPO,
+#         envs::MultilineEnvironments = environments(state, operator)
+#     )
+#     @assert length(state) == 1 "not implemented"
+#     recalculate!(envs, state, operator, state)
+#     f = expectation_value(state, operator, envs)
+#     isapprox(imag(f), 0; atol = eps(abs(f))^(3 / 4)) || @warn "MPO might not be Hermitian: $f"
+
+#     A = Core.Compiler.return_type(Grassmann.project, Tuple{eltype(state), eltype(state)})
+#     gs = Matrix{A}(undef, size(state))
+#     tforeach(eachindex(state); scheduler = MPSKit.Defaults.scheduler[]) do i
+#         AC′ = AC_hamiltonian(i, state, operator, state, envs) * state.AC[i]
+#         g = rmul!(Grassmann.project(AC′, state.AL[i]), -inv(f))
+#         gs[i] = rmul(g, state.C[i]')
+#         return nothing
+#     end
+#     return -log(real(f)), gs
+# end

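These fg overloads supply the cost function and Riemannian gradient that MPSKit's gradient-based ground-state solver evaluates, so an MPIOperator can now be optimised with it directly. A minimal usage sketch, reusing H and ψ₀_inf from examples/Ising.jl; GradientGrassmann is MPSKit's gradient optimizer and the keyword values are illustrative only:

using MPSKit, MPSKitParallel, MPI
MPI.Init()

H_mpi = MPIOperator(H)                                # H as built in examples/Ising.jl
alg = GradientGrassmann(; tol = 1e-8, maxiter = 200)  # illustrative tolerances
ψ, envs, δ = find_groundstate(ψ₀_inf, H_mpi, alg)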
src/includes.jl

Lines changed: 5 additions & 2 deletions
@@ -1,10 +1,13 @@
 include("utility/forward.jl")
-
+include("multiprocessing/mpi/helper.jl")
+include("multiprocessing/mpi/mpi_buffers.jl")
 
 include("MPIOperator/mpioperator.jl")
 include("MPIOperator/derivatives.jl")
 include("MPIOperator/environments.jl")
 include("MPIOperator/ortho.jl")
 include("MPIOperator/transfermatrix.jl")
+include("algorithms/expval.jl")
 
-include("multiprocessing/mpi/helper.jl")
+include("SharedMPS/mpipropertywrapper.jl")
+include("SharedMPS/sharedmps.jl")
