Skip to content

Commit 450f8cf

Browse files
committed
added codes
1 parent ea458f7 commit 450f8cf

File tree

3 files changed

+322
-0
lines changed

3 files changed

+322
-0
lines changed

doc/Programs/QuantumRBM/qrbm.py

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
import numpy as np
2+
from collections import Counter
3+
from sklearn.datasets import fetch_openml
4+
from skimage.transform import resize
5+
import warnings
6+
warnings.filterwarnings("ignore")
7+
8+
# --- STEP 1: Load and preprocess MNIST zeros (4x4 binarized) ---

print("Downloading and preprocessing MNIST...")
mnist = fetch_openml("mnist_784", version=1, as_frame=False)
X, y = mnist["data"], mnist["target"]
# Keep only the digit-0 images, scale pixel values to [0, 1],
# and cap at 200 images to keep the demo fast.
X_zeros = (X[y == '0'] / 255.0)[:200]
15+
16+
def downsample_binarize(img, size=4):
    """Shrink a flat 28x28 image to `size` x `size` and binarize it.

    Nearest-neighbor resize (order=0, no anti-aliasing), threshold at 0.5,
    then return the pixels as a row-major bitstring like '0110...'.
    """
    square = img.reshape(28, 28)
    shrunk = resize(square, (size, size), order=0,
                    anti_aliasing=False, preserve_range=True)
    bits = (shrunk > 0.5).astype(int).flatten()
    return ''.join(str(b) for b in bits)
21+
22+
# Empirical target distribution: bitstring pattern -> relative frequency.
samples_bin = [downsample_binarize(img) for img in X_zeros]
counts = Counter(samples_bin)
total = sum(counts.values())
data_dist = {k: v / total for k, v in counts.items()}
26+
27+
# --- STEP 2: Quantum Circuit Utils ---

# R_y rotation
def Ry(theta):
    """Single-qubit rotation about the Y axis by angle `theta` (2x2 real matrix)."""
    half = theta / 2
    c, s = np.cos(half), np.sin(half)
    return np.array([[c, -s],
                     [s, c]])
35+
36+
# CNOT gate for any 2 qubits
def CNOT(n, control, target):
    """Return the 2^n x 2^n CNOT matrix on an n-qubit register.

    Qubit 0 is the leftmost (most significant) bit of the basis index.
    The target bit is flipped exactly when the control bit is 1; since
    that flip is an involution, the resulting permutation matrix is
    symmetric and op[i, j] = op[j, i].
    """
    dim = 2 ** n
    op = np.zeros((dim, dim), dtype=complex)
    ctrl_mask = 1 << (n - 1 - control)
    targ_mask = 1 << (n - 1 - target)
    for i in range(dim):
        j = i ^ targ_mask if i & ctrl_mask else i
        op[i, j] = 1
    return op
47+
48+
# Build the quantum state from params
def variational_state(params):
    """Prepare |psi(params)>: one Ry(theta) per qubit applied to |0...0>,
    followed by a linear chain of CNOTs (qubit i controls qubit i+1)."""
    n = len(params)
    psi = np.zeros(2 ** n, dtype=complex)
    psi[0] = 1  # start in |00...0>

    # Tensor the per-qubit rotations into one 2^n x 2^n layer.
    layer = np.array([[1]])
    for angle in params:
        layer = np.kron(layer, Ry(angle))
    psi = layer @ psi

    # Entangle neighboring qubits.
    for q in range(n - 1):
        psi = CNOT(n, q, q + 1) @ psi

    return psi
65+
66+
# Sample bitstrings from state
def sample_state(psi, num_samples=1000):
    """Draw `num_samples` basis-state bitstrings from the Born distribution |psi_i|^2."""
    probs = np.abs(psi) ** 2
    n_bits = int(np.log2(len(psi)))
    labels = [np.binary_repr(i, width=n_bits) for i in range(len(psi))]
    return np.random.choice(labels, size=num_samples, p=probs)
71+
72+
# Get distribution from samples
def get_prob_dist(samples):
    """Turn a sequence of bitstrings into a normalized frequency dict."""
    tally = Counter(samples)
    n = sum(tally.values())
    return {state: hits / n for state, hits in tally.items()}
77+
78+
# KL divergence: D_KL(p || q)
def kl_divergence(p, q, eps=1e-10):
    """Compute D_KL(p || q) = sum_x p(x) * log(p(x) / q(x)).

    `p` and `q` are dicts mapping outcomes to probabilities.  Outcomes
    missing from `q` (or with probability below `eps`) are floored at
    `eps` so the logarithm stays finite.

    Bug fix: the original added `eps` twice — once as the `dict.get`
    default and once again in the denominator — so even D_KL(p || p)
    came out slightly nonzero.  The floor is now applied exactly once.
    """
    kl = 0.0
    for x, px in p.items():
        qx = max(q.get(x, 0.0), eps)
        kl += px * np.log(px / qx)
    return kl
86+
87+
# Parameter-shift gradients
def parameter_shift_grad(params, data_dist, shift=np.pi/2, num_samples=500):
    """Estimate d(KL)/d(theta_i) from +/- `shift` evaluations of the loss.

    NOTE(review): each evaluation re-samples the circuit, so the estimate
    is stochastic; applying the shift formula to a KL loss gives a
    shift-based estimate rather than an exact parameter-shift gradient.
    """
    grads = np.zeros_like(params)
    for i in range(len(params)):
        shifted_up = params.copy()
        shifted_down = params.copy()
        shifted_up[i] += shift
        shifted_down[i] -= shift

        psi_up = variational_state(shifted_up)
        psi_down = variational_state(shifted_down)
        dist_up = get_prob_dist(sample_state(psi_up, num_samples))
        dist_down = get_prob_dist(sample_state(psi_down, num_samples))

        grads[i] = 0.5 * (kl_divergence(data_dist, dist_up)
                          - kl_divergence(data_dist, dist_down))
    return grads
105+
106+
# --- STEP 3: Training VQBM on MNIST patterns ---

n_qubits = 4
params = np.random.uniform(0, 2*np.pi, size=n_qubits)
lr = 0.2

print("\nTraining VQBM...\n")
for step in range(100):
    # Loss is evaluated on a fresh batch of samples before the update.
    psi = variational_state(params)
    model_dist = get_prob_dist(sample_state(psi, num_samples=1000))
    loss = kl_divergence(data_dist, model_dist)

    # Gradient descent on the sampled KL objective.
    params -= lr * parameter_shift_grad(params, data_dist)

    if step % 10 == 0:
        print(f"Step {step:3d}: KL Divergence = {loss:.5f}")
124+
125+
# --- STEP 4: Results ---

print("\nFinal learned distribution (top states):")
final_dist = get_prob_dist(sample_state(variational_state(params), num_samples=2000))
# Show the ten most probable bitstrings, highest first.
for k, v in sorted(final_dist.items(), key=lambda kv: -kv[1])[:10]:
    print(f"{k}: {v:.4f}")
Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
import numpy as np
2+
3+
# Define Pauli matrices
# 2x2 identity.  NOTE(review): I is defined but not referenced in this script.
I = np.eye(2)
# Pauli-X (bit flip).  NOTE(review): X is also unused here — candidate for cleanup.
X = np.array([[0, 1], [1, 0]])
# Pauli-Z (phase flip); used below to build the Ising Hamiltonian H = -Z⊗Z.
Z = np.array([[1, 0], [0, -1]])
7+
8+
# Kronecker product helper
def kron(*matrices):
    """Kronecker product of any number of matrices, folded left to right."""
    out = np.array([[1]])
    for mat in matrices:
        out = np.kron(out, mat)
    return out
14+
15+
# Rotation around Y axis
def Ry(theta):
    """Single-qubit Y-rotation by `theta`: [[cos, -sin], [sin, cos]] at theta/2."""
    c = np.cos(theta / 2)
    s = np.sin(theta / 2)
    return np.array([[c, -s], [s, c]])
21+
22+
# CNOT gate on qubits 0 (control) and 1 (target):
# flips the target qubit exactly when the control qubit is |1⟩.
CNOT = np.array([
    [1, 0, 0, 0],  # |00⟩ -> |00⟩
    [0, 1, 0, 0],  # |01⟩ -> |01⟩
    [0, 0, 0, 1],  # |10⟩ -> |11⟩
    [0, 0, 1, 0],  # |11⟩ -> |10⟩
])
29+
30+
# Full variational circuit with entanglement
def variational_state(theta1, theta2):
    """Prepare CNOT · (Ry(theta1) ⊗ Ry(theta2)) |00⟩."""
    # Start in |00⟩.
    psi = np.zeros(4, dtype=complex)
    psi[0] = 1

    # Independent Y-rotations on each qubit, then the entangling CNOT.
    psi = kron(Ry(theta1), Ry(theta2)) @ psi
    psi = CNOT @ psi

    return psi
43+
44+
# Hamiltonian (Ising interaction): H = -Z⊗Z = diag(-1, 1, 1, -1),
# so aligned-spin states |00⟩ and |11⟩ have the minimum energy -1.
H = -kron(Z, Z)
46+
47+
# Compute expectation ⟨ψ|H|ψ⟩
def energy(theta1, theta2):
    """Real expectation value ⟨psi|H|psi⟩ of the Ising Hamiltonian in the ansatz state."""
    state = variational_state(theta1, theta2)
    return np.real(np.vdot(state, H @ state))
51+
52+
# Parameter shift rule: dE/dθ ≈ [E(θ + π/2) - E(θ - π/2)] / 2
def parameter_shift_grad(theta1, theta2):
    """Gradient of `energy` with respect to both angles via ±π/2 shifts."""
    s = np.pi / 2
    g1 = 0.5 * (energy(theta1 + s, theta2) - energy(theta1 - s, theta2))
    g2 = 0.5 * (energy(theta1, theta2 + s) - energy(theta1, theta2 - s))
    return g1, g2
58+
59+
# Training with gradient descent
theta1, theta2 = np.random.uniform(0, 2 * np.pi, 2)
learning_rate = 0.1

for step in range(100):
    # Energy is reported at the pre-update parameters.
    E = energy(theta1, theta2)
    g1, g2 = parameter_shift_grad(theta1, theta2)
    theta1, theta2 = theta1 - learning_rate * g1, theta2 - learning_rate * g2

    if step % 10 == 0:
        print(f"Step {step:3d}: Energy = {E:.6f}, θ1 = {theta1:.4f}, θ2 = {theta2:.4f}")

print("\nFinal energy:", energy(theta1, theta2))
print("Final parameters: θ1 =", theta1, ", θ2 =", theta2)

doc/Programs/QuantumRBM/qrbmx.py

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
import numpy as np
2+
from collections import Counter
3+
from itertools import product
4+
5+
# Pauli matrices
# Pauli-Z (phase flip).  NOTE(review): neither Z nor I is referenced anywhere
# else in this script (and `itertools.product` is imported but unused) —
# candidates for cleanup.
Z = np.array([[1, 0], [0, -1]])
# 2x2 identity.
I = np.eye(2)
8+
9+
# R_y gate
def Ry(theta):
    """Single-qubit rotation about Y by angle `theta` (standard 2x2 real matrix)."""
    half = 0.5 * theta
    return np.array([[np.cos(half), -np.sin(half)],
                     [np.sin(half), np.cos(half)]])
15+
16+
# CNOT gate for arbitrary control and target
def CNOT(n, control, target):
    """Full-register CNOT on n qubits (qubit 0 = most significant bit).

    The target bit flips exactly when the control bit is 1; the flip is
    its own inverse, so the permutation matrix is symmetric and filling
    op[row, col] gives the correct operator.
    """
    dim = 2 ** n
    ctrl_bit = 1 << (n - 1 - control)
    targ_bit = 1 << (n - 1 - target)
    op = np.zeros((dim, dim), dtype=complex)
    for row in range(dim):
        col = row ^ targ_bit if row & ctrl_bit else row
        op[row, col] = 1
    return op
27+
28+
# Create n-qubit variational state
def variational_state(params):
    """Build |psi(params)>: per-qubit Ry rotations on |0...0>, then a CNOT chain."""
    n = len(params)
    psi = np.zeros(2 ** n, dtype=complex)
    psi[0] = 1  # |00...0⟩

    # One Ry(theta) per qubit, tensored into a single layer.
    layer = np.array([[1]])
    for theta in params:
        layer = np.kron(layer, Ry(theta))
    psi = layer @ psi

    # Entangle neighbors with a linear chain of CNOTs.
    for q in range(n - 1):
        psi = CNOT(n, q, q + 1) @ psi

    return psi
45+
46+
# Sample from state
def sample_state(psi, num_samples=1000):
    """Sample `num_samples` basis-state bitstrings with probability |psi_i|^2."""
    dim = len(psi)
    width = int(np.log2(dim))
    basis = [format(i, f"0{width}b") for i in range(dim)]
    return np.random.choice(basis, size=num_samples, p=np.abs(psi) ** 2)
51+
52+
# KL divergence: D_KL(p || q) = sum_x p(x) log(p(x)/q(x))
def kl_divergence(p_data, p_model, eps=1e-10):
    """Compute D_KL(p_data || p_model) over dict-valued distributions.

    Model probabilities missing from `p_model` (or below `eps`) are
    floored at `eps` to keep the logarithm finite.

    Bug fix: the original floored twice — `p_model.get(x, eps)` and then
    `q + eps` in the denominator — which made even D_KL(p || p) slightly
    nonzero.  The floor is now applied exactly once.
    """
    kl = 0.0
    for x, p in p_data.items():
        q = max(p_model.get(x, 0.0), eps)
        kl += p * np.log(p / q)
    return kl
60+
61+
# Get empirical probabilities from samples
def get_prob_dist(samples):
    """Empirical distribution: bitstring -> relative frequency."""
    freq = Counter(samples)
    n = sum(freq.values())
    return {state: cnt / n for state, cnt in freq.items()}
66+
67+
68+
# Parameter shift gradient
def parameter_shift_grad(params, data_dist, shift=np.pi/2, num_samples=500):
    """Estimate the KL gradient from +/- `shift` evaluations per parameter.

    NOTE(review): each term re-samples the circuit, so the estimate is
    stochastic; for a KL objective this is a shift-based estimate rather
    than the exact parameter-shift gradient.
    """
    grads = np.zeros_like(params)
    for idx in range(len(params)):
        up = params.copy()
        down = params.copy()
        up[idx] += shift
        down[idx] -= shift

        psi_up = variational_state(up)
        psi_down = variational_state(down)

        dist_up = get_prob_dist(sample_state(psi_up, num_samples))
        dist_down = get_prob_dist(sample_state(psi_down, num_samples))

        grads[idx] = 0.5 * (kl_divergence(data_dist, dist_up)
                            - kl_divergence(data_dist, dist_down))
    return grads
88+
89+
90+
# === Synthetic Dataset ===
# Four repeated bitstrings give a uniform target over those patterns.
data_samples = ['000', '001', '011', '111'] * 50  # Synthetic repeated patterns
data_dist = get_prob_dist(data_samples)

# === VQBM Training ===
n_qubits = 3
params = np.random.uniform(0, 2*np.pi, size=n_qubits)
lr = 0.1

for step in range(1000):
    # Current model distribution and loss, evaluated before the update.
    psi = variational_state(params)
    model_dist = get_prob_dist(sample_state(psi, num_samples=500))
    loss = kl_divergence(data_dist, model_dist)

    # Gradient descent on the sampled KL objective.
    params -= lr * parameter_shift_grad(params, data_dist)

    if step % 10 == 0:
        print(f"Step {step:3d}: KL Divergence = {loss:.6f}")

# Final Results
print("\nFinal learned distribution:")
final_model_dist = get_prob_dist(sample_state(variational_state(params), num_samples=1000))
for k in sorted(final_model_dist):
    print(f"{k}: {final_model_dist[k]:.3f}")

0 commit comments

Comments
 (0)