
Commit b7c7f3c

Updated with argparse
1 parent 4bd170c commit b7c7f3c

3 files changed: +154 -51 lines changed


examples/QCBM/README.md

+34
@@ -1,7 +1,41 @@
 # Quantum Circuit Born Machine
+(Implementation by: [Gopal Ramesh Dahale](https://github.com/Gopal-Dahale))

 Quantum Circuit Born Machine (QCBM) [1] is a generative modeling algorithm which uses the Born rule from quantum mechanics to sample from a quantum state $|\psi \rangle$ learned by training an ansatz $U(\theta)$ [1][2]. In this tutorial we show how `torchquantum` can be used to model a Gaussian mixture with a QCBM.

+## Setup
+
+Below is the usage of `qcbm_gaussian_mixture.py`, which can be obtained by running `python qcbm_gaussian_mixture.py -h`.
+
+```
+usage: qcbm_gaussian_mixture.py [-h] [--n_wires N_WIRES] [--epochs EPOCHS] [--n_blocks N_BLOCKS] [--n_layers_per_block N_LAYERS_PER_BLOCK] [--plot] [--optimizer OPTIMIZER] [--lr LR]
+
+options:
+  -h, --help            show this help message and exit
+  --n_wires N_WIRES     Number of wires used in the circuit
+  --epochs EPOCHS       Number of training epochs
+  --n_blocks N_BLOCKS   Number of blocks in ansatz
+  --n_layers_per_block N_LAYERS_PER_BLOCK
+                        Number of layers per block in ansatz
+  --plot                Visualize the predicted probability distribution
+  --optimizer OPTIMIZER
+                        optimizer class from torch.optim
+  --lr LR
+```
+
+For example:
+
+```
+python qcbm_gaussian_mixture.py --plot --epochs 100 --optimizer RMSprop --lr 0.01 --n_blocks 6 --n_layers_per_block 2 --n_wires 6
+```
+
+Using the command above gives an output similar to the plot below.
+
+<p align="center">
+  <img src='./assets/sample_output.png' width='500' alt='sample output of QCBM'>
+</p>
+
 ## References

 1. Liu, Jin-Guo, and Lei Wang. “Differentiable learning of quantum circuit born machines.” Physical Review A 98.6 (2018): 062324.
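
In the standard QCBM formulation [1], the sampling the README describes follows the Born rule: the trained ansatz prepares $|\psi_\theta\rangle = U(\theta)\,|0^{\otimes n}\rangle$ on $n$ wires, and measuring all wires in the computational basis returns bitstring $x$ with probability

$$
p_{\theta}(x) = \left|\langle x \mid U(\theta) \mid 0^{\otimes n} \rangle\right|^2 .
$$

Training tunes $\theta$ until $p_{\theta}$ matches the target distribution $\pi(x)$, here the discretized Gaussian mixture.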
examples/QCBM/assets/sample_output.png

32.6 KB (binary image; the sample output plot referenced by the README)

examples/QCBM/qcbm_gaussian_mixture.py

+120 -51
@@ -3,58 +3,127 @@
 import torch
 from torchquantum.algorithm import QCBM, MMDLoss
 import torchquantum as tq
+import argparse
+import os
+from pprint import pprint
+
+
+# Reproducibility
+def set_seed(seed: int = 42) -> None:
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    # When running on the CuDNN backend, two further options must be set
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+    # Set a fixed value for the hash seed
+    os.environ["PYTHONHASHSEED"] = str(seed)
+    print(f"Random seed set as {seed}")
+
+
+def _setup_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--n_wires", type=int, default=6, help="Number of wires used in the circuit"
+    )
+    parser.add_argument(
+        "--epochs", type=int, default=10, help="Number of training epochs"
+    )
+    parser.add_argument(
+        "--n_blocks", type=int, default=6, help="Number of blocks in ansatz"
+    )
+    parser.add_argument(
+        "--n_layers_per_block",
+        type=int,
+        default=1,
+        help="Number of layers per block in ansatz",
+    )
+    parser.add_argument(
+        "--plot",
+        action="store_true",
+        help="Visualize the predicted probability distribution",
+    )
+    parser.add_argument(
+        "--optimizer", type=str, default="Adam", help="optimizer class from torch.optim"
+    )
+    parser.add_argument("--lr", type=float, default=1e-2)
+    return parser


 # Function to create a gaussian mixture
 def gaussian_mixture_pdf(x, mus, sigmas):
-    mus, sigmas = np.array(mus), np.array(sigmas)
-    vars = sigmas**2
-    values = [
-        (1 / np.sqrt(2 * np.pi * v)) * np.exp(-((x - m) ** 2) / (2 * v))
-        for m, v in zip(mus, vars)
-    ]
-    values = np.sum([val / sum(val) for val in values], axis=0)
-    return values / np.sum(values)
-
-# Create a gaussian mixture
-n_wires = 6
-x_max = 2**n_wires
-x_input = np.arange(x_max)
-mus = [(2 / 8) * x_max, (5 / 8) * x_max]
-sigmas = [x_max / 10] * 2
-data = gaussian_mixture_pdf(x_input, mus, sigmas)
-
-# This is the target distribution that the QCBM will learn
-target_probs = torch.tensor(data, dtype=torch.float32)
-
-# Ansatz
-layers = tq.RXYZCXLayer0({"n_blocks": 6, "n_wires": n_wires, "n_layers_per_block": 1})
-
-qcbm = QCBM(n_wires, layers)
-
-# To train QCBMs, we use MMDLoss with radial basis function kernel.
-bandwidth = torch.tensor([0.25, 60])
-space = torch.arange(2**n_wires)
-mmd = MMDLoss(bandwidth, space)
-
-# Optimization
-optimizer = torch.optim.Adam(qcbm.parameters(), lr=0.01)
-for i in range(100):
-    optimizer.zero_grad(set_to_none=True)
-    pred_probs = qcbm()
-    loss = mmd(pred_probs, target_probs)
-    loss.backward()
-    optimizer.step()
-    print(i, loss.item())
-
-# Visualize the results
-with torch.no_grad():
-    pred_probs = qcbm()
-
-plt.plot(x_input, target_probs, linestyle="-.", label=r"$\pi(x)$")
-plt.bar(x_input, pred_probs, color="green", alpha=0.5, label="samples")
-plt.xlabel("Samples")
-plt.ylabel("Prob. Distribution")
-
-plt.legend()
-plt.show()
+    mus, sigmas = np.array(mus), np.array(sigmas)
+    vars = sigmas**2
+    values = [
+        (1 / np.sqrt(2 * np.pi * v)) * np.exp(-((x - m) ** 2) / (2 * v))
+        for m, v in zip(mus, vars)
+    ]
+    values = np.sum([val / sum(val) for val in values], axis=0)
+    return values / np.sum(values)
+
+
+def main():
+    set_seed()
+    parser = _setup_parser()
+    args = parser.parse_args()
+
+    print("Configuration:")
+    pprint(vars(args))
+
+    # Create a gaussian mixture
+    n_wires = args.n_wires
+    assert n_wires >= 1, "Number of wires must be at least 1"
+
+    x_max = 2**n_wires
+    x_input = np.arange(x_max)
+    mus = [(2 / 8) * x_max, (5 / 8) * x_max]
+    sigmas = [x_max / 10] * 2
+    data = gaussian_mixture_pdf(x_input, mus, sigmas)
+
+    # This is the target distribution that the QCBM will learn
+    target_probs = torch.tensor(data, dtype=torch.float32)
+
+    # Ansatz
+    layers = tq.RXYZCXLayer0(
+        {
+            "n_blocks": args.n_blocks,
+            "n_wires": n_wires,
+            "n_layers_per_block": args.n_layers_per_block,
+        }
+    )
+
+    qcbm = QCBM(n_wires, layers)
+
+    # To train QCBMs, we use MMDLoss with a radial basis function kernel.
+    bandwidth = torch.tensor([0.25, 60])
+    space = torch.arange(2**n_wires)
+    mmd = MMDLoss(bandwidth, space)
+
+    # Optimization
+    optimizer_class = getattr(torch.optim, args.optimizer)
+    optimizer = optimizer_class(qcbm.parameters(), lr=args.lr)
+
+    for i in range(args.epochs):
+        optimizer.zero_grad(set_to_none=True)
+        pred_probs = qcbm()
+        loss = mmd(pred_probs, target_probs)
+        loss.backward()
+        optimizer.step()
+        print(i, loss.item())
+
+    # Visualize the results
+    if args.plot:
+        with torch.no_grad():
+            pred_probs = qcbm()
+
+        plt.plot(x_input, target_probs, linestyle="-.", label=r"$\pi(x)$")
+        plt.bar(x_input, pred_probs, color="green", alpha=0.5, label="samples")
+        plt.xlabel("Samples")
+        plt.ylabel("Prob. Distribution")
+
+        plt.legend()
+        plt.show()
+
+
+if __name__ == "__main__":
+    main()
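
A note on the objective the script minimizes: `MMDLoss` compares the circuit's output distribution with the target via the squared maximum mean discrepancy over the basis-state labels in `space`, with one kernel term per entry of `bandwidth`. The sketch below shows the standard multi-bandwidth RBF form used for QCBM training [1]; it is an illustration under that assumption, and torchquantum's `MMDLoss` may use a slightly different kernel scaling.

```python
import torch

def rbf_mmd_sq(p, q, space, bandwidths):
    # Pairwise squared distances between basis-state labels 0 .. 2^n - 1
    d2 = (space[:, None] - space[None, :]).float() ** 2
    # One RBF kernel per bandwidth, summed into a single Gram matrix
    k = sum(torch.exp(-d2 / b) for b in bandwidths)
    diff = p - q
    return diff @ k @ diff  # (p - q)^T K (p - q)

# Mirrors the script's defaults: 6 wires, bandwidths [0.25, 60]
space = torch.arange(2**6)
bandwidths = torch.tensor([0.25, 60.0])
uniform = torch.full((2**6,), 1 / 2**6)
peaked = torch.zeros(2**6)
peaked[20] = 1.0
print(rbf_mmd_sq(uniform, peaked, space, bandwidths))  # positive, since the distributions differ
```

Pairing a narrow bandwidth with a wide one is a common choice: the narrow kernel penalizes mismatches in fine structure, while the wide one penalizes mismatches in the overall shape.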

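One convenience in the new CLI: `--optimizer` is resolved by name via `getattr(torch.optim, args.optimizer)`, so any optimizer class shipped in `torch.optim` can be selected without code changes. A quick sketch of that lookup and its failure mode:

```python
import torch

# Valid names resolve to optimizer classes, exactly as main() does it
for name in ["Adam", "RMSprop", "SGD"]:
    print(name, "->", getattr(torch.optim, name))

# An unknown name fails fast with AttributeError, before any training runs
try:
    getattr(torch.optim, "NotAnOptimizer")
except AttributeError as err:
    print("invalid --optimizer:", err)
```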