Solve a QUBO problem
In this notebook, we solve a quadratic unconstrained binary optimization (QUBO) problem with the Qadence emulated analog interface using the QAOA variational algorithm. The problem is detailed in the Pulser documentation here.
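For reference, a QUBO instance asks for a binary string that minimizes a quadratic form defined by a symmetric matrix of weights:

$$
\min_{z \in \{0,1\}^n} \; z^{\top} Q z
$$

where $Q \in \mathbb{R}^{n \times n}$ collects the weights of the problem graph.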
Define and solve QUBO
Pre-requisite: construct QUBO register
Before we start, we have to define a register of coordinates that fits the device.
import torch
import numpy as np
from scipy.optimize import minimize
from scipy.spatial.distance import pdist, squareform
from pulser.devices import Chadoq2
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
def qubo_register_coords(Q):
    """Compute coordinates for the register."""
    # Classically enumerate all bitstrings and their costs for reference;
    # this takes exponential time with the dimension of the QUBO.
    bitstrings = [np.binary_repr(i, len(Q)) for i in range(2 ** len(Q))]
    costs = []
    for b in bitstrings:
        z = np.array(list(b), dtype=int)
        cost = z.T @ Q @ z
        costs.append(cost)
    zipped = zip(bitstrings, costs)
    sort_zipped = sorted(zipped, key=lambda x: x[1])

    def evaluate_mapping(new_coords, *args):
        """Cost function to minimize. Ideally, the pairwise
        distances are conserved."""
        Q, shape = args
        new_coords = np.reshape(new_coords, shape)
        new_Q = squareform(Chadoq2.interaction_coeff / pdist(new_coords) ** 6)
        return np.linalg.norm(new_Q - Q)

    shape = (len(Q), 2)
    np.random.seed(0)
    x0 = np.random.random(shape).flatten()
    res = minimize(
        evaluate_mapping,
        x0,
        args=(Q, shape),
        method="Nelder-Mead",
        tol=1e-6,
        options={"maxiter": 200000, "maxfev": None},
    )
    return [(x, y) for (x, y) in np.reshape(res.x, (len(Q), 2))]
import matplotlib.pyplot as plt
import numpy as np
import torch
from qadence import add_interaction, chain
from qadence import QuantumModel, QuantumCircuit, AnalogRZ, AnalogRX, Register
seed = 0
np.random.seed(seed)
torch.manual_seed(seed)
The QUBO problem is initially defined by a graph of weighted connections Q
and a cost function.
def cost_colouring(bitstring, Q):
    z = np.array(list(bitstring), dtype=int)
    cost = z.T @ Q @ z
    return cost


# Cost function.
def cost_fn(counter, Q):
    cost = sum(counter[key] * cost_colouring(key, Q) for key in counter)
    return cost / sum(counter.values())  # Divide by total samples
# Weights.
Q = np.array(
    [
        [-10.0, 19.7365809, 19.7365809, 5.42015853, 5.42015853],
        [19.7365809, -10.0, 20.67626392, 0.17675796, 0.85604541],
        [19.7365809, 20.67626392, -10.0, 0.85604541, 0.17675796],
        [5.42015853, 0.17675796, 0.85604541, -10.0, 0.32306662],
        [5.42015853, 0.85604541, 0.17675796, 0.32306662, -10.0],
    ]
)
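As a quick, illustrative check of the two cost functions above (the sample counts below are made up for demonstration):

from collections import Counter

# Cost of a single bitstring: z^T Q z.
print(cost_colouring("01011", Q))

# Sample-averaged cost over a hypothetical counter of measured bitstrings.
print(cost_fn(Counter({"01011": 700, "00111": 300}), Q))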
Now, build a weighted register graph from the QUBO definition similarly to what is done in Pulser.
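A minimal sketch of this step, assuming the qubo_register_coords helper defined above; Register.from_coordinates builds the Qadence register used in the rest of the notebook:

# Build the register from coordinates whose pairwise interactions
# approximate the QUBO couplings (sketch using the helper defined above).
reg = Register.from_coordinates(qubo_register_coords(Q))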
The analog circuit is composed of two global rotations per layer: the first rotation corresponds to the mixing Hamiltonian and the second one to the embedding Hamiltonian of the QAOA algorithm. An Ising interaction term is then added to emulate the analog device. Please note that the Rydberg level is set to 70.
from qadence.transpile.emulate import ising_interaction

layers = 2
block = chain(*[AnalogRX(f"t{i}") * AnalogRZ(f"s{i}") for i in range(layers)])

emulated = add_interaction(
    reg, block, interaction=lambda r, ps: ising_interaction(r, ps, rydberg_level=70)
)
emulated =
ChainBlock(0,1,2,3,4)
├── ChainBlock(0,1,2,3,4)
│ ├── HamEvo(0,1,2,3,4) [params: ['51_4100727736005*t0']]
│ └── HamEvo(0,1,2,3,4) [params: ['39_0388262113427*s0']]
└── ChainBlock(0,1,2,3,4)
├── HamEvo(0,1,2,3,4) [params: ['51_4100727736005*t1']]
└── HamEvo(0,1,2,3,4) [params: ['39_0388262113427*s1']]
Next, an initial solution is computed by sampling the model:
model = QuantumModel(QuantumCircuit(reg, emulated), backend="pyqtorch", diff_mode="gpsr")
initial_counts = model.sample({}, n_shots=1000)[0]
initial_counts = Counter({'00000': 470, '01000': 90, '00100': 88, '00001': 74, '00010': 71, '10000': 70, '00101': 19, '11000': 19, '10100': 14, '10001': 13, '10010': 12, '00110': 11, '01001': 11, '00011': 9, '01010': 8, '01100': 6, '01101': 3, '10110': 3, '01011': 2, '01110': 2, '10011': 2, '11010': 2, '00111': 1})
Then, the loss function is defined by averaging over the evaluated bitstrings.
def loss(param, *args):
    Q = args[0]
    param = torch.tensor(param)
    model.reset_vparams(param)
    C = model.sample({}, n_shots=1000)[0]
    return cost_fn(C, Q)
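Purely as an illustration, the loss can be probed at a random set of variational parameters before optimizing:

# Evaluate the loss once at random parameters (one t and one s per layer).
print(loss(np.random.uniform(1, 10, size=2 * layers), Q))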
A gradient-free optimization loop with random restarts is then used to compute the optimal solution.
# Optimization loop: COBYLA with random restarts, keeping the best parameters found.
best_loss, best_x = None, None
for i in range(20):
    res = minimize(
        loss,
        args=Q,
        x0=np.random.uniform(1, 10, size=2 * layers),
        method="COBYLA",
        tol=1e-8,
        options={"maxiter": 20},
    )
    if best_loss is None or res.fun < best_loss:
        best_loss, best_x = res.fun, res.x

# Sample the optimal solution.
model.reset_vparams(best_x)
optimal_count = model.sample({}, n_shots=1000)[0]
Finally, plot the solution:
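A minimal plotting sketch, assuming only matplotlib as imported above; it shows the histogram of sampled bitstrings for the optimized parameters:

# Sort the sampled bitstrings by frequency and plot them as a bar chart.
sorted_counts = dict(sorted(optimal_count.items(), key=lambda item: item[1], reverse=True))
plt.figure(figsize=(10, 4))
plt.bar(sorted_counts.keys(), sorted_counts.values())
plt.xticks(rotation=90)
plt.xlabel("bitstrings")
plt.ylabel("counts")
plt.tight_layout()
plt.show()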