Drive Shaping Methods¶
A quantum program in the Rydberg analog model is defined as a time-dependent drive Hamiltonian that is imposed on the qubits (in addition to the interaction Hamiltonian). The drive shaping configuration part (the drive_shaping field of SolverConfig) defines how the drive parameters are constructed.
In this notebook, we show how to use different drive shaping methods:
- HEURISTIC: has no parameters to be customized.
- OPTIMIZED: has seven parameters that can be customized.
We choose the method when defining the SolverConfig configuration, with drive_shaping_method = DriveType.&lt;METHOD&gt; (e.g. DriveType.HEURISTIC or DriveType.OPTIMIZED).
Default SolverConfig parameters to run a quantum approach:
- use_quantum: bool = False (to use the drive shaping methods, we have to set it to True)
- backend: LocalEmulator | RemoteEmulator | QPU (by default, a LocalEmulator based on qutip is used)
- device: Device = DigitalAnalogDevice() (also available: AnalogDevice())
- embedding_method: str | EmbedderType | None = EmbedderType.GREEDY (also available: BLADE)
- drive_shaping_method: str | DriveType | None = DriveType.HEURISTIC (also available: OPTIMIZED)
import torch
from qubosolver.qubo_instance import QUBOInstance
from qubosolver.config import SolverConfig, DriveShapingConfig
from qubosolver.qubo_types import DriveType
from qubosolver.solver import QuboSolver
import matplotlib.pyplot as plt
plt.rcParams["animation.html"] = "jshtml"
%matplotlib inline
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) Cell In[1], line 3 1 import torch ----> 3 from qubosolver.qubo_instance import QUBOInstance 4 from qubosolver.config import SolverConfig, DriveShapingConfig 5 from qubosolver.qubo_types import DriveType File ~/qubo-solver/qubosolver/__init__.py:5 1 from __future__ import annotations 3 from importlib import import_module ----> 5 from .data import * # noqa: F403 6 from .qubo_instance import * # noqa: F403 7 from .qubo_types import * # noqa: F403 File ~/qubo-solver/qubosolver/data.py:10 7 from torch.utils.data import Dataset 9 from qubosolver.data_utils import generate_symmetric_mask ---> 10 from qubosolver.qubo_types import SolutionStatusType 12 if TYPE_CHECKING: 13 pass File ~/qubo-solver/qubosolver/qubo_types.py:5 1 from __future__ import annotations 3 from enum import Enum ----> 5 from pulser.register.special_layouts import SquareLatticeLayout, TriangularLatticeLayout 8 class StrEnum(str, Enum): 9 """String-based Enums class implementation""" File ~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/pulser/__init__.py:18 15 """A pulse-level composer for neutral-atom quantum devices.""" 17 from pulser._version import __version__ as __version__ ---> 18 from pulser.waveforms import ( 19 CompositeWaveform, 20 CustomWaveform, 21 ConstantWaveform, 22 RampWaveform, 23 BlackmanWaveform, 24 InterpolatedWaveform, 25 KaiserWaveform, 26 ) 27 from pulser.pulse import Pulse 28 from pulser.register import Register, Register3D File ~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/pulser/waveforms.py:28 25 from types import FunctionType 26 from typing import TYPE_CHECKING, Any, Optional, Tuple, TypeVar, Union, cast ---> 28 import matplotlib.pyplot as plt 29 import numpy as np 30 import scipy.interpolate as interpolate File 
~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/matplotlib/pyplot.py:69 66 from matplotlib import _docstring 67 from matplotlib.backend_bases import ( 68 FigureCanvasBase, FigureManagerBase, MouseButton) ---> 69 from matplotlib.figure import Figure, FigureBase, figaspect 70 from matplotlib.gridspec import GridSpec, SubplotSpec 71 from matplotlib import rcsetup, rcParamsDefault, rcParamsOrig File ~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/matplotlib/figure.py:40 37 import numpy as np 39 import matplotlib as mpl ---> 40 from matplotlib import _blocking_input, backend_bases, _docstring, projections 41 from matplotlib.artist import ( 42 Artist, allow_rasterization, _finalize_rasterization) 43 from matplotlib.backend_bases import ( 44 DrawEvent, FigureCanvasBase, NonGuiException, MouseButton, _get_renderer) File ~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/matplotlib/projections/__init__.py:55 1 """ 2 Non-separable transforms that map from data space to screen space. 3 (...) 52 `matplotlib.projections.polar` may also be of interest. 53 """ ---> 55 from .. import axes, _docstring 56 from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes 57 from .polar import PolarAxes File ~/.local/share/hatch/env/virtual/qubo-solver/IwRVwNQb/qubo-solver/lib/python3.10/site-packages/matplotlib/axes/__init__.py:1 ----> 1 from . import _base 2 from ._axes import Axes 4 # Backcompat. 
File <frozen importlib._bootstrap>:1027, in _find_and_load(name, import_) File <frozen importlib._bootstrap>:1006, in _find_and_load_unlocked(name, import_) File <frozen importlib._bootstrap>:688, in _load_unlocked(spec) File <frozen importlib._bootstrap_external>:879, in exec_module(self, module) File <frozen importlib._bootstrap_external>:975, in get_code(self, fullname) File <frozen importlib._bootstrap_external>:1070, in get_data(self, path) KeyboardInterrupt:
Create the QUBOInstance¶
Here, we have a 3x3 QUBO matrix with negative diagonal and positive off-diagonal terms.
# 3x3 symmetric QUBO matrix: negative values on the diagonal, positive
# off-diagonal coupling terms.
coefficients = torch.tensor([[-1.0, 0.5, 0.2], [0.5, -2.0, 0.3], [0.2, 0.3, -3.0]])
instance = QUBOInstance(coefficients)
Heuristic Drive Shaper¶
Default method
import torch
from qubosolver.qubo_instance import QUBOInstance
from qubosolver.config import EmbeddingConfig, LocalEmulator
from pulser_simulation import QutipBackendV2
def best_from_solution(sol):
    """Return ``(cost, bitstring)`` of the lowest-cost sample in *sol*.

    ``sol`` is expected to expose ``costs`` (1-D tensor of sampled costs)
    and ``bitstrings`` (2-D tensor, one row per sample).  Returns
    ``(None, None)`` when no costs are available.
    """
    costs = sol.costs
    if costs is None or len(costs) == 0:
        return None, None
    winner = int(torch.argmin(costs).item())
    return float(costs[winner].item()), sol.bitstrings[winner].tolist()
def run_one(method: DriveType, q: torch.Tensor, runs: int = 500, dmm: bool = True):
    """Solve the QUBO *q* with the given drive shaping *method* and print a summary.

    Args:
        method: Drive shaping method (e.g. ``DriveType.HEURISTIC``).
        q: Square QUBO coefficient matrix.
        runs: Number of shots on the emulator backend.
        dmm: Whether to enable the detuning map modulator.

    Returns:
        Tuple ``(solution, best_cost, best_bitstring)`` where the last two
        come from :func:`best_from_solution`.
    """
    instance = QUBOInstance(coefficients=q)
    embed_cfg = EmbeddingConfig(
        embedding_method="greedy",
        greedy_traps=6,
        # no greedy_spacing => use the default
    )
    drive_cfg = DriveShapingConfig(
        drive_shaping_method=method,
        dmm=dmm,
    )
    backend = LocalEmulator(backend_type=QutipBackendV2, runs=runs)
    config = SolverConfig(
        use_quantum=True,
        embedding=embed_cfg,
        drive_shaping=drive_cfg,
        backend=backend,
    )
    solver = QuboSolver(instance, config)
    # solve (includes execution + measurement)
    sol = solver.solve()
    # draw the pulse sequence
    embedding = solver.embedding()
    drive = solver.drive(embedding)[0]
    solver.draw_sequence(drive, embedding)
    best_cost, best_bitstring = best_from_solution(sol)
    # Bug fix: the tag was hard-coded to "HEURISTIC" regardless of the actual
    # configuration; derive it from the arguments so the printed header
    # matches the method/dmm combination that was run.
    tag = f"{getattr(method, 'name', method)} (dmm={dmm})"
    print("\n" + "=" * 80)
    print(f"[{tag}] runs={runs}")
    print("Solution object:")
    print(sol)
    print(f"Best sampled cost: {best_cost}")
    print(f"Best sampled bitstring: {best_bitstring}")
    return sol, best_cost, best_bitstring
if __name__ == "__main__":
Q1 = torch.tensor([
[-6.0, 2.0, 2.0, 2.0],
[ 2.0, -7.5, 2.0, 2.0],
[ 2.0, 2.0, -7.5, 2.0],
[ 2.0, 2.0, 2.0, -7.0],
])
sol_h_dmm, best_h_dmm, bs_h_dmm = run_one(DriveType.HEURISTIC, Q1, runs=500, dmm=True)
sol_h_nodmm, best_h_nodmm, bs_h_nodmm = run_one(DriveType.HEURISTIC, Q1, runs=500, dmm=False)
print("\n" + "#" * 80)
print("COMPARISON (best sampled cost):")
print(f"HEURISTIC (dmm=True) : {best_h_dmm} bitstring={bs_h_dmm}")
print(f"HEURISTIC (dmm=False): {best_h_nodmm} bitstring={bs_h_nodmm}")
================================================================================
[ADIABATIC] runs=500
Solution object:
QUBOSolution(bitstrings=tensor([[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 0]], dtype=torch.int32), costs=tensor([-7.5000, -7.5000, -7.0000, -6.0000, 0.0000]), counts=tensor([146, 148, 136, 64, 6]), probabilities=tensor([0.2920, 0.2960, 0.2720, 0.1280, 0.0120]), solution_status=<SolutionStatusType.UNPROCESSED: 'unprocessed'>)
Best sampled cost: -7.5
Best sampled bitstring: [0, 0, 1, 0]
================================================================================
[HEURISTIC (dmm=True)] runs=500
Solution object:
QUBOSolution(bitstrings=tensor([[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 0]], dtype=torch.int32), costs=tensor([-11.0000, -7.5000, -7.5000, -7.0000, -6.0000, 0.0000]), counts=tensor([422, 33, 29, 4, 8, 4]), probabilities=tensor([0.8440, 0.0660, 0.0580, 0.0080, 0.0160, 0.0080]), solution_status=<SolutionStatusType.UNPROCESSED: 'unprocessed'>)
Best sampled cost: -11.0
Best sampled bitstring: [0, 1, 1, 0]
================================================================================
[HEURISTIC (dmm=False)] runs=500
Solution object:
QUBOSolution(bitstrings=tensor([[0, 1, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 0, 0]], dtype=torch.int32), costs=tensor([-11.0000, -7.5000, -7.5000, -7.0000, -6.0000, 0.0000]), counts=tensor([337, 72, 8, 76, 5, 2]), probabilities=tensor([0.6740, 0.1440, 0.0160, 0.1520, 0.0100, 0.0040]), solution_status=<SolutionStatusType.UNPROCESSED: 'unprocessed'>)
Best sampled cost: -11.0
Best sampled bitstring: [0, 1, 1, 0]
################################################################################
COMPARISON (best sampled cost):
ADIABATIC : -7.5 bitstring=[0, 0, 1, 0]
HEURISTIC (dmm=True) : -11.0 bitstring=[0, 1, 1, 0]
HEURISTIC (dmm=False): -11.0 bitstring=[0, 1, 1, 0]
Optimized Drive shaping¶
Parameters to customize:¶
For the OPTIMIZED drive shaping, we have the following parameters:
- optimized_n_calls: Number of calls for the optimization process.
- optimized_re_execute_opt_drive: Whether to re-run the optimal drive sequence after optimization.
- optimized_initial_omega_parameters: Default initial omega parameters for the drive. Defaults to (1, 2, 1).
- optimized_initial_detuning_parameters: Default initial detuning parameters for the drive. Defaults to (-2, 0, 2).
Default configuration¶
# OPTIMIZED drive shaping with every parameter left at its default value.
optimized_drive = DriveShapingConfig(drive_shaping_method=DriveType.OPTIMIZED)
default_config = SolverConfig.from_kwargs(use_quantum=True, drive_shaping=optimized_drive)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)
Changing optimized_n_calls¶
# Same as the default OPTIMIZED run, but with a custom number of
# optimization calls.
tuned_drive = DriveShapingConfig(
    drive_shaping_method=DriveType.OPTIMIZED,
    optimized_n_calls=13,
)
default_config = SolverConfig.from_kwargs(use_quantum=True, drive_shaping=tuned_drive)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)
Changing the initial parameters¶
Initial parameters of the optimization procedure can be changed as optimized_initial_omega_parameters and optimized_initial_detuning_parameters
# OPTIMIZED run with custom starting points for the optimizer: initial
# omega and detuning parameters of the drive.
seeded_drive = DriveShapingConfig(
    drive_shaping_method=DriveType.OPTIMIZED,
    optimized_initial_omega_parameters=[1.0, 3.0, 1.0],
    optimized_initial_detuning_parameters=[-9.0, 0.0, 5.0],
)
default_config = SolverConfig.from_kwargs(use_quantum=True, drive_shaping=seeded_drive)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)
Changing optimized_re_execute_opt_drive¶
# OPTIMIZED run that re-executes the optimal drive sequence once the
# optimization has finished.
reexec_drive = DriveShapingConfig(
    drive_shaping_method=DriveType.OPTIMIZED,
    optimized_re_execute_opt_drive=True,
)
default_config = SolverConfig.from_kwargs(use_quantum=True, drive_shaping=reexec_drive)
solver = QuboSolver(instance, default_config)
solution = solver.solve()
print(solution)
Adding custom functions¶
One can change the drive shaping method by incorporating custom functions for:
- Evaluating a candidate bitstring and QUBO via optimized_custom_qubo_cost
- Performing optimization with a different objective than the best cost via optimized_custom_objective
- Adding callback functions via optimized_callback_objective.
from qubosolver.utils.qubo_eval import calculate_qubo_cost

# Example of a custom QUBO cost: add a penalty of 2 for every unselected
# variable ("0" bit) on top of the plain QUBO energy.
def penalized_qubo(bitstring: str, QUBO: torch.Tensor) -> float:
    """Return the QUBO cost of *bitstring* plus 2 per '0' character."""
    return calculate_qubo_cost(bitstring, QUBO) + 2 * bitstring.count("0")

# Example of saving intermediate optimization results via a callback.
opt_results: list = []

def callback(d: dict) -> None:
    """Record each intermediate optimization result for later inspection."""
    opt_results.append(d)

# Example of optimizing a probability-weighted average cost instead of the
# best sampled cost.
def average_objective(
    bitstrings: list,
    counts: list,
    probabilities: list,
    costs: list,
    best_cost: float,
    best_bitstring: str,
) -> float:
    """Return the expected cost over the sampled distribution."""
    # Generator expression avoids materializing an intermediate list.
    return sum(p * c for p, c in zip(probabilities, costs))

# Backward-compatible alias for the original (misspelled) name.
average_ojective = average_objective

drive_shaping = DriveShapingConfig(
    drive_shaping_method=DriveType.OPTIMIZED,
    optimized_re_execute_opt_drive=True,
    optimized_custom_qubo_cost=penalized_qubo,
    optimized_callback_objective=callback,
    optimized_custom_objective=average_objective,
)
config = SolverConfig(
    use_quantum=True,
    drive_shaping=drive_shaping,
)
solver = QuboSolver(instance, config)
solution = solver.solve()
len(opt_results), opt_results[-1]
solution