
Quantum models

QuantumModel(circuit, observable=None, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, mitigation=None, configuration=None)

Bases: Module

The central class of qadence that executes QuantumCircuits and makes them differentiable.

This class should be used as the base class for any new quantum model supported in the qadence framework. For information on how to implement custom models, see here.

Example:

import torch
from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, PI, chain, kron
from qadence import FeatureParameter, VariationalParameter

theta = VariationalParameter("theta")
phi = FeatureParameter("phi")

block = chain(
    kron(RX(0, theta), RY(1, theta)),
    kron(RX(0, phi), RY(1, phi)),
)

circuit = QuantumCircuit(2, block)

observable = Z(0) + Z(1)

model = QuantumModel(circuit, observable)
values = {"phi": torch.tensor([PI, PI/2]), "theta": torch.tensor([PI, PI/2])}

wf = model.run(values)
xs = model.sample(values, n_shots=100)
ex = model.expectation(values)
print(wf)
print(xs)
print(ex)
tensor([[ 1.0000e+00+0.0000e+00j, -1.2246e-16+0.0000e+00j,
          0.0000e+00+1.2246e-16j,  0.0000e+00-1.4998e-32j],
        [ 4.9304e-32+0.0000e+00j,  2.2204e-16+0.0000e+00j,
          0.0000e+00-2.2204e-16j,  0.0000e+00-1.0000e+00j]])
[OrderedCounter({'00': 100}), OrderedCounter({'11': 100})]
tensor([[ 2.],
        [-2.]], requires_grad=True)

Initialize a generic QuantumModel instance.

PARAMETER DESCRIPTION
circuit

The circuit that is executed.

TYPE: QuantumCircuit

observable

Optional observable(s) that are used only in the expectation method. You can also provide observables on the fly to the expectation call directly.

TYPE: list[AbstractBlock] | AbstractBlock | None DEFAULT: None

backend

A backend for circuit execution.

TYPE: BackendName | str DEFAULT: PYQTORCH

diff_mode

A differentiability mode. Parameter shift based modes work on all backends. AD based modes only on PyTorch based backends.

TYPE: DiffMode DEFAULT: AD

measurement

Optional measurement protocol. If None, use exact expectation value with a statevector simulator.

TYPE: Measurements | None DEFAULT: None

configuration

Configuration for the backend.

TYPE: BackendConfiguration | dict | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

RAISES DESCRIPTION
ValueError

if the diff_mode argument is set to None

Source code in qadence/model.py
def __init__(
    self,
    circuit: QuantumCircuit,
    observable: list[AbstractBlock] | AbstractBlock | None = None,
    backend: BackendName | str = BackendName.PYQTORCH,
    diff_mode: DiffMode = DiffMode.AD,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    configuration: BackendConfiguration | dict | None = None,
):
    """Initialize a generic QuantumModel instance.

    Arguments:
        circuit: The circuit that is executed.
        observable: Optional observable(s) that are used only in the `expectation` method. You
            can also provide observables on the fly to the expectation call directly.
        backend: A backend for circuit execution.
        diff_mode: A differentiability mode. Parameter shift based modes work on all backends.
            AD based modes only on PyTorch based backends.
        measurement: Optional measurement protocol. If None, use
            exact expectation value with a statevector simulator.
        configuration: Configuration for the backend.
        noise: A noise model to use.

    Raises:
        ValueError: if the `diff_mode` argument is set to None
    """
    super().__init__()

    if not isinstance(circuit, QuantumCircuit):
        raise TypeError(
            f"The circuit should be of type '<class QuantumCircuit>'. Got {type(circuit)}."
        )

    if diff_mode is None:
        raise ValueError("`diff_mode` cannot be `None` in a `QuantumModel`.")

    self.backend = backend_factory(
        backend=backend, diff_mode=diff_mode, configuration=configuration
    )

    if isinstance(observable, list) or observable is None:
        observable = observable
    else:
        observable = [observable]

    def _is_feature_param(p: Parameter) -> bool:
        return not p.trainable and not p.is_number

    if observable is None:
        self.inputs = list(filter(_is_feature_param, circuit.unique_parameters))
    else:
        uparams = unique_parameters(chain(circuit.block, *observable))
        self.inputs = list(filter(_is_feature_param, uparams))

    conv = self.backend.convert(circuit, observable)
    self.embedding_fn = conv.embedding_fn
    self._circuit = conv.circuit
    self._observable = conv.observable
    self._backend_name = backend
    self._diff_mode = diff_mode
    self._measurement = measurement
    self._noise = noise
    self._mitigation = mitigation
    self._params = nn.ParameterDict(
        {
            str(key): nn.Parameter(val, requires_grad=val.requires_grad)
            for key, val in conv.params.items()
        }
    )
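
A minimal sketch of constructing the same model with a different differentiation mode, reusing the circuit and observable from the example at the top of this page; the DiffMode.GPSR value is assumed to be available alongside DiffMode.AD. Parameter shift based modes work on all backends, while AD based modes require a PyTorch based backend.

from qadence import QuantumModel, BackendName, DiffMode

# same circuit and observable as above, but differentiated with the
# general parameter-shift rule instead of automatic differentiation
model_gpsr = QuantumModel(
    circuit,
    observable,
    backend=BackendName.PYQTORCH,
    diff_mode=DiffMode.GPSR,
)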

device property

Get device.

RETURNS DESCRIPTION
device

torch.device

in_features property

Number of inputs.

num_vparams property

The number of variational parameters.

out_features property

Number of outputs.

vals_vparams property

Dictionary with parameters which are actually updated during optimization.

vparams property

Variational parameters.
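
A short sketch of inspecting these properties on the model built in the example at the top of this page; the comments reflect that example (one variational parameter "theta" and one feature parameter "phi"), while the exact container types returned are an assumption.

print(model.num_vparams)   # number of variational parameters, here 1 ("theta")
print(model.vparams)       # the variational parameters themselves
print(model.vals_vparams)  # the values that get updated during optimization
print(model.in_features)   # number of feature parameters, here 1 ("phi")
print(model.device)        # torch.device the model currently lives on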

assign_parameters(values)

Return the final, assigned circuit that is used in e.g. backend.run.

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor]

RETURNS DESCRIPTION
Any

Final, assigned circuit that is used in e.g. backend.run

Source code in qadence/model.py
def assign_parameters(self, values: dict[str, Tensor]) -> Any:
    """Return the final, assigned circuit that is used in e.g. `backend.run`.

    Arguments:
        values: Values dict which contains values for the parameters.

    Returns:
        Final, assigned circuit that is used in e.g. `backend.run`
    """
    params = self.embedding_fn(self._params, values)
    return self.backend.assign_parameters(self._circuit, params)

circuit(circuit)

Get backend-converted circuit.

PARAMETER DESCRIPTION
circuit

QuantumCircuit instance.

TYPE: QuantumCircuit

RETURNS DESCRIPTION
ConvertedCircuit

Backend circuit.

Source code in qadence/model.py
def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
    """Get backend-converted circuit.

    Args:
        circuit: QuantumCircuit instance.

    Returns:
        Backend circuit.
    """
    return self.backend.circuit(circuit)

expectation(values={}, observable=None, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG)

Compute expectation using the given backend.

Given an input state \(|\psi_0 \rangle\), a set of variational parameters \(\vec{\theta}\), the unitary representation of the model \(U(\vec{\theta})\) and an observable \(\hat{O}\), we return \(\langle \psi_0 | U^\dagger(\vec{\theta}) \hat{O} U(\vec{\theta}) | \psi_0 \rangle\).

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: {}

observable

Observable part of the expectation.

TYPE: list[ConvertedObservable] | ConvertedObservable | None DEFAULT: None

state

Optional input state.

TYPE: Optional[Tensor] DEFAULT: None

measurement

Optional measurement protocol. If None, use exact expectation value with a statevector simulator.

TYPE: Measurements | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

mitigation

A mitigation protocol to use.

TYPE: Mitigations | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RAISES DESCRIPTION
ValueError

when no observable is set.

RETURNS DESCRIPTION
Tensor

A torch.Tensor of shape n_batches x n_obs

Source code in qadence/model.py
def expectation(
    self,
    values: dict[str, Tensor] = {},
    observable: list[ConvertedObservable] | ConvertedObservable | None = None,
    state: Optional[Tensor] = None,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Compute expectation using the given backend.



    Given an input state $|\psi_0 \rangle$,
    a set of variational parameters $\vec{\theta}$,
    the unitary representation of the model $U(\vec{\theta})$
    and an observable $\hat{O}$,
    we return $\langle \psi_0 | U^\dagger(\vec{\theta}) \hat{O} U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        observable: Observable part of the expectation.
        state: Optional input state.
        measurement: Optional measurement protocol. If None, use
            exact expectation value with a statevector simulator.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Raises:
        ValueError: when no observable is set.

    Returns:
        A torch.Tensor of shape n_batches x n_obs
    """
    if observable is None:
        if self._observable is None:
            raise ValueError(
                "Provide an AbstractBlock as the observable to compute expectation."
                "Either pass a 'native_observable' directly to 'QuantumModel.expectation'"
                "or pass a (non-native) '<class AbstractBlock>' to the 'QuantumModel.__init__'."
            )
        observable = self._observable

    params = self.embedding_fn(self._params, values)
    if measurement is None:
        measurement = self._measurement
    if noise is None:
        noise = self._noise
    else:
        self._noise = noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.expectation(
        circuit=self._circuit,
        observable=observable,
        param_values=params,
        state=state,
        measurement=measurement,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )
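
A sketch of providing an observable on the fly, reusing the model and values from the example at the top of this page; the block is first converted with model.observable (documented below) so that it matches the ConvertedObservable type expected by this method.

from qadence import Z, kron

# two-qubit correlator Z(0) x Z(1), converted for the backend in use
correlator = model.observable(kron(Z(0), Z(1)), n_qubits=2)

ex = model.expectation(values, observable=[correlator])
print(ex)  # shape: n_batches x n_obs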

forward(*args, **kwargs)

Calls the run method with the given arguments.

RETURNS DESCRIPTION
Tensor

A torch.Tensor representing output.

TYPE: Tensor

Source code in qadence/model.py
def forward(self, *args: Any, **kwargs: Any) -> Tensor:
    """Calls run method with arguments.

    Returns:
        Tensor: A torch.Tensor representing output.
    """
    return self.run(*args, **kwargs)
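
Since forward simply delegates to run, calling the model like any other torch.nn.Module returns the same result; a small check, reusing the model and values from the example at the top of this page.

out_forward = model(values)   # nn.Module.__call__ -> forward -> run
out_run = model.run(values)
assert torch.allclose(out_forward, out_run)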

load(file_path, as_torch=False, map_location='cpu') classmethod

Load QuantumModel.

PARAMETER DESCRIPTION
file_path

File path to load model from.

TYPE: str | Path

as_torch

Load parameters as torch tensor. Defaults to False.

TYPE: bool DEFAULT: False

map_location

Location for loading. Defaults to "cpu".

TYPE: str | device DEFAULT: 'cpu'

RETURNS DESCRIPTION
QuantumModel

QuantumModel from file_path.

Source code in qadence/model.py
@classmethod
def load(
    cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu"
) -> QuantumModel:
    """Load QuantumModel.

    Arguments:
        file_path: File path to load model from.
        as_torch: Load parameters as torch tensor. Defaults to False.
        map_location (str | torch.device, optional): Location for loading. Defaults to "cpu".

    Returns:
        QuantumModel from file_path.
    """
    qm_pt = {}
    if isinstance(file_path, str):
        file_path = Path(file_path)
    if os.path.isdir(file_path):
        from qadence.ml_tools.callbacks.saveload import get_latest_checkpoint_name

        file_path = file_path / get_latest_checkpoint_name(file_path, "model")

    try:
        qm_pt = torch.load(file_path, map_location=map_location)
    except Exception as e:
        logger.error(f"Unable to load QuantumModel due to {e}")
    return cls._from_dict(qm_pt, as_torch)
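
A sketch of restoring a model from disk, assuming a checkpoint previously written with model.save (documented below) at the hypothetical path ./checkpoints/quantum_model.pt.

from qadence import QuantumModel

loaded = QuantumModel.load("./checkpoints/quantum_model.pt", map_location="cpu")
print(loaded.vparams)

# passing the folder instead of a file resolves the latest "model" checkpoint in it
loaded_latest = QuantumModel.load("./checkpoints")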

load_params_from_dict(d, strict=True)

Copy parameters from dictionary into this QuantumModel.

Unlike QuantumModel.from_dict, this method does not create a new QuantumModel instance, but rather loads the parameters into the same QuantumModel. The behaviour of this method is similar to torch.nn.Module.load_state_dict.

The dictionary is assumed to have the format as saved via QuantumModel.to_dict.

PARAMETER DESCRIPTION
d

The dictionary

TYPE: dict

strict

Whether to strictly enforce that the parameter keys in the dictionary and in the model match exactly. Default: True.

TYPE: bool DEFAULT: True

Source code in qadence/model.py
def load_params_from_dict(self, d: dict, strict: bool = True) -> None:
    """Copy parameters from dictionary into this QuantumModel.

    Unlike :meth:`~qadence.QuantumModel.from_dict`, this method does not create a new
    QuantumModel instance, but rather loads the parameters into the same QuantumModel.
    The behaviour of this method is similar to :meth:`~torch.nn.Module.load_state_dict`.

    The dictionary is assumed to have the format as saved via
    :meth:`~qadence.QuantumModel.to_dict`

    Args:
        d (dict): The dictionary
        strict (bool, optional):
            Whether to strictly enforce that the parameter keys in the dictionary and
            in the model match exactly. Default: ``True``.
    """
    param_dict = d["param_dict"]
    missing_keys = set(self._params.keys()) - set(param_dict.keys())
    unexpected_keys = set(param_dict.keys()) - set(self._params.keys())

    if strict:
        error_msgs = []
        if len(unexpected_keys) > 0:
            error_msgs.append(f"Unexpected key(s) in dictionary: {unexpected_keys}")
        if len(missing_keys) > 0:
            error_msgs.append(f"Missing key(s) in dictionary: {missing_keys}")
        if len(error_msgs) > 0:
            errors_string = "\n\t".join(error_msgs)
            raise RuntimeError(
                f"Error(s) loading the parameter dictionary due to: \n\t{errors_string}\n"
                "This error was thrown because the `strict` argument is set `True`."
                "If you don't need the parameter keys of the dictionary to exactly match "
                "the model parameters, set `strict=False`."
            )

    for n, param in param_dict.items():
        try:
            with torch.no_grad():
                self._params[n].copy_(
                    torch.nn.Parameter(param, requires_grad=param.requires_grad)
                )
        except Exception as e:
            logger.warning(f"Unable to load parameter {n} from dictionary due to {e}.")
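
A sketch of copying parameter values into an existing model instance, assuming a checkpoint written with model.save so that the loaded dictionary carries the expected "param_dict" entry; the path is hypothetical.

checkpoint = torch.load("./checkpoints/quantum_model.pt", map_location="cpu")

# the current model object is kept, only its parameter values are overwritten
model.load_params_from_dict(checkpoint, strict=True)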

observable(observable, n_qubits)

Get backend observable.

PARAMETER DESCRIPTION
observable

Observable block.

TYPE: AbstractBlock

n_qubits

Number of qubits

TYPE: int

RETURNS DESCRIPTION
Any

Backend observable.

Source code in qadence/model.py
def observable(self, observable: AbstractBlock, n_qubits: int) -> Any:
    """Get backend observable.

    Args:
        observable: Observable block.
        n_qubits: Number of qubits

    Returns:
        Backend observable.
    """
    return self.backend.observable(observable, n_qubits)

overlap()

Overlap of model.

RAISES DESCRIPTION
NotImplementedError

The overlap method is not implemented for this model.

Source code in qadence/model.py
def overlap(self) -> Tensor:
    """Overlap of model.

    Raises:
        NotImplementedError: The overlap method is not implemented for this model.
    """
    raise NotImplementedError("The overlap method is not implemented for this model.")

reset_vparams(values)

Reset all the variational parameters with a given list of values.

Source code in qadence/model.py
def reset_vparams(self, values: Sequence) -> None:
    """Reset all the variational parameters with a given list of values."""
    current_vparams = OrderedDict({k: v for k, v in self._params.items() if v.requires_grad})

    assert (
        len(values) == self.num_vparams
    ), "Pass an iterable with the values of all variational parameters"
    for i, k in enumerate(current_vparams.keys()):
        current_vparams[k].data = torch.tensor([values[i]])
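
A sketch for the example model at the top of this page, which has a single variational parameter ("theta"); the list must contain exactly model.num_vparams values.

model.reset_vparams([0.5])   # one value per variational parameter
print(model.vals_vparams)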

run(values=None, state=None, endianness=Endianness.BIG)

Run model.

Given an input state \(| \psi_0 \rangle\), a set of variational parameters \(\vec{\theta}\) and the unitary representation of the model \(U(\vec{\theta})\) we return \(U(\vec{\theta}) | \psi_0 \rangle\).

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: None

state

Optional input state to apply model on.

TYPE: Tensor | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RETURNS DESCRIPTION
Tensor

A torch.Tensor representing output.

Source code in qadence/model.py
def run(
    self,
    values: dict[str, Tensor] = None,
    state: Tensor | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Run model.

    Given an input state $| \psi_0 \rangle$,
    a set of variational parameters $\vec{\theta}$
    and the unitary representation of the model $U(\vec{\theta})$
    we return $U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        state: Optional input state to apply model on.
        endianness: Storage convention for binary information.

    Returns:
        A torch.Tensor representing output.
    """
    if values is None:
        values = {}

    params = self.embedding_fn(self._params, values)

    return self.backend.run(self._circuit, params, state=state, endianness=endianness)
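
A sketch of running the model on a custom initial state instead of the all-zero state, reusing the model and values from the example at the top of this page and assuming the product_state helper exported by qadence.

from qadence import product_state

init_state = product_state("10")           # |10> as the input state
wf = model.run(values, state=init_state)
print(wf)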

sample(values={}, n_shots=1000, state=None, noise=None, mitigation=None, endianness=Endianness.BIG)

Obtain samples from model.

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: {}

n_shots

Number of shots to sample.

TYPE: int DEFAULT: 1000

state

Optional input state to apply model on.

TYPE: Tensor | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

mitigation

A mitigation protocol to use.

TYPE: Mitigations | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RETURNS DESCRIPTION
list[Counter]

A list of Counter instances with the sample results.

Source code in qadence/model.py
def sample(
    self,
    values: dict[str, torch.Tensor] = {},
    n_shots: int = 1000,
    state: torch.Tensor | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> list[Counter]:
    """Obtain samples from model.

    Arguments:
        values: Values dict which contains values for the parameters.
        n_shots: Number of shots to sample.
        state: Optional input state to apply model on.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Returns:
        A list of Counter instances with the sample results.
    """
    params = self.embedding_fn(self._params, values)
    if noise is None:
        noise = self._noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.sample(
        self._circuit,
        params,
        n_shots=n_shots,
        state=state,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )
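
A sketch of turning the sampled counts into empirical probabilities, reusing the model and values from the example at the top of this page; each entry of the returned list is a Counter for one entry of the batch.

n_shots = 500
counts = model.sample(values, n_shots=n_shots)

# normalize the raw counts of each batch entry into relative frequencies
probabilities = [
    {bitstring: count / n_shots for bitstring, count in counter.items()}
    for counter in counts
]
print(probabilities)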

save(folder, file_name='quantum_model.pt', save_params=True)

Save model.

PARAMETER DESCRIPTION
folder

Folder where model is saved.

TYPE: str | Path

file_name

File name for saving model. Defaults to "quantum_model.pt".

TYPE: str DEFAULT: 'quantum_model.pt'

save_params

Save parameters if True. Defaults to True.

TYPE: bool DEFAULT: True

RAISES DESCRIPTION
FileNotFoundError

If folder is not a directory.

Source code in qadence/model.py
def save(
    self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True
) -> None:
    """Save model.

    Arguments:
        folder: Folder where model is saved.
        file_name: File name for saving model. Defaults to "quantum_model.pt".
        save_params: Save parameters if True. Defaults to True.

    Raises:
        FileNotFoundError: If folder is not a directory.
    """
    if not os.path.isdir(folder):
        raise FileNotFoundError
    try:
        torch.save(self._to_dict(save_params), folder / Path(file_name))
    except Exception as e:
        logger.error(f"Unable to write QuantumModel to disk due to {e}")
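
A sketch of writing the model to disk; the checkpoint folder is hypothetical and must exist beforehand, otherwise FileNotFoundError is raised.

from pathlib import Path

ckpt_dir = Path("./checkpoints")
ckpt_dir.mkdir(exist_ok=True)

model.save(ckpt_dir, file_name="quantum_model.pt", save_params=True)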

to(*args, **kwargs)

Conversion method for device or types.

RETURNS DESCRIPTION
QuantumModel

QuantumModel with conversions.

Source code in qadence/model.py
def to(self, *args: Any, **kwargs: Any) -> QuantumModel:
    """Conversion method for device or types.

    Returns:
        QuantumModel with conversions.
    """
    from pyqtorch import QuantumCircuit as PyQCircuit

    try:
        if isinstance(self._circuit.native, PyQCircuit):
            self._circuit.native = self._circuit.native.to(*args, **kwargs)
            if self._observable is not None:
                if isinstance(self._observable, ConvertedObservable):
                    self._observable.native = self._observable.native.to(*args, **kwargs)
                elif isinstance(self._observable, list):
                    for obs in self._observable:
                        obs.native = obs.native.to(*args, **kwargs)
            self._params = self._params.to(
                device=self._circuit.native.device,
                dtype=(
                    torch.float64
                    if self._circuit.native.dtype == torch.cdouble
                    else torch.float32
                ),
            )
            logger.debug(f"Moved {self} to {args}, {kwargs}.")
        else:
            logger.debug("QuantumModel.to only supports pyqtorch.QuantumCircuits.")
    except Exception as e:
        logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
    return self
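
A sketch of moving the model, assuming the default pyqtorch backend (other backends are left unchanged, as the source above shows); whether the GPU branch runs depends on availability.

model = model.to(dtype=torch.cdouble)   # double-precision statevector simulation

if torch.cuda.is_available():           # move circuit, observables and parameters to the GPU
    model = model.to(device="cuda")

print(model.device)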