Quantum models

QuantumModel(circuit, observable=None, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, mitigation=None, configuration=None)

Bases: Module

The central class of qadence that executes QuantumCircuits and makes them differentiable.

This class should be used as the base class for any new quantum model supported in the qadence framework. For information on the implementation of custom models, see here.

Example:

import torch
from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, PI, chain, kron
from qadence import FeatureParameter, VariationalParameter

theta = VariationalParameter("theta")
phi = FeatureParameter("phi")

block = chain(
    kron(RX(0, theta), RY(1, theta)),
    kron(RX(0, phi), RY(1, phi)),
)

circuit = QuantumCircuit(2, block)

observable = Z(0) + Z(1)

model = QuantumModel(circuit, observable)
values = {"phi": torch.tensor([PI, PI/2]), "theta": torch.tensor([PI, PI/2])}

wf = model.run(values)
xs = model.sample(values, n_shots=100)
ex = model.expectation(values)
print(wf)
print(xs)
print(ex)
tensor([[ 1.0000e+00+0.0000e+00j, -1.2246e-16+0.0000e+00j,
          0.0000e+00+1.2246e-16j,  0.0000e+00-1.4998e-32j],
        [ 4.9304e-32+0.0000e+00j,  2.2204e-16+0.0000e+00j,
          0.0000e+00-2.2204e-16j,  0.0000e+00-1.0000e+00j]])
[OrderedCounter({'00': 100}), OrderedCounter({'11': 100})]
tensor([[ 2.],
        [-2.]], requires_grad=True)

Initialize a generic QuantumModel instance.

PARAMETER DESCRIPTION
circuit

The circuit that is executed.

TYPE: QuantumCircuit

observable

Optional observable(s) that are used only in the expectation method. You can also provide observables on the fly to the expectation call directly.

TYPE: list[AbstractBlock] | AbstractBlock | None DEFAULT: None

backend

A backend for circuit execution.

TYPE: BackendName | str DEFAULT: PYQTORCH

diff_mode

A differentiability mode. Parameter shift based modes work on all backends. AD based modes only on PyTorch based backends.

TYPE: DiffMode DEFAULT: AD

measurement

Optional measurement protocol. If None, use exact expectation value with a statevector simulator.

TYPE: Measurements | None DEFAULT: None

configuration

Configuration for the backend.

TYPE: BackendConfiguration | dict | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

RAISES DESCRIPTION
ValueError

If the diff_mode argument is set to None.

Source code in qadence/model.py
def __init__(
    self,
    circuit: QuantumCircuit,
    observable: list[AbstractBlock] | AbstractBlock | None = None,
    backend: BackendName | str = BackendName.PYQTORCH,
    diff_mode: DiffMode = DiffMode.AD,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    configuration: BackendConfiguration | dict | None = None,
):
    """Initialize a generic QuantumModel instance.

    Arguments:
        circuit: The circuit that is executed.
        observable: Optional observable(s) that are used only in the `expectation` method. You
            can also provide observables on the fly to the expectation call directly.
        backend: A backend for circuit execution.
        diff_mode: A differentiability mode. Parameter shift based modes work on all backends.
            AD based modes only on PyTorch based backends.
        measurement: Optional measurement protocol. If None, use
            exact expectation value with a statevector simulator.
        configuration: Configuration for the backend.
        noise: A noise model to use.

    Raises:
        ValueError: if the `diff_mode` argument is set to None
    """
    super().__init__()

    if not isinstance(circuit, QuantumCircuit):
        raise TypeError(
            f"The circuit should be of type '<class QuantumCircuit>'. Got {type(circuit)}."
        )

    if diff_mode is None:
        raise ValueError("`diff_mode` cannot be `None` in a `QuantumModel`.")

    self.backend = backend_factory(
        backend=backend, diff_mode=diff_mode, configuration=configuration
    )

    if isinstance(observable, list) or observable is None:
        observable = observable
    else:
        observable = [observable]

    def _is_feature_param(p: Parameter) -> bool:
        return not p.trainable and not p.is_number

    if observable is None:
        self.inputs = list(filter(_is_feature_param, circuit.unique_parameters))
    else:
        uparams = unique_parameters(chain(circuit.block, *observable))
        self.inputs = list(filter(_is_feature_param, uparams))

    conv = self.backend.convert(circuit, observable)
    self.embedding_fn = conv.embedding_fn
    self._circuit = conv.circuit
    self._observable = conv.observable
    self._backend_name = backend
    self._diff_mode = diff_mode
    self._measurement = measurement
    self._noise = noise
    self._mitigation = mitigation
    self._params = nn.ParameterDict(
        {
            str(key): nn.Parameter(val, requires_grad=val.requires_grad)
            for key, val in conv.params.items()
        }
    )
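
As a sketch of how the constructor arguments above fit together, the snippet below rebuilds the two-qubit model from the example at the top of this page with an explicitly chosen backend and a parameter-shift differentiation mode. It assumes that DiffMode.GPSR is the parameter-shift mode referred to above and that both enums can be imported from qadence.types, as done in the QNN.from_configs example further down.

import torch
from qadence import QuantumModel, QuantumCircuit, RX, RY, Z, chain, kron
from qadence import FeatureParameter, VariationalParameter
from qadence.types import BackendName, DiffMode  # assumed import path, mirroring the from_configs example

theta = VariationalParameter("theta")
phi = FeatureParameter("phi")

block = chain(
    kron(RX(0, theta), RY(1, theta)),
    kron(RX(0, phi), RY(1, phi)),
)
circuit = QuantumCircuit(2, block)

# Same circuit and observable as above, with explicit backend and diff_mode
model_gpsr = QuantumModel(
    circuit,
    observable=Z(0) + Z(1),
    backend=BackendName.PYQTORCH,
    diff_mode=DiffMode.GPSR,  # parameter-shift based, works on all backends
)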

device: torch.device property

Get device.

RETURNS DESCRIPTION
device

torch.device

in_features: int property

Number of inputs.

num_vparams: int property

The number of variational parameters.

out_features: int | None property

Number of outputs.

vals_vparams: Tensor property

Dictionary with parameters which are actually updated during optimization.

vparams: OrderedDict property

Variational parameters.
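
These properties can be inspected directly on a model instance. A minimal sketch, reusing the model built in the example at the top of this page (one feature parameter phi, one variational parameter theta, one observable):

print(model.in_features)    # number of feature parameters, here 1 ("phi")
print(model.out_features)   # number of observables, here 1
print(model.num_vparams)    # number of variational parameters, here 1 ("theta")
print(model.vparams)        # OrderedDict with the variational parameters
print(model.vals_vparams)   # current values of the variational parameters
print(model.device)         # device the backend tensors live on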

assign_parameters(values)

Return the final, assigned circuit that is used in e.g. backend.run.

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor]

RETURNS DESCRIPTION
Any

Final, assigned circuit that is used in e.g. backend.run

Source code in qadence/model.py
def assign_parameters(self, values: dict[str, Tensor]) -> Any:
    """Return the final, assigned circuit that is used in e.g. `backend.run`.

    Arguments:
        values: Values dict which contains values for the parameters.

    Returns:
        Final, assigned circuit that is used in e.g. `backend.run`
    """
    params = self.embedding_fn(self._params, values)
    return self.backend.assign_parameters(self._circuit, params)
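
A short usage sketch, reusing the model from the example at the top of this page; the returned object is the backend-native circuit with all parameter values bound.

import torch
from qadence import PI

values = {"phi": torch.tensor([PI]), "theta": torch.tensor([PI / 2])}

# Backend-native circuit with all parameters assigned, as used in backend.run
assigned_circuit = model.assign_parameters(values)
print(assigned_circuit)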

circuit(circuit)

Get backend-converted circuit.

PARAMETER DESCRIPTION
circuit

QuantumCircuit instance.

TYPE: QuantumCircuit

RETURNS DESCRIPTION
ConvertedCircuit

Backend circuit.

Source code in qadence/model.py
def circuit(self, circuit: QuantumCircuit) -> ConvertedCircuit:
    """Get backend-converted circuit.

    Args:
        circuit: QuantumCircuit instance.

    Returns:
        Backend circuit.
    """
    return self.backend.circuit(circuit)

expectation(values={}, observable=None, state=None, measurement=None, noise=None, mitigation=None, endianness=Endianness.BIG)

Compute expectation using the given backend.

Given an input state \(|\psi_0 \rangle\), a set of variational parameters \(\vec{\theta}\) and the unitary representation of the model \(U(\vec{\theta})\), we return the expectation value of the observable \(\hat{O}\), i.e. \(\langle \psi_0 | U^\dagger(\vec{\theta}) \hat{O} U(\vec{\theta}) | \psi_0 \rangle\).

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: {}

observable

Observable part of the expectation.

TYPE: list[ConvertedObservable] | ConvertedObservable | None DEFAULT: None

state

Optional input state.

TYPE: Optional[Tensor] DEFAULT: None

measurement

Optional measurement protocol. If None, use exact expectation value with a statevector simulator.

TYPE: Measurements | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

mitigation

A mitigation protocol to use.

TYPE: Mitigations | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RAISES DESCRIPTION
ValueError

when no observable is set.

RETURNS DESCRIPTION
Tensor

A torch.Tensor of shape n_batches x n_obs

Source code in qadence/model.py
def expectation(
    self,
    values: dict[str, Tensor] = {},
    observable: list[ConvertedObservable] | ConvertedObservable | None = None,
    state: Optional[Tensor] = None,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Compute expectation using the given backend.



    Given an input state $|\psi_0 \rangle$,
    a set of variational parameters $\vec{\theta}$
    and the unitary representation of the model $U(\vec{\theta})$
    we return the expectation value of the observable $\hat{O}$,
    $\langle \psi_0 | U^\dagger(\vec{\theta}) \hat{O} U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        observable: Observable part of the expectation.
        state: Optional input state.
        measurement: Optional measurement protocol. If None, use
            exact expectation value with a statevector simulator.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Raises:
        ValueError: when no observable is set.

    Returns:
        A torch.Tensor of shape n_batches x n_obs
    """
    if observable is None:
        if self._observable is None:
            raise ValueError(
                "Provide an AbstractBlock as the observable to compute expectation."
                "Either pass a 'native_observable' directly to 'QuantumModel.expectation'"
                "or pass a (non-native) '<class AbstractBlock>' to the 'QuantumModel.__init__'."
            )
        observable = self._observable

    params = self.embedding_fn(self._params, values)
    if measurement is None:
        measurement = self._measurement
    if noise is None:
        noise = self._noise
    else:
        self._noise = noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.expectation(
        circuit=self._circuit,
        observable=observable,
        param_values=params,
        state=state,
        measurement=measurement,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )
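
Because the returned expectation value is differentiable (note requires_grad=True in the output at the top of this page), gradients with respect to the variational parameters follow from standard torch autograd. A minimal sketch, reusing the model from the example at the top and passing only the feature parameter so that theta is taken from the model's own variational parameters:

import torch
from qadence import PI

values = {"phi": torch.tensor([PI, PI / 2])}

ex = model.expectation(values)   # shape: n_batches x n_obs
ex.sum().backward()              # backpropagate through the quantum expectation

# The gradients now populate the model's variational parameters
for name, param in model.named_parameters():
    print(name, param.grad)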

forward(*args, **kwargs)

Calls run method with arguments.

RETURNS DESCRIPTION
Tensor

A torch.Tensor representing output.

TYPE: Tensor

Source code in qadence/model.py
def forward(self, *args: Any, **kwargs: Any) -> Tensor:
    """Calls run method with arguments.

    Returns:
        Tensor: A torch.Tensor representing output.
    """
    return self.run(*args, **kwargs)
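
Since forward simply dispatches to run, a QuantumModel can be called like any other torch.nn.Module. A sketch, reusing the model and values from the example at the top of this page:

import torch

# Both calls return the same output wavefunction
wf_forward = model(values)
wf_run = model.run(values)
print(torch.allclose(wf_forward, wf_run))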

load(file_path, as_torch=False, map_location='cpu') classmethod

Load QuantumModel.

PARAMETER DESCRIPTION
file_path

File path to load model from.

TYPE: str | Path

as_torch

Load parameters as torch tensor. Defaults to False.

TYPE: bool DEFAULT: False

map_location

Location for loading. Defaults to "cpu".

TYPE: str | device DEFAULT: 'cpu'

RETURNS DESCRIPTION
QuantumModel

QuantumModel from file_path.

Source code in qadence/model.py
@classmethod
def load(
    cls, file_path: str | Path, as_torch: bool = False, map_location: str | torch.device = "cpu"
) -> QuantumModel:
    """Load QuantumModel.

    Arguments:
        file_path: File path to load model from.
        as_torch: Load parameters as torch tensor. Defaults to False.
        map_location (str | torch.device, optional): Location for loading. Defaults to "cpu".

    Returns:
        QuantumModel from file_path.
    """
    qm_pt = {}
    if isinstance(file_path, str):
        file_path = Path(file_path)
    if os.path.isdir(file_path):
        from qadence.ml_tools.callbacks.saveload import get_latest_checkpoint_name

        file_path = file_path / get_latest_checkpoint_name(file_path, "model")

    try:
        qm_pt = torch.load(file_path, map_location=map_location)
    except Exception as e:
        logger.error(f"Unable to load QuantumModel due to {e}")
    return cls._from_dict(qm_pt, as_torch)

load_params_from_dict(d, strict=True)

Copy parameters from dictionary into this QuantumModel.

Unlike QuantumModel.from_dict, this method does not create a new QuantumModel instance, but rather loads the parameters into the same QuantumModel. The behaviour of this method is similar to torch.nn.Module.load_state_dict.

The dictionary is assumed to have the same format as saved via QuantumModel.to_dict.

PARAMETER DESCRIPTION
d

The dictionary

TYPE: dict

strict

Whether to strictly enforce that the parameter keys in the dictionary and in the model match exactly. Default: True.

TYPE: bool DEFAULT: True

Source code in qadence/model.py
def load_params_from_dict(self, d: dict, strict: bool = True) -> None:
    """Copy parameters from dictionary into this QuantumModel.

    Unlike :meth:`~qadence.QuantumModel.from_dict`, this method does not create a new
    QuantumModel instance, but rather loads the parameters into the same QuantumModel.
    The behaviour of this method is similar to :meth:`~torch.nn.Module.load_state_dict`.

    The dictionary is assumed to have the format as saved via
    :meth:`~qadence.QuantumModel.to_dict`

    Args:
        d (dict): The dictionary
        strict (bool, optional):
            Whether to strictly enforce that the parameter keys in the dictionary and
            in the model match exactly. Default: ``True``.
    """
    param_dict = d["param_dict"]
    missing_keys = set(self._params.keys()) - set(param_dict.keys())
    unexpected_keys = set(param_dict.keys()) - set(self._params.keys())

    if strict:
        error_msgs = []
        if len(unexpected_keys) > 0:
            error_msgs.append(f"Unexpected key(s) in dictionary: {unexpected_keys}")
        if len(missing_keys) > 0:
            error_msgs.append(f"Missing key(s) in dictionary: {missing_keys}")
        if len(error_msgs) > 0:
            errors_string = "\n\t".join(error_msgs)
            raise RuntimeError(
                f"Error(s) loading the parameter dictionary due to: \n\t{errors_string}\n"
                "This error was thrown because the `strict` argument is set `True`."
                "If you don't need the parameter keys of the dictionary to exactly match "
                "the model parameters, set `strict=False`."
            )

    for n, param in param_dict.items():
        try:
            with torch.no_grad():
                self._params[n].copy_(
                    torch.nn.Parameter(param, requires_grad=param.requires_grad)
                )
        except Exception as e:
            logger.warning(f"Unable to load parameter {n} from dictionary due to {e}.")
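
A sketch of copying parameters between two models built from the same circuit and observable (as defined in the example at the top of this page). For illustration only, the dictionary is assembled by hand in the {"param_dict": ...} format this method consumes, using the private _params attribute that appears in the source above; in practice the dictionary would come from a saved model.

# Two models sharing the same circuit and observable
model_a = QuantumModel(circuit, observable)
model_b = QuantumModel(circuit, observable)

# Hand-assembled dictionary in the format consumed by load_params_from_dict
d = {"param_dict": {k: v.detach().clone() for k, v in model_a._params.items()}}

# Copy model_a's parameters into model_b in place
model_b.load_params_from_dict(d, strict=True)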

observable(observable, n_qubits)

Get backend observable.

PARAMETER DESCRIPTION
observable

Observable block.

TYPE: AbstractBlock

n_qubits

Number of qubits

TYPE: int

RETURNS DESCRIPTION
Any

Backend observable.

Source code in qadence/model.py
def observable(self, observable: AbstractBlock, n_qubits: int) -> Any:
    """Get backend observable.

    Args:
        observable: Observable block.
        n_qubits: Number of qubits

    Returns:
        Backend observable.
    """
    return self.backend.observable(observable, n_qubits)
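
For instance, to convert an abstract observable block into its backend representation (a sketch, reusing the model from the example at the top of this page):

from qadence import Z

# Backend-native observable acting on a two-qubit register
native_obs = model.observable(Z(0) + Z(1), n_qubits=2)
print(native_obs)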

overlap()

Overlap of model.

RAISES DESCRIPTION
NotImplementedError

The overlap method is not implemented for this model.

Source code in qadence/model.py
def overlap(self) -> Tensor:
    """Overlap of model.

    Raises:
        NotImplementedError: The overlap method is not implemented for this model.
    """
    raise NotImplementedError("The overlap method is not implemented for this model.")

reset_vparams(values)

Reset all the variational parameters with a given list of values.

Source code in qadence/model.py
def reset_vparams(self, values: Sequence) -> None:
    """Reset all the variational parameters with a given list of values."""
    current_vparams = OrderedDict({k: v for k, v in self._params.items() if v.requires_grad})

    assert (
        len(values) == self.num_vparams
    ), "Pass an iterable with the values of all variational parameters"
    for i, k in enumerate(current_vparams.keys()):
        current_vparams[k].data = torch.tensor([values[i]])
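
For the model from the example at the top of this page, which has a single variational parameter theta, this looks as follows (a sketch):

print(model.vals_vparams)    # variational parameter values before the reset

# The list must contain one value per variational parameter (num_vparams)
model.reset_vparams([0.5])
print(model.vals_vparams)    # variational parameter values after the reset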

run(values=None, state=None, endianness=Endianness.BIG)

Run model.

Given an input state \(| \psi_0 \rangle\), a set of variational parameters \(\vec{\theta}\) and the unitary representation of the model \(U(\vec{\theta})\) we return \(U(\vec{\theta}) | \psi_0 \rangle\).

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: None

state

Optional input state to apply model on.

TYPE: Tensor | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RETURNS DESCRIPTION
Tensor

A torch.Tensor representing output.

Source code in qadence/model.py
def run(
    self,
    values: dict[str, Tensor] = None,
    state: Tensor | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    r"""Run model.

    Given an input state $| \psi_0 \rangle$,
    a set of variational parameters $\vec{\theta}$
    and the unitary representation of the model $U(\vec{\theta})$
    we return $U(\vec{\theta}) | \psi_0 \rangle$.

    Arguments:
        values: Values dict which contains values for the parameters.
        state: Optional input state to apply model on.
        endianness: Storage convention for binary information.

    Returns:
        A torch.Tensor representing output.
    """
    if values is None:
        values = {}

    params = self.embedding_fn(self._params, values)

    return self.backend.run(self._circuit, params, state=state, endianness=endianness)

sample(values={}, n_shots=1000, state=None, noise=None, mitigation=None, endianness=Endianness.BIG)

Obtain samples from model.

PARAMETER DESCRIPTION
values

Values dict which contains values for the parameters.

TYPE: dict[str, Tensor] DEFAULT: {}

n_shots

Number of shots to sample.

TYPE: int DEFAULT: 1000

state

Optional input state to apply model on.

TYPE: Tensor | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

mitigation

A mitigation protocol to use.

TYPE: Mitigations | None DEFAULT: None

endianness

Storage convention for binary information.

TYPE: Endianness DEFAULT: BIG

RETURNS DESCRIPTION
list[Counter]

A list of Counter instances with the sample results.

Source code in qadence/model.py
def sample(
    self,
    values: dict[str, torch.Tensor] = {},
    n_shots: int = 1000,
    state: torch.Tensor | None = None,
    noise: NoiseHandler | None = None,
    mitigation: Mitigations | None = None,
    endianness: Endianness = Endianness.BIG,
) -> list[Counter]:
    """Obtain samples from model.

    Arguments:
        values: Values dict which contains values for the parameters.
        n_shots: Number of shots to sample.
        state: Optional input state to apply model on.
        noise: A noise model to use.
        mitigation: A mitigation protocol to use.
        endianness: Storage convention for binary information.

    Returns:
        A list of Counter instances with the sample results.
    """
    params = self.embedding_fn(self._params, values)
    if noise is None:
        noise = self._noise
    if mitigation is None:
        mitigation = self._mitigation
    return self.backend.sample(
        self._circuit,
        params,
        n_shots=n_shots,
        state=state,
        noise=noise,
        mitigation=mitigation,
        endianness=endianness,
    )

save(folder, file_name='quantum_model.pt', save_params=True)

Save model.

PARAMETER DESCRIPTION
folder

Folder where model is saved.

TYPE: str | Path

file_name

File name for saving model. Defaults to "quantum_model.pt".

TYPE: str DEFAULT: 'quantum_model.pt'

save_params

Save parameters if True. Defaults to True.

TYPE: bool DEFAULT: True

RAISES DESCRIPTION
FileNotFoundError

If folder is not a directory.

Source code in qadence/model.py
def save(
    self, folder: str | Path, file_name: str = "quantum_model.pt", save_params: bool = True
) -> None:
    """Save model.

    Arguments:
        folder: Folder where model is saved.
        file_name: File name for saving model. Defaults to "quantum_model.pt".
        save_params: Save parameters if True. Defaults to True.

    Raises:
        FileNotFoundError: If folder is not a directory.
    """
    if not os.path.isdir(folder):
        raise FileNotFoundError
    try:
        torch.save(self._to_dict(save_params), folder / Path(file_name))
    except Exception as e:
        logger.error(f"Unable to write QuantumModel to disk due to {e}")
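
A save/load roundtrip sketch, reusing the model from the example at the top of this page and writing to a temporary directory (save expects an existing folder):

import tempfile
from pathlib import Path
from qadence import QuantumModel

with tempfile.TemporaryDirectory() as tmp:
    folder = Path(tmp)
    model.save(folder, file_name="quantum_model.pt")

    # Load the model back from the saved checkpoint
    loaded = QuantumModel.load(folder / "quantum_model.pt")
    print(loaded.vparams)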

to(*args, **kwargs)

Conversion method for device or types.

RETURNS DESCRIPTION
QuantumModel

QuantumModel with conversions.

Source code in qadence/model.py
def to(self, *args: Any, **kwargs: Any) -> QuantumModel:
    """Conversion method for device or types.

    Returns:
        QuantumModel with conversions.
    """
    from pyqtorch import QuantumCircuit as PyQCircuit

    try:
        if isinstance(self._circuit.native, PyQCircuit):
            self._circuit.native = self._circuit.native.to(*args, **kwargs)
            if self._observable is not None:
                if isinstance(self._observable, ConvertedObservable):
                    self._observable.native = self._observable.native.to(*args, **kwargs)
                elif isinstance(self._observable, list):
                    for obs in self._observable:
                        obs.native = obs.native.to(*args, **kwargs)
            self._params = self._params.to(
                device=self._circuit.native.device,
                dtype=(
                    torch.float64
                    if self._circuit.native.dtype == torch.cdouble
                    else torch.float32
                ),
            )
            logger.debug(f"Moved {self} to {args}, {kwargs}.")
        else:
            logger.debug("QuantumModel.to only supports pyqtorch.QuantumCircuits.")
    except Exception as e:
        logger.warning(f"Unable to move {self} to {args}, {kwargs} due to {e}.")
    return self
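
A sketch of switching the model to double precision and, when available, moving it to a GPU, assuming the underlying pyqtorch circuit accepts the usual torch.nn.Module.to arguments as the source above suggests:

import torch

# Double-precision simulation: complex128 statevectors, float64 parameters
# (following the dtype mapping in the source above)
model = model.to(dtype=torch.cdouble)

# Optionally move to GPU
if torch.cuda.is_available():
    model = model.to(device="cuda")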

QNN(circuit, observable, backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, configuration=None, inputs=None, input_diff_mode=InputDiffMode.AD)

Bases: QuantumModel

Quantum neural network model for n-dimensional inputs.

Examples:

import torch
from qadence import QuantumCircuit, QNN, Z
from qadence import hea, feature_map, hamiltonian_factory, kron

# create the circuit
n_qubits, depth = 2, 4
fm = kron(
    feature_map(1, support=(0,), param="x"),
    feature_map(1, support=(1,), param="y")
)
ansatz = hea(n_qubits=n_qubits, depth=depth)
circuit = QuantumCircuit(n_qubits, fm, ansatz)
obs_base = hamiltonian_factory(n_qubits, detuning=Z)

# the QNN will yield two outputs
obs = [2.0 * obs_base, 4.0 * obs_base]

# initialize and use the model
qnn = QNN(circuit, obs, inputs=["x", "y"])
y = qnn(torch.rand(3, 2))
tensor([[0.3483, 0.6966],
        [0.6764, 1.3527],
        [0.1195, 0.2390]], grad_fn=<CatBackward0>)

Initialize the QNN.

The number of inputs is determined by the feature parameters in the input quantum circuit, while the number of outputs is determined by how many observables are provided as input.

PARAMETER DESCRIPTION
circuit

The quantum circuit to use for the QNN.

TYPE: QuantumCircuit

observable

The observable.

TYPE: list[AbstractBlock] | AbstractBlock

backend

The chosen quantum backend.

TYPE: BackendName DEFAULT: PYQTORCH

diff_mode

The differentiation engine to use. Choices are 'gpsr' or 'ad'.

TYPE: DiffMode DEFAULT: AD

measurement

optional measurement protocol. If None, use exact expectation value with a statevector simulator

TYPE: Measurements | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

configuration

optional configuration for the backend

TYPE: BackendConfiguration | dict | None DEFAULT: None

inputs

List that indicates the order of variables of the tensors that are passed to the model. Given input tensors xs = torch.rand(batch_size, input_size:=2) a QNN with inputs=["t", "x"] will assign t, x = xs[:,0], xs[:,1].

TYPE: list[Basic | str] | None DEFAULT: None

input_diff_mode

The differentiation mode for the input tensor.

TYPE: InputDiffMode | str DEFAULT: AD

Source code in qadence/ml_tools/models.py
def __init__(
    self,
    circuit: QuantumCircuit,
    observable: list[AbstractBlock] | AbstractBlock,
    backend: BackendName = BackendName.PYQTORCH,
    diff_mode: DiffMode = DiffMode.AD,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    configuration: BackendConfiguration | dict | None = None,
    inputs: list[sympy.Basic | str] | None = None,
    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
):
    """Initialize the QNN.

    The number of inputs is determined by the feature parameters in the input
    quantum circuit while the number of outputs is determined by how many
    observables are provided as input

    Args:
        circuit: The quantum circuit to use for the QNN.
        observable: The observable.
        backend: The chosen quantum backend.
        diff_mode: The differentiation engine to use. Choices 'gpsr' or 'ad'.
        measurement: optional measurement protocol. If None,
            use exact expectation value with a statevector simulator
        noise: A noise model to use.
        configuration: optional configuration for the backend
        inputs: List that indicates the order of variables of the tensors that are passed
            to the model. Given input tensors `xs = torch.rand(batch_size, input_size:=2)` a QNN
            with `inputs=["t", "x"]` will assign `t, x = xs[:,0], xs[:,1]`.
        input_diff_mode: The differentiation mode for the input tensor.
    """
    super().__init__(
        circuit,
        observable=observable,
        backend=backend,
        diff_mode=diff_mode,
        measurement=measurement,
        configuration=configuration,
        noise=noise,
    )
    if self._observable is None:
        raise ValueError("You need to provide at least one observable in the QNN constructor")
    if (inputs is not None) and (len(self.inputs) == len(inputs)):
        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in inputs]  # type: ignore[union-attr]
    elif (inputs is None) and len(self.inputs) <= 1:
        self.inputs = [sympy.symbols(x) if isinstance(x, str) else x for x in self.inputs]  # type: ignore[union-attr]
    else:
        raise ValueError(
            """
            Your QNN has more than one input. Please provide a list of inputs in the order of
            your tensor domain. For example, if you want to pass
            `xs = torch.rand(batch_size, input_size:=3)` to your QNN, where
            ```
            t = xs[:,0]
            x = xs[:,1]
            y = xs[:,2]
            ```
            you have to specify
            ```
            QNN(circuit, observable, inputs=["t", "x", "y"])
            ```
            You can also pass a list of sympy symbols.
        """
        )
    self.format_to_dict = format_to_dict_fn(self.inputs)  # type: ignore[arg-type]
    self.input_diff_mode = InputDiffMode(input_diff_mode)
    if self.input_diff_mode == InputDiffMode.FD:
        from qadence.backends.utils import finitediff

        self.__derivative = finitediff
    elif self.input_diff_mode == InputDiffMode.AD:
        self.__derivative = _torch_derivative  # type: ignore[assignment]
    else:
        raise ValueError(f"Unkown forward diff mode: {self.input_diff_mode}")

forward(values=None, state=None, measurement=None, noise=None, endianness=Endianness.BIG)

Forward pass of the model.

This returns the (differentiable) expectation value of the given observable operator defined in the constructor. Differently from the base QuantumModel class, the QNN also accepts a tensor as input for the forward pass. The tensor is expected to have shape n_batches x in_features, where n_batches is the number of data points and in_features is the dimensionality of the problem.

The output of the forward pass is the expectation value of the input observable(s). If a single observable is given, the output shape is n_batches, while if multiple observables are given the output shape is instead n_batches x n_observables.

PARAMETER DESCRIPTION
values

the values of the feature parameters

TYPE: dict[str, Tensor] | Tensor DEFAULT: None

state

Initial state.

TYPE: Tensor | None DEFAULT: None

measurement

optional measurement protocol. If None, use exact expectation value with a statevector simulator

TYPE: Measurements | None DEFAULT: None

noise

A noise model to use.

TYPE: NoiseHandler | None DEFAULT: None

endianness

Endianness of the resulting bit strings.

TYPE: Endianness DEFAULT: BIG

RETURNS DESCRIPTION
Tensor

a tensor with the expectation value of the observables passed in the constructor of the model

TYPE: Tensor

Source code in qadence/ml_tools/models.py
def forward(
    self,
    values: dict[str, Tensor] | Tensor = None,
    state: Tensor | None = None,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    endianness: Endianness = Endianness.BIG,
) -> Tensor:
    """Forward pass of the model.

    This returns the (differentiable) expectation value of the given observable
    operator defined in the constructor. Differently from the base QuantumModel
    class, the QNN accepts also a tensor as input for the forward pass. The
    tensor is expected to have shape: `n_batches x in_features` where `n_batches`
    is the number of data points and `in_features` is the dimensionality of the problem

    The output of the forward pass is the expectation value of the input
    observable(s). If a single observable is given, the output shape is
    `n_batches` while if multiple observables are given the output shape
    is instead `n_batches x n_observables`

    Args:
        values: the values of the feature parameters
        state: Initial state.
        measurement: optional measurement protocol. If None,
            use exact expectation value with a statevector simulator
        noise: A noise model to use.
        endianness: Endianness of the resulting bit strings.

    Returns:
        Tensor: a tensor with the expectation value of the observables passed
            in the constructor of the model
    """
    return self.expectation(
        values, state=state, measurement=measurement, noise=noise, endianness=endianness
    )
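
Besides the tensor input shown in the class example above, the forward pass also accepts a dictionary of feature values. A sketch, reusing the qnn built in the QNN example above (with inputs=["x", "y"]):

import torch

# Tensor input: columns follow the order given by inputs=["x", "y"]
xs = torch.rand(3, 2)
y_from_tensor = qnn(xs)

# Equivalent dictionary input
y_from_dict = qnn({"x": xs[:, 0], "y": xs[:, 1]})
print(torch.allclose(y_from_tensor, y_from_dict))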

from_configs(register, obs_config, fm_config=FeatureMapConfig(), ansatz_config=AnsatzConfig(), backend=BackendName.PYQTORCH, diff_mode=DiffMode.AD, measurement=None, noise=None, configuration=None, input_diff_mode=InputDiffMode.AD) classmethod

Create a QNN from a set of configurations.

PARAMETER DESCRIPTION
register

The number of qubits or a register object.

TYPE: int | Register

obs_config

The configuration(s) for the observable(s).

TYPE: list[ObservableConfig] | ObservableConfig

fm_config

The configuration for the feature map. Defaults to no feature encoding block.

TYPE: FeatureMapConfig DEFAULT: FeatureMapConfig()

ansatz_config

The configuration for the ansatz. Defaults to a single layer of hardware efficient ansatz.

TYPE: AnsatzConfig DEFAULT: AnsatzConfig()

backend

The chosen quantum backend.

TYPE: BackendName DEFAULT: PYQTORCH

diff_mode

The differentiation engine to use. Choices are 'gpsr' or 'ad'.

TYPE: DiffMode DEFAULT: AD

measurement

Optional measurement protocol. If None, use exact expectation value with a statevector simulator.

TYPE: Measurements DEFAULT: None

noise

A noise model to use.

TYPE: Noise DEFAULT: None

configuration

Optional backend configuration.

TYPE: BackendConfiguration | dict DEFAULT: None

input_diff_mode

The differentiation mode for the input tensor.

TYPE: InputDiffMode DEFAULT: AD

RETURNS DESCRIPTION
QNN

A QNN object.

RAISES DESCRIPTION
ValueError

If the observable configuration is not provided.

Example:

import torch
from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
from qadence.ml_tools import QNN
from qadence.constructors import ObservableConfig
from qadence.operations import Z
from qadence.types import (
    AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
)

register = 4
obs_config = ObservableConfig(
    detuning=Z,
    scale=5.0,
    shift=0.0,
    transformation_type=ObservableTransform.SCALE,
    trainable_transform=None,
)
fm_config = FeatureMapConfig(
    num_features=2,
    inputs=["x", "y"],
    basis_set=BasisSet.FOURIER,
    reupload_scaling=ReuploadScaling.CONSTANT,
    feature_range={
        "x": (-1.0, 1.0),
        "y": (0.0, 1.0),
    },
)
ansatz_config = AnsatzConfig(
    depth=2,
    ansatz_type=AnsatzType.HEA,
    ansatz_strategy=Strategy.DIGITAL,
)

qnn = QNN.from_configs(
    register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
)

x = torch.rand(2, 2)
y = qnn(x)
tensor([[3.7460],
        [3.6527]], grad_fn=<CatBackward0>)

Source code in qadence/ml_tools/models.py
@classmethod
def from_configs(
    cls,
    register: int | Register,
    obs_config: Any,
    fm_config: Any = FeatureMapConfig(),
    ansatz_config: Any = AnsatzConfig(),
    backend: BackendName = BackendName.PYQTORCH,
    diff_mode: DiffMode = DiffMode.AD,
    measurement: Measurements | None = None,
    noise: NoiseHandler | None = None,
    configuration: BackendConfiguration | dict | None = None,
    input_diff_mode: InputDiffMode | str = InputDiffMode.AD,
) -> QNN:
    """Create a QNN from a set of configurations.

    Args:
        register (int | Register): The number of qubits or a register object.
        obs_config (list[ObservableConfig] | ObservableConfig): The configuration(s)
            for the observable(s).
        fm_config (FeatureMapConfig): The configuration for the feature map.
            Defaults to no feature encoding block.
        ansatz_config (AnsatzConfig): The configuration for the ansatz.
            Defaults to a single layer of hardware efficient ansatz.
        backend (BackendName): The chosen quantum backend.
        diff_mode (DiffMode): The differentiation engine to use. Choices are
            'gpsr' or 'ad'.
        measurement (Measurements): Optional measurement protocol. If None,
            use exact expectation value with a statevector simulator.
        noise (Noise): A noise model to use.
        configuration (BackendConfiguration | dict): Optional backend configuration.
        input_diff_mode (InputDiffMode): The differentiation mode for the input tensor.

    Returns:
        A QNN object.

    Raises:
        ValueError: If the observable configuration is not provided.

    Example:
    ```python exec="on" source="material-block" result="json"
    import torch
    from qadence.ml_tools.config import AnsatzConfig, FeatureMapConfig
    from qadence.ml_tools import QNN
    from qadence.constructors import ObservableConfig
    from qadence.operations import Z
    from qadence.types import (
        AnsatzType, BackendName, BasisSet, ObservableTransform, ReuploadScaling, Strategy
    )

    register = 4
    obs_config = ObservableConfig(
        detuning=Z,
        scale=5.0,
        shift=0.0,
        transformation_type=ObservableTransform.SCALE,
        trainable_transform=None,
    )
    fm_config = FeatureMapConfig(
        num_features=2,
        inputs=["x", "y"],
        basis_set=BasisSet.FOURIER,
        reupload_scaling=ReuploadScaling.CONSTANT,
        feature_range={
            "x": (-1.0, 1.0),
            "y": (0.0, 1.0),
        },
    )
    ansatz_config = AnsatzConfig(
        depth=2,
        ansatz_type=AnsatzType.HEA,
        ansatz_strategy=Strategy.DIGITAL,
    )

    qnn = QNN.from_configs(
        register, obs_config, fm_config, ansatz_config, backend=BackendName.PYQTORCH
    )

    x = torch.rand(2, 2)
    y = qnn(x)
    print(str(y)) # markdown-exec: hide
    ```
    """
    from .constructors import build_qnn_from_configs

    return build_qnn_from_configs(
        register=register,
        observable_config=obs_config,
        fm_config=fm_config,
        ansatz_config=ansatz_config,
        backend=backend,
        diff_mode=diff_mode,
        measurement=measurement,
        noise=noise,
        configuration=configuration,
        input_diff_mode=input_diff_mode,
    )