Parameters

ParamMap(**kwargs)

Connects UUIDs of parameters to their expressions and names.

This class is not user-facing and only needed for more complex block definitions. It provides convenient access to expressions/UUIDs/names needed in different backends.

PARAMETER DESCRIPTION
kwargs

The parameters to map, passed as keyword arguments mapping names to values or expressions.

TYPE: str | TNumber | Tensor | Basic | Parameter DEFAULT: {}

Example:

import sympy
from qadence.parameters import ParamMap

(x,y) = sympy.symbols("x y")
ps = ParamMap(omega=2.0, duration=x+y)

print(f"{ps.names() = }")
print(f"{ps.expressions() = }")
print(f"{ps.uuids() = }")

ps.names() = dict_keys(['omega', 'duration'])
ps.expressions() = dict_values([2.00000000000000, x + y])
ps.uuids() = dict_keys(['01619f82-ac6a-4b41-8632-2d44525b992b', 'b34ccdfc-b1d0-46c5-b66b-59aa594ca016'])

Source code in qadence/parameters.py
def __init__(self, **kwargs: str | TNumber | Tensor | Basic | Parameter):
    self._name_dict: dict[str, tuple[str, Basic]] = {}
    self._uuid_dict: dict[str, Basic] = {}
    for name, v in kwargs.items():
        param = v if isinstance(v, sympy.Basic) else Parameter(v)
        uuid = str(uuid4())
        self._name_dict[name] = (uuid, param)
        self._uuid_dict[uuid] = param

Parameter

Bases: Symbol

A wrapper on top of sympy.Symbol.

Includes two additional keywords: trainable and value. This class is used to define both feature parameters and variational parameters.

trainable: bool instance-attribute

Trainable parameters are variational parameters.

Non-trainable parameters are feature parameters.

value: TNumber instance-attribute

(Initial) value of the parameter.
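A minimal sketch of the two attribute combinations described above, assuming only the constructor keywords documented below (the variable names are illustrative):

```python
from qadence.parameters import Parameter

# Trainable parameter: a variational parameter with an explicit initial value.
theta = Parameter("theta", trainable=True, value=1.0)
print(theta.trainable, theta.value)  # True 1.0

# Non-trainable parameter: a feature parameter; its value is randomly
# initialized when not given explicitly.
x = Parameter("x", trainable=False)
print(x.trainable)  # False
```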

__new__(name, **assumptions)

Arguments:

name: When given a string only, the class
    constructs a trainable Parameter with a randomly initialized value.
**assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption
    kwargs are supported by this constructor: `trainable: bool`, and `value: TNumber`.

Example:

from qadence.parameters import Parameter, VariationalParameter

theta = Parameter("theta")
print(f"{theta}: trainable={theta.trainable} value={theta.value}")
assert not theta.is_number

# you can specify both trainable/value in the constructor
theta = Parameter("theta", trainable=True, value=2.0)
print(f"{theta}: trainable={theta.trainable} value={theta.value}")

# VariationalParameter/FeatureParameter construct
# trainable/non-trainable Parameters
theta = VariationalParameter("theta", value=2.0)
assert theta == Parameter("theta", trainable=True, value=2.0)

# When provided with a numeric type, Parameter constructs a sympy numeric type:
constant_zero = Parameter(0)
assert constant_zero.is_number

# When passed a Parameter or a sympy expression, it just returns it.
expr = Parameter("x") * Parameter("y")
print(f"{expr=} : {expr.free_symbols}")

theta: trainable=True value=0.146690661198833
theta: trainable=True value=2.0
expr=x*y : {y, x}

Source code in qadence/parameters.py
def __new__(
    cls, name: str | TNumber | Tensor | Basic | Parameter, **assumptions: Any
) -> Parameter | Basic | Expr | Array:
    """
    Arguments:

        name: When given a string only, the class
            constructs a trainable Parameter with a randomly initialized value.
        **assumptions: are passed on to the parent class `sympy.Symbol`. Two new assumption
            kwargs are supported by this constructor: `trainable: bool`, and `value: TNumber`.

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence.parameters import Parameter, VariationalParameter

    theta = Parameter("theta")
    print(f"{theta}: trainable={theta.trainable} value={theta.value}")
    assert not theta.is_number

    # you can specify both trainable/value in the constructor
    theta = Parameter("theta", trainable=True, value=2.0)
    print(f"{theta}: trainable={theta.trainable} value={theta.value}")

    # VariationalParameter/FeatureParameter construct
    # trainable/non-trainable Parameters
    theta = VariationalParameter("theta", value=2.0)
    assert theta == Parameter("theta", trainable=True, value=2.0)

    # When provided with a numeric type, Parameter constructs a sympy numeric type:
    constant_zero = Parameter(0)
    assert constant_zero.is_number

    # When passed a Parameter or a sympy expression, it just returns it.
    expr = Parameter("x") * Parameter("y")
    print(f"{expr=} : {expr.free_symbols}")
    ```
    """
    p: Parameter
    if isinstance(name, get_args(TNumber)):
        return sympify(name)
    elif isinstance(name, Tensor):
        if name.numel() == 1:
            return sympify(name)
        else:
            return Array(name.detach().numpy())
    elif isinstance(name, Parameter):
        p = super().__new__(cls, name.name, **assumptions)
        p.name = name.name
        p.trainable = name.trainable
        p.value = name.value
        return p
    elif isinstance(name, (Basic, Expr)):
        if name.is_number:
            return sympify(evaluate(name))
        return name
    elif isinstance(name, str):
        p = super().__new__(cls, name, **assumptions)
        p.trainable = assumptions.get("trainable", True)
        p.value = assumptions.get("value", None)
        if p.value is None:
            p.value = rand(1).item()
        return p
    else:
        raise TypeError(f"Parameter does not support type {type(name)}")

FeatureParameter(name, **kwargs)

Shorthand for Parameter(..., trainable=False).

Source code in qadence/parameters.py
def FeatureParameter(name: str, **kwargs: Any) -> Parameter:
    """Shorthand for `Parameter(..., trainable=False)`."""
    return Parameter(name, trainable=False, **kwargs)

VariationalParameter(name, **kwargs)

Shorthand for Parameter(..., trainable=True).

Source code in qadence/parameters.py
def VariationalParameter(name: str, **kwargs: Any) -> Parameter:
    """Shorthand for `Parameter(..., trainable=True)`."""
    return Parameter(name, trainable=True, **kwargs)
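A short usage sketch covering both shorthands, mirroring the equality check from the Parameter example above:

```python
from qadence.parameters import FeatureParameter, Parameter, VariationalParameter

x = FeatureParameter("x")                         # non-trainable feature parameter
theta = VariationalParameter("theta", value=2.0)  # trainable variational parameter

assert not x.trainable and theta.trainable
assert theta == Parameter("theta", trainable=True, value=2.0)
```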

evaluate(expr, values={}, as_torch=False)

Arguments:

expr: An expression consisting of Parameters.
values: values dict which contains values for the Parameters,
    if empty, Parameter.value will be used.
as_torch: Whether to retrieve a torch-differentiable expression result.

Example:

from qadence.parameters import Parameter, evaluate

expr = Parameter("x") * Parameter("y")

# Unless specified, a Parameter is initialized with a random value.
# Let's evaluate this expression and see what the result is
res = evaluate(expr)
print(res)

# We can also evaluate the expr using a custom dict
d = {"x": 1, "y":2}
res = evaluate(expr, d)
print(res)

# Lastly, if we want a differentiable result, let's set the as_torch flag
res = evaluate(expr, d, as_torch=True)
print(res)

0.00985726368325142
2.0
tensor([2])

Source code in qadence/parameters.py
def evaluate(expr: Expr, values: dict = {}, as_torch: bool = False) -> TNumber | Tensor:
    """
    Arguments:

        expr: An expression consisting of Parameters.
        values: values dict which contains values for the Parameters,
            if empty, Parameter.value will be used.
        as_torch: Whether to retrieve a torch-differentiable expression result.

    Example:
    ```python exec="on" source="material-block" result="json"
    from qadence.parameters import Parameter, evaluate

    expr = Parameter("x") * Parameter("y")

    # Unless specified, a Parameter is initialized with a random value.
    # Let's evaluate this expression and see what the result is
    res = evaluate(expr)
    print(res)

    # We can also evaluate the expr using a custom dict
    d = {"x": 1, "y":2}
    res = evaluate(expr, d)
    print(res)

    # Lastly, if we want a differentiable result, let's set the as_torch flag
    res = evaluate(expr, d, as_torch=True)
    print(res)
    ```
    """
    res: Basic
    res_value: TNumber | Tensor
    query: dict[Parameter, TNumber | Tensor] = {}
    if isinstance(expr, Array):
        return Tensor(expr.tolist())
    else:
        if not expr.is_number:
            for s in expr.free_symbols:
                if s.name in values.keys():
                    query[s] = values[s.name]
                elif hasattr(s, "value"):
                    query[s] = s.value
                else:
                    raise ValueError(f"No value provided for symbol {s.name}")
        if as_torch:
            res_value = make_differentiable(expr)(**{s.name: tensor(v) for s, v in query.items()})
        else:
            res = expr.subs(query)
            res_value = sympy_to_numeric(res)
        return res_value
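As the source above shows, entries in the values dict take precedence per symbol, and any remaining free symbol falls back to its stored Parameter.value. A minimal sketch of this mixing (variable names are illustrative):

```python
from qadence.parameters import Parameter, evaluate

x = Parameter("x", trainable=False, value=0.5)
y = Parameter("y", trainable=True, value=3.0)
expr = x + 2 * y

print(evaluate(expr))              # both symbols fall back to stored values: 0.5 + 2*3.0 = 6.5
print(evaluate(expr, {"x": 1.0}))  # x is overridden, y falls back: 1.0 + 2*3.0 = 7.0
```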

extract_original_param_entry(param)

Given an Expression, what was the original "param" given by the user? It is either going to be a numeric value, or a sympy Expression (in case a string was given, it was converted via Parameter("string")).

Source code in qadence/parameters.py
def extract_original_param_entry(
    param: Expr,
) -> TNumber | Tensor | Expr:
    """
    Given an Expression, what was the original "param" given by the user?

    It is either going to be a numeric value, or a sympy Expression (in case a
    string was given, it was converted via Parameter("string")).
    """
    return param if not param.is_number else evaluate(param)
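A brief sketch of the behaviour described above, assuming qadence is installed:

```python
from qadence.parameters import Parameter, extract_original_param_entry

# Numeric entry: the stored expression is a sympy number, so its numeric value is returned.
const = Parameter(2.0)
print(extract_original_param_entry(const))  # 2.0

# Symbolic entry: the expression itself is returned unchanged.
expr = Parameter("x") * 2
print(extract_original_param_entry(expr) == expr)  # True
```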

Parameter embedding

embedding(block, to_gate_params=False, engine=Engine.TORCH)

Construct an embedding function which maps user-facing parameters to either expression-level parameters or gate-level parameters. The constructed embedding function has the signature:

 embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:

which means that it maps the variational parameter dict params and the feature parameter dict inputs to a new parameter dict embedded_dict that holds all parameters needed to execute a circuit on a given backend. There are two different modes for this mapping:

  • Expression-level parameters: For AD-based optimization. For every unique expression we end up with one entry in the embedded dict: len(embedded_dict) == len(unique_parameter_expressions).
  • Gate-level parameters: For PSR-based optimization or real devices. One parameter for each gate parameter, regardless of whether they are based on the same expression: len(embedded_dict) == len(parametric_gates). This is needed because PSR requires shifting the angle of every gate where the same parameter appears. Both modes are shown in the sketch below.
PARAMETER DESCRIPTION
block

parametrized block into which we want to embed parameters.

TYPE: AbstractBlock

to_gate_params

A boolean flag indicating whether to generate gate-level parameters (True) or expression-level parameters (False).

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType]]

A tuple with variational parameter dict and the embedding function.

Source code in qadence/blocks/embedding.py
def embedding(
    block: AbstractBlock, to_gate_params: bool = False, engine: Engine = Engine.TORCH
) -> tuple[ParamDictType, Callable[[ParamDictType, ParamDictType], ParamDictType],]:
    """Construct embedding function which maps user-facing parameters to either *expression-level*.

    parameters or *gate-level* parameters. The constructed embedding function has the signature:

         embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:

    which means that it maps the *variational* parameter dict `params` and the *feature* parameter
    dict `inputs` to one new parameter dict `embedded_dict` which holds all parameters that are
    needed to execute a circuit on a given backend. There are two different *modes* for this
    mapping:

    - *Expression-level* parameters: For AD-based optimization. For every unique expression we end
      up with one entry in the embedded dict:
      `len(embedded_dict) == len(unique_parameter_expressions)`.
    - *Gate-level* parameters: For PSR-based optimization or real devices. One parameter for each
      gate parameter, regardless of whether they are based on the same expression. `len(embedded_dict) ==
      len(parametric_gates)`. This is needed because PSR requires shifting the angle of **every**
      gate where the same parameter appears.

    Arguments:
        block: parametrized block into which we want to embed parameters.
        to_gate_params: A boolean flag indicating whether to generate gate-level parameters
            (True) or expression-level parameters (False).

    Returns:
        A tuple with variational parameter dict and the embedding function.
    """
    concretize_parameter = _concretize_parameter(engine)
    if engine == Engine.TORCH:
        cast_dtype = tensor
    else:
        from jax.numpy import array

        cast_dtype = array

    unique_expressions = unique(expressions(block))
    unique_symbols = [p for p in unique(parameters(block)) if not isinstance(p, sympy.Array)]
    unique_const_matrices = [e for e in unique_expressions if isinstance(e, sympy.Array)]
    unique_expressions = [e for e in unique_expressions if not isinstance(e, sympy.Array)]

    # NOTE
    # there are 3 kinds of parameters in qadence
    # - non-trainable which are considered as inputs for classical data
    # - trainable which are the variational parameters to be optimized
    # - fixed: which are non-trainable parameters with fixed value (e.g. pi/2)
    #
    # both non-trainable and trainable parameters can have the same element applied
    # to different operations in the quantum circuit, e.g. assigning the same parameter
    # to multiple gates.
    non_numeric_symbols = [p for p in unique_symbols if not p.is_number]
    trainable_symbols = [p for p in non_numeric_symbols if p.trainable]
    constant_expressions = [expr for expr in unique_expressions if expr.is_number]
    # we don't need to care about constant symbols if they are contained in a symbolic expression;
    # we only care about gate params which are ONLY a constant

    embeddings: dict[sympy.Expr, DifferentiableExpression] = {
        expr: make_differentiable(expr=expr, engine=engine)
        for expr in unique_expressions
        if not expr.is_number
    }

    uuid_to_expr = uuid_to_expression(block)

    def embedding_fn(params: ParamDictType, inputs: ParamDictType) -> ParamDictType:
        embedded_params: dict[sympy.Expr, ArrayLike] = {}
        for expr, fn in embeddings.items():
            angle: ArrayLike
            values = {}
            for symbol in expr.free_symbols:
                if symbol.name in inputs:
                    value = inputs[symbol.name]
                elif symbol.name in params:
                    value = params[symbol.name]
                else:
                    msg_trainable = "Trainable" if symbol.trainable else "Non-trainable"
                    raise KeyError(
                        f"{msg_trainable} parameter '{symbol.name}' not found in the "
                        f"inputs list: {list(inputs.keys())} nor the "
                        f"params list: {list(params.keys())}."
                    )
                values[symbol.name] = value
            angle = fn(**values)
            # do not reshape parameters which are multi-dimensional
            # tensors, such as for example generator matrices
            if not len(angle.squeeze().shape) > 1:
                angle = angle.reshape(-1)
            embedded_params[expr] = angle

        for e in constant_expressions + unique_const_matrices:
            embedded_params[e] = params[stringify(e)]

        if to_gate_params:
            gate_lvl_params: ParamDictType = {}
            for uuid, e in uuid_to_expr.items():
                gate_lvl_params[uuid] = embedded_params[e]
            return gate_lvl_params
        else:
            return {stringify(k): v for k, v in embedded_params.items()}

    params: ParamDictType
    params = {
        p.name: concretize_parameter(value=p.value, trainable=True) for p in trainable_symbols
    }
    params.update(
        {
            stringify(expr): concretize_parameter(value=evaluate(expr), trainable=False)
            for expr in constant_expressions
        }
    )
    params.update(
        {
            stringify(expr): cast_dtype(nparray(expr.tolist(), dtype=npcdouble))
            for expr in unique_const_matrices
        }
    )
    return params, embedding_fn