hqm.layers.recurrent

import torch
import sys

sys.path += ['.', './utils/', './circuits/']

from hqm.circuits.circuit import QuantumCircuit
from hqm.utils.aiinterface import AIInterface


class QGRU(torch.nn.Module):
    '''
        Quantum Gated Recurrent Unit layer.

        Currently supports only Torch.

        Reference
        ---------
        A. Ceschini, A. Rosato and M. Panella, "Hybrid Quantum-Classical Recurrent
        Neural Networks for Time Series Prediction," 2022 International Joint Conference
        on Neural Networks (IJCNN), Padua, Italy, 2022, pp. 1-8,
        doi: 10.1109/IJCNN55064.2022.9892441.
    '''

    def __init__(self, qcircuits : list[QuantumCircuit], inputsize : int, hiddensize : int, aiframework : str = 'torch') -> None:
        '''
        QGRU constructor.

        Parameters:
        -----------
        - qcircuits : list of QuantumCircuit
            list containing three quantum circuits in this exact order: 1) Quantum Layer Reset, 2) Quantum Layer Update, 3) Quantum Layer Output
        - inputsize : int
            integer representing the number of variables (channels) in the input data
        - hiddensize : int
            integer representing the size of the hidden (recurrent) state
        - aiframework : str
            string representing the AI framework in use, can be 'torch' or 'keras'. This will create
            a compatible trainable layer for the framework.

        Returns:
        --------
        Nothing, a QGRU object will be created.
        '''

        super().__init__()

        if aiframework not in ['torch', 'keras']: raise Exception(f"aiframework can only be 'torch' or 'keras', found {aiframework}")
        if inputsize       < 1:                   raise Exception(f"inputsize must be greater than or equal to 1, found {inputsize}")
        if hiddensize      < 1:                   raise Exception(f"hiddensize must be greater than or equal to 1, found {hiddensize}")
        if len(qcircuits) != 3:                   raise Exception(f"qcircuits must contain 3 elements, one for the reset gate, one for the update gate and one for the output gate, found {len(qcircuits)}")

        if (qcircuits[0].n_qubits != qcircuits[1].n_qubits) or (qcircuits[0].n_qubits != qcircuits[2].n_qubits) or (qcircuits[1].n_qubits != qcircuits[2].n_qubits):
            raise Exception(f"n_qubits must be the same for each circuit in qcircuits, found {qcircuits[0].n_qubits}, {qcircuits[1].n_qubits} and {qcircuits[2].n_qubits}")

        self.aiframework    = aiframework
        self.n_qubits       = qcircuits[0].n_qubits
        self.hiddensize     = hiddensize

        self.clayer_in      = torch.nn.Linear(inputsize+hiddensize, self.n_qubits)
        self.clayer_out     = torch.nn.Linear(self.n_qubits, hiddensize)

        self.qlayer_reset   = AIInterface.network_layer(
                                circuit      = qcircuits[0].circuit,
                                weight_shape = qcircuits[0].weight_shape,
                                n_qubits     = self.n_qubits,
                                aiframework  = self.aiframework
                            )

        self.qlayer_update  = AIInterface.network_layer(
                                circuit      = qcircuits[1].circuit,
                                weight_shape = qcircuits[1].weight_shape,
                                n_qubits     = self.n_qubits,
                                aiframework  = self.aiframework
                            )

        self.qlayer_output  = AIInterface.network_layer(
                                circuit      = qcircuits[2].circuit,
                                weight_shape = qcircuits[2].weight_shape,
                                n_qubits     = self.n_qubits,
                                aiframework  = self.aiframework
                            )

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward function for the QGRU layer.

        Parameters:
        -----------
        - x : torch.Tensor
            input tensor of shape (batch, sequence_length, featuressize)

        Returns:
        --------
        - out : torch.Tensor
            QGRU output, the sequence of hidden states with shape (batch, sequence_length, hiddensize)
        '''

        if len(x.shape) != 3: raise Exception(f"x must be a tensor with 3 dimensions (batch, sequence_length, featuressize), found {len(x.shape)}")

        batch_size, seq_length, featuressize = x.size()
        hidden_seq = []
        # keep the hidden state on the same device and dtype as the input
        h_t = torch.zeros(batch_size, self.hiddensize, device=x.device, dtype=x.dtype)

        for t in range(seq_length):
            # get features from the t-th element in seq, for all entries in the batch
            x_t = x[:, t, :]

            # Concatenate input and hidden state
            v_t = torch.cat((h_t, x_t), dim=1)

            # match qubit dimension
            y_t = self.clayer_in(v_t)

            r_t = torch.sigmoid(self.clayer_out(self.qlayer_reset(y_t)))   # reset gate
            z_t = torch.sigmoid(self.clayer_out(self.qlayer_update(y_t)))  # update gate

            # Concatenate input and gated hidden state
            v2_t = torch.cat(((r_t * h_t), x_t), dim=1)

            # match qubit dimension
            y2_t = self.clayer_in(v2_t)

            h_tilde_t = torch.tanh(self.clayer_out(self.qlayer_output(y2_t)))

            h_t = ((1-z_t) * h_tilde_t) + (z_t * h_t)

            hidden_seq.append(h_t.unsqueeze(0))

        hidden_seq = torch.cat(hidden_seq, dim=0)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()

        out = hidden_seq
        return out
class QGRU(torch.nn.modules.module.Module):

Quantum Gated Recurrent Unit layer.

Currently supports only Torch.

Reference

A. Ceschini, A. Rosato and M. Panella, "Hybrid Quantum-Classical Recurrent
Neural Networks for Time Series Prediction," 2022 International Joint Conference
on Neural Networks (IJCNN), Padua, Italy, 2022, pp. 1-8,
doi: 10.1109/IJCNN55064.2022.9892441.
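
For orientation, the recurrences computed in forward() are the standard GRU cell updates, with each gate's pre-activation produced by one of the three hybrid quantum layers. A sketch in this module's notation, where W_in and W_out denote clayer_in and clayer_out, Q_reset, Q_update and Q_output are the quantum layers, sigma is the sigmoid and \odot the element-wise product:

\begin{aligned}
y_t         &= W_{\mathrm{in}}\,[h_{t-1};\,x_t] \\
r_t         &= \sigma\big(W_{\mathrm{out}}\,Q_{\mathrm{reset}}(y_t)\big) \\
z_t         &= \sigma\big(W_{\mathrm{out}}\,Q_{\mathrm{update}}(y_t)\big) \\
\tilde{h}_t &= \tanh\big(W_{\mathrm{out}}\,Q_{\mathrm{output}}(W_{\mathrm{in}}\,[r_t \odot h_{t-1};\,x_t])\big) \\
h_t         &= (1 - z_t)\odot\tilde{h}_t + z_t\odot h_{t-1}
\end{aligned}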

QGRU(qcircuits: list[hqm.circuits.circuit.QuantumCircuit], inputsize: int, hiddensize: int, aiframework: str = 'torch')

QGRU constructor.

Parameters:

  • qcircuits : list of QuantumCircuit
    list containing three quantum circuits in this exact order: 1) Quantum Layer Reset, 2) Quantum Layer Update, 3) Quantum Layer Output
  • inputsize : int
    integer representing the number of variables (channels) in the input data
  • hiddensize : int
    integer representing the size of the hidden (recurrent) state
  • aiframework : str
    string representing the AI framework in use, can be 'torch' or 'keras'. This will create
    a compatible trainable layer for the framework.

Returns:

Nothing, a QGRU object will be created.
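
As a concrete illustration, a minimal construction sketch follows. BasicEntangledCircuit, its module path and its (n_qubits, n_layers, dev) signature are assumptions about the surrounding hqm package (they are not defined in this module); substitute whatever QuantumCircuit subclass your installation provides.

# Hypothetical usage sketch; the circuit class and its constructor signature
# are assumed from the wider hqm package, not taken from this module.
import pennylane as qml

from hqm.circuits.angleencoding import BasicEntangledCircuit
from hqm.layers.recurrent import QGRU

n_qubits = 4
dev = qml.device("default.qubit", wires=n_qubits)

# one circuit per gate, in the required order: reset, update, output
qcircuits = [
    BasicEntangledCircuit(n_qubits=n_qubits, n_layers=1, dev=dev),
    BasicEntangledCircuit(n_qubits=n_qubits, n_layers=1, dev=dev),
    BasicEntangledCircuit(n_qubits=n_qubits, n_layers=1, dev=dev),
]

qgru = QGRU(qcircuits=qcircuits, inputsize=8, hiddensize=16)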

def forward(self, x: torch.Tensor) -> torch.Tensor:

Torch forward function for the QGRU layer.

Parameters:

  • x : torch.Tensor input tensor of shape (batch, sequence_length, featuressize)

Returns:

  • out : torch.Tensor QGRU output, the sequence of hidden states with shape (batch, sequence_length, hiddensize)
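
Continuing the construction sketch above, a minimal forward pass; shapes follow the (batch, sequence_length, featuressize) convention checked at the top of forward():

import torch

x = torch.rand(32, 10, 8)   # (batch, sequence_length, featuressize=inputsize)
out = qgru(x)               # hidden state for every timestep
print(out.shape)            # torch.Size([32, 10, 16]) = (batch, sequence_length, hiddensize)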