hqm.layers.quanvolution

import torchvision
import torch
import sys

sys.path += ['.', './utils/', './circuits/']

from hqm.circuits.circuit import QuantumCircuit
from hqm.utils.aiinterface import AIInterface


class Quanvolution2D(torch.nn.Module):
    '''
        Quanvolution2D layer.

        Currently supports only Torch.
    '''

    def __init__(self, qcircuit : QuantumCircuit, filters : int, kernelsize : int = 3, stride : int = 1, padding : str = 'same', aiframework : str = 'torch') -> None:
        '''
        Quanvolution2D constructor.

        Parameters:
        -----------
        - qcircuit : hqm.circuits.circuit.QuantumCircuit
            QuantumCircuit object to be embedded into the quantum layer
        - filters : int
            number of quanvolution filters
        - kernelsize : int
            size of the quanvolution kernel
        - stride : int
            stride for the quanvolution operation
        - padding : str
            padding mode, 'same' or 'valid'
        - aiframework : str
            string representing the AI framework in use, can be 'torch' or 'keras'. This will create
            a compatible trainable layer for the framework.

        Returns:
        --------
        Nothing, a Quanvolution2D object will be created.
        '''

        super().__init__()

        if aiframework not in ['torch', 'keras']: raise Exception(f"Quanvolution2D currently supports only 'torch' as framework, found {aiframework}")
        if kernelsize < 1:                        raise Exception(f"kernelsize must be greater than or equal to 1, found {kernelsize}")
        if stride < 1:                            raise Exception(f"stride must be greater than or equal to 1, found {stride}")

        self.aiframework = aiframework
        self.n_qubits    = qcircuit.n_qubits

        if kernelsize**2 > self.n_qubits:         raise Exception(f"kernelsize**2 must not exceed n_qubits, found kernelsize**2={kernelsize**2} and n_qubits={self.n_qubits}")
        if filters > self.n_qubits:               raise Exception(f"filters must not exceed n_qubits, found filters={filters} and n_qubits={self.n_qubits}")

        self.filters    = filters
        self.kernelsize = kernelsize
        self.stride     = stride
        self.padding    = padding
        self.qlayer     = AIInterface.network_layer(
                                circuit      = qcircuit.circuit,
                                weight_shape = qcircuit.weight_shape,
                                n_qubits     = qcircuit.n_qubits,
                                aiframework  = self.aiframework
                            )

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward function for quanvolution layer

        Parameters:
        -----------
        - x : torch.Tensor
            input image or tensor

        Returns:
        --------
        - out : torch.Tensor
            quanvoluted input
        '''

        if len(x.shape) != 4: raise Exception(f"x must be a 4-dimensional tensor (batch, channels, height, width), found {len(x.shape)} dimensions")

        # Calculate the spatial size of the output feature map
        bs, ch, h, w = x.shape

        h_out = int(((h - self.kernelsize) / self.stride) + 1)
        w_out = int(((w - self.kernelsize) / self.stride) + 1)

        if self.padding == 'same':
            w_pad = int(round((w - w_out) / 2))
            h_pad = int(round((h - h_out) / 2))

        if self.padding == 'valid':
            h_pad = 0
            w_pad = 0

        out = torch.zeros((bs, self.filters, h_out, w_out, ch))

        # Batch loop
        for b in range(bs):
            # Channel loop
            for c in range(ch):
                # Spatial loops
                for j in range(0, h_out, self.stride):
                    for k in range(0, w_out, self.stride):
                        # Process a kernelsize x kernelsize patch of the image
                        # with the quantum circuit
                        p = x[b, c, j:j+self.kernelsize, k:k+self.kernelsize].reshape(-1)
                        q_results = self.qlayer(p)

                        # Each filter takes one output of the quantum circuit
                        for f in range(self.filters):
                            #out[b, f, j // self.kernelsize, k // self.kernelsize, c] = q_results[f]
                            out[b, f, j:j+self.kernelsize, k:k+self.kernelsize, c] = q_results[f]

        # Average over the input channels
        out = torch.mean(out, dim=-1)

        if self.padding == 'same':
            # torch.nn.functional.pad pads the last dimension first: (left, right, top, bottom)
            out = torch.nn.functional.pad(out, (w_pad, w_pad, h_pad, h_pad), "constant", 0)
            # torchvision.transforms.Resize expects the target size as (height, width)
            out = torchvision.transforms.Resize([h, w])(out)

        return out
class Quanvolution2D(torch.nn.modules.module.Module):

Quanvolution2D layer.

Currently supports only Torch.

Quanvolution2D(qcircuit: hqm.circuits.circuit.QuantumCircuit, filters: int, kernelsize: int = 3, stride: int = 1, padding: str = 'same', aiframework: str = 'torch')

Quanvolution2D constructor.

Parameters:

  • qcircuit : hqm.circuits.circuit.QuantumCircuit
    QuantumCircuit object to be embedded into the quantum layer
  • filters : int
    number of quanvolution filters
  • kernelsize : int
    size of the quanvolution kernel
  • stride : int
    stride for the quanvolution operation
  • padding : str
    padding mode, 'same' or 'valid'
  • aiframework : str
    string representing the AI framework in use, can be 'torch' or 'keras'. This will create
    a compatible trainable layer for the framework.

Returns:

Nothing, a Quanvolution2D object will be created.
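
A minimal construction sketch, for orientation only: it assumes qcircuit is an already-built instance of some concrete hqm.circuits.circuit.QuantumCircuit subclass (this page does not specify one) exposing circuit, weight_shape and n_qubits, and the 4-qubit figure is an assumption for the example.

    from hqm.layers.quanvolution import Quanvolution2D

    # qcircuit: assumed to be a concrete QuantumCircuit instance with 4 qubits,
    # built elsewhere; with 4 qubits the constructor checks require
    # kernelsize**2 <= 4 and filters <= 4.
    quanv = Quanvolution2D(qcircuit, filters=2, kernelsize=2, stride=1,
                           padding='valid', aiframework='torch')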

def forward(self, x: torch.Tensor) -> torch.Tensor:

Torch forward function for quanvolution layer

Parameters:

  • x : torch.Tensor input image or tensor

Returns:

  • out : torch.Tensor quanvoluted input
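
Continuing the construction sketch above, a forward pass on a dummy batch might look like this (the 1x1x6x6 input shape is only an illustrative assumption):

    import torch

    # Dummy batch: (batch, channels, height, width)
    x = torch.rand(1, 1, 6, 6)

    out = quanv(x)      # quanv from the construction sketch above
    print(out.shape)    # expected (1, 2, 5, 5): with kernelsize=2, stride=1 and
                        # padding='valid', h_out = w_out = (6 - 2)/1 + 1 = 5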