hqm.encoding.autoencoders
import sys
sys.path += ['.', './layers/', './circuits/', './utils/']

from hqm.layers.quanvolution import Quanvolution2D
from hqm.circuits.circuit import QuantumCircuit
from hqm.layers.basiclayer import BasicLayer
from hqm.utils.sizes import size_conv_layer

import torch


class QuanvolutionAutoencoder(torch.nn.Module):
    '''
    Hybrid convolutional autoencoder: the encoder is composed of quanvolution layers, the decoder of classical layers.
    '''

    def __init__(self, qcircuits : list[QuantumCircuit], in_shape : tuple[int], filters : list[int], kernelsizes : list[int], strides : list[int]) -> None:
        '''
        QuanvolutionAutoencoder constructor.

        Parameters:
        -----------
        - qcircuits : list
            list of QuantumCircuit objects
        - in_shape : tuple
            tuple representing the image shape, (width, height, channels)
        - filters : list
            list containing the number of filters for each quanvolution layer
        - kernelsizes : list
            list containing the kernel size for each quanvolution layer
        - strides : list
            list containing the stride for each quanvolution layer

        Returns:
        --------
        Nothing, a QuanvolutionAutoencoder object will be created.
        '''

        super().__init__()

        if len(filters) != len(qcircuits): raise Exception(f"length of filters must be the same as length of qcircuits, found {len(filters)} and {len(qcircuits)}")
        if len(kernelsizes) != len(qcircuits): raise Exception(f"length of kernelsizes must be the same as length of qcircuits, found {len(kernelsizes)} and {len(qcircuits)}")
        if len(strides) != len(qcircuits): raise Exception(f"length of strides must be the same as length of qcircuits, found {len(strides)} and {len(qcircuits)}")
        if len(filters) != len(kernelsizes): raise Exception(f"length of filters must be the same as length of kernelsizes, found {len(filters)} and {len(kernelsizes)}")
        if len(filters) != len(strides): raise Exception(f"length of filters must be the same as length of strides, found {len(filters)} and {len(strides)}")
        if len(strides) != len(kernelsizes): raise Exception(f"length of strides must be the same as length of kernelsizes, found {len(strides)} and {len(kernelsizes)}")
        if len(in_shape) != 3: raise Exception(f"length of in_shape must be equal to 3 (width, height, channels), found {len(in_shape)}")

        # ModuleList (rather than a plain Python list) so that the stacked layers are
        # registered as submodules and their parameters are seen by the optimizer.
        self.encoder = torch.nn.ModuleList()
        self.decoder = torch.nn.ModuleList()
        self.sizes = []

        w, h, c = in_shape
        self.depth = len(qcircuits)

        # Building the quanvolution encoder
        for i in range(self.depth):
            self.encoder.append(Quanvolution2D(qcircuit=qcircuits[i], filters=filters[i], kernelsize=kernelsizes[i], stride=strides[i]))

        filters.reverse()
        filters.append(filters[-1])
        kernelsizes.reverse()
        strides.reverse()

        # Building the classical decoder
        for i in range(self.depth):
            self.decoder.append(torch.nn.ConvTranspose2d(filters[i], filters[i+1], kernel_size=kernelsizes[i], stride=strides[i]))

    def encoder_f(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method for the encoder.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        for i in range(self.depth):
            x = torch.nn.functional.relu(self.encoder[i](x))

        out = x
        return out

    def decoder_f(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method for the decoder.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        for i in range(self.depth - 1):
            x = torch.nn.functional.relu(self.decoder[i](x))
        out = torch.nn.functional.sigmoid(self.decoder[-1](x))

        return out

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        dec = self.encoder_f(x)
        out = self.decoder_f(dec)

        return out


class HybridAutoencoder(torch.nn.Module):
    '''
    Hybrid convolutional autoencoder: the encoder and the decoder are composed of classical layers, the hidden space
    is processed by a quantum circuit.
    '''

    def __init__(self, qlayer : BasicLayer, in_shape : tuple[int], filters : list[int], kernelsizes : list[int], strides : list[int]) -> None:
        '''
        HybridAutoencoder constructor.

        Parameters:
        -----------
        - qlayer : hqm.layers.basiclayer.BasicLayer
            hqm quantum layer to be stacked between two fully connected layers
        - in_shape : tuple
            tuple representing the image shape, (width, height, channels)
        - filters : list
            list containing the number of filters for each convolutional layer
        - kernelsizes : list
            list containing the kernel size for each convolutional layer
        - strides : list
            list containing the stride for each convolutional layer

        Returns:
        --------
        Nothing, a HybridAutoencoder object will be created.
        '''

        super().__init__()

        if len(filters) != len(kernelsizes): raise Exception(f"length of filters must be the same as length of kernelsizes, found {len(filters)} and {len(kernelsizes)}")
        if len(filters) != len(strides): raise Exception(f"length of filters must be the same as length of strides, found {len(filters)} and {len(strides)}")
        if len(strides) != len(kernelsizes): raise Exception(f"length of strides must be the same as length of kernelsizes, found {len(strides)} and {len(kernelsizes)}")
        if len(in_shape) != 3: raise Exception(f"length of in_shape must be equal to 3 (width, height, channels), found {len(in_shape)}")

        # ModuleList (rather than a plain Python list) so that the stacked layers are
        # registered as submodules and their parameters are seen by the optimizer.
        self.encoder = torch.nn.ModuleList()
        self.decoder = torch.nn.ModuleList()
        self.sizes = []

        w, h, c = in_shape
        self.depth = len(filters)

        self.encoder.append(torch.nn.Conv2d(c, filters[0], kernel_size=kernelsizes[0], stride=strides[0]))
        w = size_conv_layer(s=w, kernel_size=kernelsizes[0], padding=0, stride=strides[0])
        h = size_conv_layer(s=h, kernel_size=kernelsizes[0], padding=0, stride=strides[0])
        c = filters[0]

        # Building the classical encoder
        for i in range(1, self.depth):
            self.encoder.append(torch.nn.Conv2d(filters[i-1], filters[i], kernel_size=kernelsizes[i], stride=strides[i]))
            w = size_conv_layer(s=w, kernel_size=kernelsizes[i], padding=0, stride=strides[i])
            h = size_conv_layer(s=h, kernel_size=kernelsizes[i], padding=0, stride=strides[i])
            c = filters[i]

        self.w = w
        self.h = h
        self.c = c
        self.flatten_size = w*h*c

        # Bottleneck: fully connected layer -> quantum layer -> fully connected layer
        self.fc1 = torch.nn.Linear(self.flatten_size, qlayer.n_qubits)
        self.qc_1 = qlayer.qlayer
        self.fc2 = torch.nn.Linear(qlayer.n_qubits, self.flatten_size)

        filters.reverse()
        filters.append(filters[-1])
        kernelsizes.reverse()
        strides.reverse()

        # Building the classical decoder
        for i in range(self.depth):
            self.decoder.append(torch.nn.ConvTranspose2d(filters[i], filters[i+1], kernel_size=kernelsizes[i], stride=strides[i]))

    def encoder_f(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method for the encoder.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        for i in range(self.depth):
            x = torch.nn.functional.relu(self.encoder[i](x))

        x = x.view(-1, self.flatten_size)
        x = torch.nn.functional.relu(self.fc1(x))
        x = torch.nn.functional.relu(self.qc_1(x))
        x = torch.nn.functional.relu(self.fc2(x))
        out = x.reshape((x.shape[0], self.c, self.w, self.h))
        return out

    def decoder_f(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method for the decoder.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        for i in range(self.depth - 1):
            x = torch.nn.functional.relu(self.decoder[i](x))
        out = torch.nn.functional.sigmoid(self.decoder[-1](x))

        return out

    def forward(self, x : torch.Tensor) -> torch.Tensor:
        '''
        Torch forward method.

        Parameters:
        -----------
        - x : torch.Tensor
            input for the torch model

        Returns:
        --------
        - out : torch.Tensor
            output from the torch model
        '''

        dec = self.encoder_f(x)
        out = self.decoder_f(dec)

        return out
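The decoder construction above relies on a bit of list bookkeeping: the per-layer settings are reversed so the decoder mirrors the encoder, and filters receives one extra entry so that every ConvTranspose2d gets an (input, output) channel pair. The sketch below only traces that bookkeeping with made-up filter counts; the numbers are illustrative, not taken from the source.

import torch

# Hypothetical encoder settings: channels flow c -> 8 -> 16 -> 32.
filters     = [8, 16, 32]
kernelsizes = [3, 3, 3]
strides     = [2, 2, 2]
depth       = len(filters)

# Same bookkeeping as in the constructors above.
filters.reverse()            # [32, 16, 8]
filters.append(filters[-1])  # [32, 16, 8, 8] -> each decoder layer gets an (in, out) pair
kernelsizes.reverse()
strides.reverse()

decoder = torch.nn.ModuleList([
    torch.nn.ConvTranspose2d(filters[i], filters[i+1], kernel_size=kernelsizes[i], stride=strides[i])
    for i in range(depth)
])

# Channel flow of the decoder: 32 -> 16 -> 8 -> 8; note that the last layer keeps the first
# encoder filter count rather than returning to the original input channel count.
print([(layer.in_channels, layer.out_channels) for layer in decoder])   # [(32, 16), (16, 8), (8, 8)]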
class QuanvolutionAutoencoder(torch.nn.modules.module.Module):
Hybrid convolutional autoencoder: the encoder is composed of quanvolution layers, the decoder of classical layers.
QuanvolutionAutoencoder(qcircuits: list[hqm.circuits.circuit.QuantumCircuit], in_shape: tuple[int], filters: list[int], kernelsizes: list[int], strides: list[int])
QuanvolutionAutoencoder constructor.
Parameters:
- qcircuits : list
  list of QuantumCircuit objects
- in_shape : tuple
  tuple representing the image shape, (width, height, channels)
- filters : list
  list containing the number of filters for each quanvolution layer
- kernelsizes : list
  list containing the kernel size for each quanvolution layer
- strides : list
  list containing the stride for each quanvolution layer
Returns:
Nothing, a QuanvolutionAutoencoder object will be created.
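A minimal construction sketch, assuming one QuantumCircuit object is already available per quanvolution layer; circuit_a and circuit_b below are placeholders for whatever hqm.circuits.circuit.QuantumCircuit subclasses are actually used, and the NCHW input layout is an assumption.

import torch
from hqm.encoding.autoencoders import QuanvolutionAutoencoder

# circuit_a / circuit_b: placeholders for concrete QuantumCircuit instances (not part of this module).
qcircuits   = [circuit_a, circuit_b]
filters     = [4, 8]       # one filter count per quanvolution layer
kernelsizes = [3, 3]       # one kernel size per layer
strides     = [2, 2]       # one stride per layer

model = QuanvolutionAutoencoder(qcircuits, in_shape=(28, 28, 1),
                                filters=filters, kernelsizes=kernelsizes, strides=strides)

x     = torch.rand(1, 1, 28, 28)   # assumed NCHW batch, values in [0, 1] to match the sigmoid output
x_hat = model(x)                   # encoder_f followed by decoder_f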
def encoder_f(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method for the encoder
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
def decoder_f(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method for the decoder
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
def forward(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
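Because forward returns a sigmoid-activated reconstruction, a standard reconstruction loss applies directly. A minimal training-step sketch in plain torch, assuming model is one of the autoencoders in this module and batch is an NCHW float tensor scaled to [0, 1]:

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # model: an autoencoder from this module (assumed)
criterion = torch.nn.MSELoss()                              # or BCELoss, since outputs lie in (0, 1)

optimizer.zero_grad()
reconstruction = model(batch)            # encoder_f followed by decoder_f
loss = criterion(reconstruction, batch)  # reconstruction error against the input batch
loss.backward()
optimizer.step()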
Inherited Members
torch.nn.modules.module.Module: register_buffer, register_parameter, add_module, register_module, get_submodule, get_parameter, get_buffer, get_extra_state, set_extra_state, apply, cuda, ipu, xpu, cpu, type, float, double, half, bfloat16, to_empty, to, register_full_backward_pre_hook, register_backward_hook, register_full_backward_hook, register_forward_pre_hook, register_forward_hook, register_state_dict_pre_hook, state_dict, register_load_state_dict_post_hook, load_state_dict, parameters, named_parameters, buffers, named_buffers, children, named_children, modules, named_modules, train, eval, requires_grad_, zero_grad, extra_repr, compile
class HybridAutoencoder(torch.nn.modules.module.Module):
Hybrid convolutional autoencoder: the encoder and the decoder are composed of classical layers, the hidden space is processed by a quantum circuit.
HybridAutoencoder(qlayer: hqm.layers.basiclayer.BasicLayer, in_shape: tuple[int], filters: list[int], kernelsizes: list[int], strides: list[int])
HybridAutoencoder constructor.
Parameters:
- qlayer : hqm.layers.basiclayer.BasicLayer
  hqm quantum layer to be stacked between two fully connected layers
- in_shape : tuple
  tuple representing the image shape, (width, height, channels)
- filters : list
  list containing the number of filters for each convolutional layer
- kernelsizes : list
  list containing the kernel size for each convolutional layer
- strides : list
  list containing the stride for each convolutional layer
Returns:
Nothing, a HybridAutoencoder object will be created.
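The constructor derives the bottleneck width from the encoder output shape: each Conv2d shrinks the spatial size, and flatten_size = w*h*c is what fc1 consumes and fc2 produces. The sketch below re-derives those numbers for a hypothetical 28x28x1 input with two layers, assuming size_conv_layer implements the standard output-size rule floor((s + 2*padding - kernel_size)/stride) + 1.

# conv_out_size mirrors what hqm.utils.sizes.size_conv_layer is assumed to compute.
def conv_out_size(s: int, kernel_size: int, padding: int, stride: int) -> int:
    return (s + 2 * padding - kernel_size) // stride + 1

w, h, c     = 28, 28, 1            # hypothetical (width, height, channels)
filters     = [8, 16]
kernelsizes = [3, 3]
strides     = [2, 2]

for f, k, st in zip(filters, kernelsizes, strides):
    w = conv_out_size(w, k, 0, st)
    h = conv_out_size(h, k, 0, st)
    c = f

flatten_size = w * h * c
print(w, h, c, flatten_size)       # 6 6 16 576 -> fc1 = Linear(576, n_qubits), fc2 = Linear(n_qubits, 576)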
def encoder_f(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method for the encoder
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
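To isolate the shape handling in encoder_f's bottleneck, here is a classical stand-in: a plain torch.nn.Linear plays the role of qlayer.qlayer (the quantum layer), while the flatten/reshape round trip matches the code above. The sizes are hypothetical.

import torch

n, c, w, h   = 2, 16, 6, 6                    # hypothetical encoder output shape (batch, channels, width, height)
n_qubits     = 4
flatten_size = c * w * h

fc1  = torch.nn.Linear(flatten_size, n_qubits)
qc_1 = torch.nn.Linear(n_qubits, n_qubits)    # classical stand-in for the quantum layer
fc2  = torch.nn.Linear(n_qubits, flatten_size)

x   = torch.rand(n, c, w, h)                  # what the convolutional encoder would produce
x   = x.view(-1, flatten_size)                # (n, flatten_size)
x   = torch.nn.functional.relu(fc1(x))        # (n, n_qubits)
x   = torch.nn.functional.relu(qc_1(x))       # (n, n_qubits); the quantum layer in the real model
x   = torch.nn.functional.relu(fc2(x))        # (n, flatten_size)
out = x.reshape((x.shape[0], c, w, h))        # back to a feature map for the decoder
print(out.shape)                              # torch.Size([2, 16, 6, 6])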
def decoder_f(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method for the decoder
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
def forward(self, x: torch.Tensor) -> torch.Tensor:
Torch forward method
Parameters:
- x : torch.Tensor
input for the torch model
Returns:
- out : torch.Tensor
output from the torch model
Inherited Members
torch.nn.modules.module.Module: register_buffer, register_parameter, add_module, register_module, get_submodule, get_parameter, get_buffer, get_extra_state, set_extra_state, apply, cuda, ipu, xpu, cpu, type, float, double, half, bfloat16, to_empty, to, register_full_backward_pre_hook, register_backward_hook, register_full_backward_hook, register_forward_pre_hook, register_forward_hook, register_state_dict_pre_hook, state_dict, register_load_state_dict_post_hook, load_state_dict, parameters, named_parameters, buffers, named_buffers, children, named_children, modules, named_modules, train, eval, requires_grad_, zero_grad, extra_repr, compile