class TfliteLayer:
    """Wrapper for a TFLite flatbuffer layer (operator)"""
[docs]@staticmethoddeffrom_flatbuffer(index:int,model:TfliteModel,fb_operation:_tflite_schema_fb.OperatorT)->TfliteLayer:"""Instantiate a TfliteLayer from then given TfliteModel flatbuffer operation"""fb_opcode=model.flatbuffer_model.operatorCodes[fb_operation.opcodeIndex]# See: https://github.com/tensorflow/community/pull/285/files# for why we return the max(DeprecatedBuiltinCode, BuiltinCode)opcode=max(getattr(fb_opcode,'deprecatedBuiltinCode',-1),fb_opcode.builtinCode)opcode_version=fb_opcode.versionlayer_cls=_LAYER_MAP.get(opcode,TfliteLayer)returnlayer_cls(index=index,opcode=opcode,opcode_version=opcode_version,model=model,fb_operation=fb_operation)
    def __str__(self):
        return self.name

    @property
    def index(self) -> int:
        """Index of this layer in the model"""
        return self._index

    @property
    def name(self) -> str:
        """Name of current layer as: op<index>-<OpCodeStr>"""
        return f'op{self._index}-{self._opcode_str}'

    @property
    def opcode(self) -> TfliteOpCode:
        """OpCode numeric value"""
        return self._opcode

    @property
    def opcode_str(self) -> str:
        """OpCode as a string"""
        return self._opcode_str

    @property
    def options(self) -> TfliteLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def model(self) -> TfliteModel:
        """Reference to associated TfliteModel"""
        return self._model

    @property
    def metadata(self) -> Dict[str, object]:
        """Additional key/value data to associate with the layer

        NOTE: This information is generated by the framework/Python scripts
        (i.e. the information does NOT come from the .tflite model)
        """
        return self._metadata

    @property
    def inputs(self) -> List[TfliteTensor]:
        """List of layer input tensor(s)"""
        return self._inputs

    @property
    def n_inputs(self) -> int:
        """Return the number of inputs"""
        return len(self._inputs)

    @property
    def outputs(self) -> List[TfliteTensor]:
        """List of layer output tensor(s)"""
        return self._outputs

    @property
    def n_outputs(self) -> int:
        """Return the number of outputs"""
        return len(self._outputs)
[docs]defget_input_tensor(self,index=0)->TfliteTensor:"""Get layer input tensor as TfliteTensor"""ifindex>=self.n_inputs:raiseIndexError(f'Index overflow ({index} >= {self.n_inputs})')returnself._inputs[index]
[docs]defget_input_data(self,index=0)->np.ndarray:"""Get layer input tensor as np.ndarray"""ifindex>=self.n_inputs:raiseIndexError(f'Index overflow ({index} >= {self.n_inputs})')returnself._inputs[index].data
[docs]defget_output_tensor(self,index=0)->TfliteTensor:"""Layer output tensor as TfliteTensor"""ifindex>=self.n_outputs:raiseIndexError(f'Index overflow ({index} >= {self.n_outputs})')returnself._outputs[index]
[docs]defget_output_data(self,index=0)->np.ndarray:"""Layer output tensor as np.ndarray"""ifindex>=self.n_outputs:raiseIndexError(f'Index overflow ({index} >= {self.n_outputs})')returnself._outputs[index].data
class TfliteLayerOptions:
    """Generic layer options object"""

    def __init__(self, fb_object=None, type: _tflite_schema_fb.BuiltinOptions = None):
        self._type = type
        # Mirror every attribute of the flatbuffer options object onto this
        # wrapper so callers can access them directly
        if fb_object is not None:
            for attr_name in vars(fb_object):
                setattr(self, attr_name, getattr(fb_object, attr_name))

    @property
    def options_type(self) -> _tflite_schema_fb.BuiltinOptions:
        """.tflite schema option code"""
        return self._type

    @property
    def options_type_str(self) -> str:
        """.tflite schema option as a string"""
        return _convert_object_value_to_string(
            _tflite_schema_fb.BuiltinOptions(),
            self._type
        )

    def __str__(self):
        return f'Type={self.options_type_str}'
class TfliteAddLayerOptions(_tflite_schema_fb.AddOptionsT, TfliteLayerOptions):
    """Add layer options"""

    def __init__(self, opts=None):
        _tflite_schema_fb.AddOptionsT.__init__(self)
        TfliteLayerOptions.__init__(
            self,
            opts,
            type=_tflite_schema_fb.BuiltinOptions.AddOptions
        )
        # Wrap the raw fused-activation code in a TfliteActivation helper
        self._activation = TfliteActivation(self.fusedActivationFunction)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    def __str__(self):
        return f'Activation:{self.activation_str}'
    @property
    def options(self) -> TfliteConv2DLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def filters(self) -> int:
        """The number of filters"""
        return self._filters

    @property
    def kernel_size(self) -> Tuple[int, int]:
        """Filters kernel size as height x width"""
        return self._kernel_size

    @property
    def strides(self) -> Tuple[int, int]:
        """Kernel stride height x width"""
        return (self._options.stride_height, self._options.stride_width)

    @property
    def padding(self) -> str:
        """Kernel padding"""
        return self._options.padding_str

    @property
    def activation(self) -> str:
        """Fused activation"""
        return self._options.activation_str

    @property
    def use_bias(self) -> bool:
        """Return if the layer uses a bias"""
        # A bias is present when the op carries a third input tensor
        return len(self._inputs) > 2

    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor data"""
        return self._inputs[0]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def filters_tensor(self) -> TfliteTensor:
        """Filters tensor data"""
        return self._inputs[1]

    @property
    def filters_data(self) -> np.ndarray:
        """Filters tensor data"""
        return self.filters_tensor.data

    @property
    def bias_tensor(self) -> TfliteTensor:
        """Bias tensor data (None if no bias used)"""
        return self._inputs[2] if self.use_bias else None

    @property
    def bias_data(self) -> np.ndarray:
        """Bias tensor data (None if no bias used)"""
        return self.bias_tensor.data if self.use_bias else None

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def params(self) -> TfliteConvParams:
        """Calculated layer parameters"""
        return TfliteConvParams.calculate(self)
    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    @property
    def padding_str(self) -> str:
        """Padding as a string"""
        return self._padding.to_string()

    @padding_str.setter
    def padding_str(self, v):
        self._padding = TflitePadding(v)

    @property
    def padding(self) -> TflitePadding:
        """Padding type"""
        return self._padding

    @padding.setter
    def padding(self, v: int):
        self._padding = TflitePadding(v)

    @property
    def stride_width(self) -> int:
        """Kernel stride width"""
        # Alias for the flatbuffer-generated strideW field
        return self.strideW

    @stride_width.setter
    def stride_width(self, v):
        self.strideW = v

    @property
    def stride_height(self) -> int:
        """Kernel stride height"""
        # Alias for the flatbuffer-generated strideH field
        return self.strideH

    @stride_height.setter
    def stride_height(self, v):
        self.strideH = v

    def __str__(self):
        return f'Padding:{self.padding_str} stride:{self.stride_width}x{self.stride_height} activation:{self.activation_str}'
    @property
    def options(self) -> TfliteTransposeConvLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def filters(self) -> int:
        """The number of filters"""
        return self._filters

    @property
    def kernel_size(self) -> Tuple[int, int]:
        """Filters kernel size as height x width"""
        return self._kernel_size

    @property
    def strides(self) -> Tuple[int, int]:
        """Kernel stride height x width"""
        return (self._options.stride_height, self._options.stride_width)

    @property
    def padding(self) -> str:
        """Kernel padding"""
        return self._options.padding_str

    @property
    def use_bias(self) -> bool:
        """Return if the layer uses a bias"""
        # A bias is present when the op carries a fourth input tensor
        return len(self._inputs) > 3

    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor data"""
        # NOTE(review): the activation input is _inputs[2] here — TRANSPOSE_CONV
        # inputs appear to be ordered [output_shape, filters, input, bias];
        # confirm against the .tflite schema
        return self._inputs[2]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def filters_tensor(self) -> TfliteTensor:
        """Filters tensor data"""
        return self._inputs[1]

    @property
    def filters_data(self) -> np.ndarray:
        """Filters tensor data"""
        return self.filters_tensor.data

    @property
    def bias_tensor(self) -> TfliteTensor:
        """Bias tensor data (None if no bias used)"""
        return self._inputs[3] if self.use_bias else None

    @property
    def bias_data(self) -> np.ndarray:
        """Bias tensor data (None if no bias used)"""
        return self.bias_tensor.data if self.use_bias else None

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def params(self) -> TfliteTransposeConvParams:
        """Calculated layer parameters"""
        return TfliteTransposeConvParams.calculate(self)
    @property
    def options(self) -> TfliteFullyConnectedLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def accumulator_depth(self) -> int:
        """Number of weights to accumulate"""
        return self.weights_tensor.shape[-1]

    @property
    def units(self) -> int:
        """Number of neurons"""
        return self.output_tensor.shape[-1]

    @property
    def activation(self) -> str:
        """Fused activation"""
        return self._options.activation_str

    @property
    def use_bias(self) -> bool:
        """Return if the layer uses a bias"""
        # A bias is present when the op carries a third input tensor
        return len(self._inputs) > 2

    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor data"""
        return self._inputs[0]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def weights_tensor(self) -> TfliteTensor:
        """Weights tensor data"""
        return self._inputs[1]

    @property
    def weights_data(self) -> np.ndarray:
        """Weights tensor data"""
        return self.weights_tensor.data

    @property
    def bias_tensor(self) -> TfliteTensor:
        """Bias tensor data (None if no bias used)"""
        return self._inputs[2] if self.use_bias else None

    @property
    def bias_data(self) -> np.ndarray:
        """Bias tensor data (None if no bias used)"""
        return self.bias_tensor.data if self.use_bias else None

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def params(self) -> TfliteFullyConnectedParams:
        """Calculated layer parameters"""
        return TfliteFullyConnectedParams.calculate(self)
    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    def __str__(self):
        return f'Activation:{self.activation_str}'
    @property
    def options(self) -> TfliteDepthwiseConv2DLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def multiplier(self) -> int:
        """Depth multiplier"""
        return self._options.multiplier

    @property
    def kernel_size(self) -> Tuple[int, int]:
        """Filters kernel size as height x width"""
        return self._kernel_size

    @property
    def strides(self) -> Tuple[int, int]:
        """Kernel stride height x width"""
        return (self._options.stride_height, self._options.stride_width)

    @property
    def padding(self) -> str:
        """Kernel padding"""
        return self._options.padding_str

    @property
    def activation(self) -> str:
        """Fused activation"""
        return self._options.activation_str

    @property
    def use_bias(self) -> bool:
        """Return if the layer uses a bias"""
        # A bias is present when the op carries a third input tensor
        return len(self._inputs) > 2

    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor data"""
        return self._inputs[0]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def filters_tensor(self) -> TfliteTensor:
        """Filters tensor data"""
        return self._inputs[1]

    @property
    def filters_data(self) -> np.ndarray:
        """Filters tensor data"""
        return self.filters_tensor.data

    @property
    def bias_tensor(self) -> TfliteTensor:
        """Bias tensor data (None if no bias used)"""
        return self._inputs[2] if self.use_bias else None

    @property
    def bias_data(self) -> np.ndarray:
        """Bias tensor data (None if no bias used)"""
        return self.bias_tensor.data if self.use_bias else None

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def params(self) -> TfliteDepthwiseConvParams:
        """Calculated layer parameters"""
        return TfliteDepthwiseConvParams.calculate(self)
    @property
    def stride_width(self) -> int:
        """Kernel stride width"""
        # Alias for the flatbuffer-generated strideW field
        return self.strideW

    @stride_width.setter
    def stride_width(self, v):
        self.strideW = v

    @property
    def stride_height(self) -> int:
        """Kernel stride height"""
        # Alias for the flatbuffer-generated strideH field
        return self.strideH

    @stride_height.setter
    def stride_height(self, v):
        self.strideH = v

    @property
    def multiplier(self) -> int:
        """Depth multiplier"""
        # Alias for the flatbuffer-generated depthMultiplier field
        return self.depthMultiplier

    @multiplier.setter
    def multiplier(self, v):
        self.depthMultiplier = v

    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    @property
    def padding_str(self) -> str:
        """Kernel padding as a string"""
        return self._padding.to_string()

    @padding_str.setter
    def padding_str(self, v):
        self._padding = TflitePadding(v)

    @property
    def padding(self) -> TflitePadding:
        """Kernel padding"""
        return self._padding

    @padding.setter
    def padding(self, v: int):
        self._padding = TflitePadding(v)

    def __str__(self):
        return f'Multiplier:{self.multiplier} padding:{self.padding_str} stride:{self.stride_width}x{self.stride_height} activation:{self.activation_str}'
class TflitePooling2dLayer(TfliteLayer):
    """AVERAGE_POOL_2D or MAX_POOL_2D operation TfliteLayer"""
    @property
    def stride_width(self) -> int:
        """Filter stride width"""
        # Alias for the flatbuffer-generated strideW field
        return self.strideW

    @stride_width.setter
    def stride_width(self, v):
        self.strideW = v

    @property
    def stride_height(self) -> int:
        """Filter stride height"""
        # Alias for the flatbuffer-generated strideH field
        return self.strideH

    @stride_height.setter
    def stride_height(self, v):
        self.strideH = v

    @property
    def filter_width(self) -> int:
        """Filter width"""
        return self.filterWidth

    @filter_width.setter
    def filter_width(self, v):
        self.filterWidth = v

    @property
    def filter_height(self) -> int:
        """Filter height"""
        return self.filterHeight

    @filter_height.setter
    def filter_height(self, v):
        self.filterHeight = v

    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    @property
    def padding_str(self) -> str:
        """Filter padding as a string"""
        return self._padding.to_string()

    @padding_str.setter
    def padding_str(self, v):
        self._padding = TflitePadding(v)

    @property
    def padding(self) -> TflitePadding:
        """Filter padding"""
        return self._padding

    @padding.setter
    def padding(self, v: int):
        self._padding = TflitePadding(v)

    def __str__(self):
        return f'Padding:{self.padding_str} stride:{self.stride_width}x{self.stride_height} filter:{self.filter_width}x{self.filter_height} activation:{self.activation_str}'
    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor data"""
        return self._inputs[0]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def requires_copy(self) -> bool:
        """Return true if a memcpy is required, False if the reshape was done in-place"""
        # The operation is in-place when the input and output refer to the
        # same tensor index in the model
        return self._inputs[0].index != self._outputs[0].index

    @property
    def n_input_elements(self) -> int:
        """Return the number of input elements"""
        return self._inputs[0].shape.flat_size
    @property
    def options(self) -> TfliteUnidirectionalLstmLayerOptions:
        """Layer-specific options/config"""
        return self._options

    @property
    def activation(self) -> str:
        """Fused activation"""
        return self._options.activation_str

    @property
    def is_time_major(self) -> bool:
        """Return if this kernel uses time major or batch major"""
        return self._options.timeMajor

    @property
    def cell_clip(self) -> float:
        """Value of the cellClip layer option"""
        return self._options.cellClip

    @property
    def proj_clip(self) -> float:
        """Value of the projClip layer option"""
        return self._options.projClip

    @property
    def n_cells(self) -> int:
        """Number of LSTM cells"""
        return self.input_to_cell_weights_tensor.shape[0]

    @property
    def input_tensor(self) -> TfliteTensor:
        """Input tensor of size {n_batch, n_input}"""
        return self._inputs[0]

    @property
    def input_data(self) -> np.ndarray:
        """Input tensor data"""
        return self.input_tensor.data

    @property
    def output_tensor(self) -> TfliteTensor:
        """Output tensor data"""
        return self._outputs[0]

    @property
    def output_data(self) -> np.ndarray:
        """Output tensor data"""
        return self.output_tensor.data

    @property
    def input_to_input_weights_tensor(self) -> TfliteTensor:
        """(Optional) Input weight tensor of size: {n_cell, n_input}"""
        return self._inputs[1]

    @property
    def input_to_forget_weights_tensor(self) -> TfliteTensor:
        """Input weight tensor of size: {n_cell, n_input}"""
        return self._inputs[2]

    @property
    def input_to_cell_weights_tensor(self) -> TfliteTensor:
        """Input weight tensor of size: {n_cell, n_input}"""
        return self._inputs[3]

    @property
    def input_to_output_weights_tensor(self) -> TfliteTensor:
        """Input weight tensor of size: {n_cell, n_input}"""
        return self._inputs[4]

    @property
    def recurrent_to_input_weights_tensor(self) -> TfliteTensor:
        """(Optional) Recurrent weight tensor of size {n_cell, n_output}"""
        return self._inputs[5]

    @property
    def recurrent_to_forget_weights_tensor(self) -> TfliteTensor:
        """Recurrent weight tensor of size {n_cell, n_output}"""
        return self._inputs[6]

    @property
    def recurrent_to_cell_weights_tensor(self) -> TfliteTensor:
        """Recurrent weight tensor of size {n_cell, n_output}"""
        return self._inputs[7]

    @property
    def recurrent_to_output_weights_tensor(self) -> TfliteTensor:
        """Recurrent weight tensor of size {n_cell, n_output}"""
        return self._inputs[8]

    @property
    def cell_to_input_weights_tensor(self) -> TfliteTensor:
        """(Optional) Peephole weights tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[9]

    @property
    def cell_to_forget_weights_tensor(self) -> TfliteTensor:
        """(Optional) Peephole weights tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[10]

    @property
    def cell_to_output_weights_tensor(self) -> TfliteTensor:
        """(Optional) Peephole weights tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[11]

    @property
    def input_gate_bias_tensor(self) -> TfliteTensor:
        """(Optional) Input gate bias tensor of size {n_cell}"""
        return self._inputs[12]

    @property
    def forget_gate_bias_tensor(self) -> TfliteTensor:
        """Forget gate bias tensor of size {n_cell}"""
        return self._inputs[13]

    @property
    def cell_gate_bias_tensor(self) -> TfliteTensor:
        """Cell gate bias tensor of size {n_cell}"""
        return self._inputs[14]

    @property
    def output_gate_bias_tensor(self) -> TfliteTensor:
        """Output gate bias tensor of size {n_cell}"""
        return self._inputs[15]

    @property
    def projection_weights_tensor(self) -> TfliteTensor:
        """(Optional) Projection weight tensor of size {n_output, n_cell}"""
        return self._inputs[16]

    @property
    def projection_bias_tensor(self) -> TfliteTensor:
        """(Optional) Projection bias tensor of size {n_output}"""
        return self._inputs[17]

    @property
    def output_state_tensor(self) -> TfliteTensor:
        """The output state tensor is defined as variable tensor, and will be modified at runtime"""
        return self._inputs[18]

    @property
    def cell_state_tensor(self) -> TfliteTensor:
        """The cell state tensor is defined as variable tensor, and will be modified at runtime"""
        return self._inputs[19]

    @property
    def input_layer_norm_coeff_tensor(self) -> TfliteTensor:
        """(Optional) Layer norm coefficient tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[20]

    @property
    def forget_layer_norm_coeff_tensor(self) -> TfliteTensor:
        """(Optional) Layer norm coefficient tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[21]

    @property
    def cell_layer_norm_coeff_tensor(self) -> TfliteTensor:
        """(Optional) Layer norm coefficient tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[22]

    @property
    def output_layer_norm_coeff_tensor(self) -> TfliteTensor:
        """(Optional) Layer norm coefficient tensor of size {n_cell}, representing a diagonal matrix"""
        return self._inputs[23]

    def get_tensor_name_value_tuples(self) -> List[Tuple[str, TfliteTensor]]:
        """Return a (name, tensor) tuple for every LSTM tensor: the 24 inputs
        in schema order, followed by the output tensor.

        Each name maps to the corresponding ``<name>_tensor`` property above.
        """
        tensor_names = [
            'input',
            'input_to_input_weights',
            'input_to_forget_weights',
            'input_to_cell_weights',
            'input_to_output_weights',
            'recurrent_to_input_weights',
            'recurrent_to_forget_weights',
            'recurrent_to_cell_weights',
            'recurrent_to_output_weights',
            'cell_to_input_weights',
            'cell_to_forget_weights',
            'cell_to_output_weights',
            'input_gate_bias',
            'forget_gate_bias',
            'cell_gate_bias',
            'output_gate_bias',
            'projection_weights',
            'projection_bias',
            'output_state',
            'cell_state',
            'input_layer_norm_coeff',
            'forget_layer_norm_coeff',
            'cell_layer_norm_coeff',
            'output_layer_norm_coeff',
            'output'
        ]
        retval = []
        for name in tensor_names:
            retval.append((name, getattr(self, f'{name}_tensor')))
        return retval
    @property
    def activation_str(self) -> str:
        """Fused activation as a string"""
        return self._activation.to_string()

    @activation_str.setter
    def activation_str(self, v):
        self._activation = TfliteActivation(v)

    @property
    def activation(self) -> TfliteActivation:
        """Fused activation"""
        return self._activation

    @activation.setter
    def activation(self, v: int):
        self._activation = TfliteActivation(v)

    def __str__(self):
        return f'Time major:{self.timeMajor}, Activation:{self.activation_str}, Cell clip:{self.cellClip}'
Important: We use cookies only for functional and traffic-analytics purposes.
We do not use cookies for any marketing purposes. By using our site, you acknowledge that you have read and understood our Cookie Policy.