UAE-Large-V1/openvino/openvino_model_qint8_quanti...

<?xml version="1.0"?>
<net name="Model8247" version="11">
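	<!-- OpenVINO IR (version 11) of the UAE-Large-V1 sentence-embedding model, statically
	     quantized to int8 with NNCF (the *_qint8_quantized export). Judging by the shapes
	     below, this is a BERT-large-sized encoder: vocab 30522, hidden size 1024, 16
	     attention heads of 64 dims each, 4096-dim FFN, 512 max positions. Each Const layer
	     references a byte range (offset/size) in the companion .bin weights file. -->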
<layers>
<layer id="2" name="input_ids" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="input_ids">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="1" name="attention_mask" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="attention_mask">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="0" name="token_type_ids" type="Parameter" version="opset1">
<data shape="?,?" element_type="i64" />
<output>
<port id="0" precision="I64" names="token_type_ids">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
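	<!-- Graph inputs: input_ids, attention_mask and token_type_ids, all i64 with dynamic
	     [batch, sequence] shape ("?,?"). The embedding subgraph below sums word, token-type
	     and position embeddings before the first LayerNorm. -->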
<layer id="3" name="Constant_7690717" type="Const" version="opset1">
<data element_type="i8" shape="30522, 1024" offset="0" size="31254528" />
<output>
<port id="0" precision="I8">
<dim>30522</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="4" name="Convert_7690718" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>30522</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>30522</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="5" name="Constant_7690719" type="Const" version="opset1">
<data element_type="f32" shape="30522, 1" offset="31254528" size="122088" />
<output>
<port id="0" precision="FP32">
<dim>30522</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="6" name="__module.embeddings.word_embeddings/aten::embedding/Gather/fq_weights_0" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>30522</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>30522</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>30522</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="7" name="__module.embeddings.word_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="8" name="__module.embeddings.word_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="31376616" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="9" name="__module.embeddings.word_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>30522</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="79,inputs_embeds">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
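	<!-- The layers above show the NNCF weight-dequantization idiom used throughout this
	     file: Const(i8) -> Convert(f32) -> Multiply by a per-row f32 scale (here
	     [30522, 1]), feeding a Gather (axis 0, indices converted to i32) that performs the
	     aten::embedding lookup. The token-type embedding next repeats the idiom with a
	     2 x 1024 table. -->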
<layer id="10" name="Constant_7690721" type="Const" version="opset1">
<data element_type="i8" shape="2, 1024" offset="31376620" size="2048" />
<output>
<port id="0" precision="I8">
<dim>2</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="11" name="Convert_7690722" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>2</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>2</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="12" name="Constant_7690723" type="Const" version="opset1">
<data element_type="f32" shape="2, 1" offset="31378668" size="8" />
<output>
<port id="0" precision="FP32">
<dim>2</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="13" name="__module.embeddings.token_type_embeddings/aten::embedding/Gather/fq_weights_0" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>2</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>2</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="14" name="__module.embeddings.token_type_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="15" name="__module.embeddings.token_type_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="31376616" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="16" name="__module.embeddings.token_type_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>2</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="81,token_type_embeddings.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="17" name="__module.embeddings/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="82_1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
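	<!-- Position embeddings: the position-ids buffer is folded into a [1, 512] i64
	     constant (layer 22). ShapeOf/Gather extract the runtime sequence length from the
	     input shape, a Slice trims the constant to [1, seq_len], and the result is
	     gathered from the 512 x 1024 position table (same int8 dequantization pattern as
	     above). -->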
<layer id="18" name="Constant_7690725" type="Const" version="opset1">
<data element_type="i8" shape="512, 1024" offset="31378676" size="524288" />
<output>
<port id="0" precision="I8">
<dim>512</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="19" name="Convert_7690726" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>512</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>512</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="20" name="Constant_7690727" type="Const" version="opset1">
<data element_type="f32" shape="512, 1" offset="31902964" size="2048" />
<output>
<port id="0" precision="FP32">
<dim>512</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="21" name="__module.embeddings.position_embeddings/aten::embedding/Gather/fq_weights_0" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>512</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>512</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>512</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="22" name="__module.embeddings/aten::slice/Slice" type="Const" version="opset1">
<data element_type="i64" shape="1, 512" offset="31905012" size="4096" />
<output>
<port id="0" precision="I64" names="76">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="23" name="__module.embeddings/aten::slice/Reshape" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909108" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="24" name="ShapeOf_6596474" type="ShapeOf" version="opset3">
<data output_type="i64" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="25" name="Constant_6596614" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909116" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="26" name="Constant_6596476" type="Const" version="opset1">
<data element_type="i64" shape="" offset="31909108" size="8" />
<output>
<port id="0" precision="I64" />
</output>
</layer>
<layer id="27" name="Gather_6596477" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="I64" names="10,17,19,72,74,75,8">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="28" name="__module.embeddings/aten::slice/Reshape_2" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909116" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="29" name="__module.embeddings/aten::slice/Reshape_3" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909116" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="30" name="__module.embeddings/aten::slice/Slice_1" type="Slice" version="opset8">
<input>
<port id="0" precision="I64">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>1</dim>
</port>
<port id="3" precision="I64">
<dim>1</dim>
</port>
<port id="4" precision="I64">
<dim>1</dim>
</port>
</input>
<output>
<port id="5" precision="I64" names="77">
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="31" name="__module.embeddings.position_embeddings/aten::embedding/Convert" type="Convert" version="opset1">
<data destination_type="i32" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="I32">
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="32" name="__module.embeddings.position_embeddings/aten::embedding/Constant" type="Const" version="opset1">
<data element_type="i32" shape="" offset="31376616" size="4" />
<output>
<port id="0" precision="I32" />
</output>
</layer>
<layer id="33" name="__module.embeddings.position_embeddings/aten::embedding/Gather" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="FP32">
<dim>512</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="2" precision="I32" />
</input>
<output>
<port id="3" precision="FP32" names="84,position_embeddings.1">
<dim>1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="34" name="__module.embeddings/aten::add_/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="82,embeddings.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
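	<!-- aten::layer_norm is decomposed into MVN over the last axis (eps ~1e-12,
	     INSIDE_SQRT) followed by an elementwise Multiply (gamma) and Add (beta), each a
	     [1, 1, 1024] constant. Every LayerNorm in this file uses the same decomposition. -->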
<layer id="35" name="__module.embeddings.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="36" name="__module.embeddings.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="37" name="Constant_6596043" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="31909128" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="38" name="__module.embeddings.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="39" name="Constant_6596044" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="31913224" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="40" name="__module.embeddings.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="89,input.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
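	<!-- Activation quantization: an NNCF SmoothQuant Multiply first rescales each of the
	     1024 channels, then a FakeQuantize with levels="256" (8-bit) clamps and rounds the
	     tensor using scalar input/output low/high constants. This Multiply + FakeQuantize
	     pair precedes every quantized MatMul below. -->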
<layer id="41" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="31917320" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="42" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="43" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="31921416" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="44" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="31921420" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="45" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="31921416" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="46" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="31921420" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="47" name="__module.embeddings.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
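	<!-- Encoder layer 0, self-attention. Query/key/value are 1024 -> 1024 linears: the
	     dequantized int8 weight feeds a MatMul with transpose_b="true", a bias Add
	     follows, and (for query and key in this listing) the result passes through another
	     8-bit FakeQuantize. -->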
<layer id="48" name="Constant_7690729" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="31921424" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="49" name="Convert_7690730" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="50" name="Constant_7690731" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="32970000" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="51" name="__module.encoder.layer.0.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="52" name="__module.encoder.layer.0.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="53" name="Constant_6596045" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="32974096" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="54" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="165,x.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="55" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="32978192" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="56" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="32978196" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="57" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="32978192" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="58" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="32978196" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="59" name="__module.encoder.layer.0.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
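	<!-- Multi-head split: Reshape [batch, seq, 1024] -> [batch, seq, 16, 64], then
	     Transpose (aten::permute) to [batch, 16, seq, 64]. Key and value repeat the same
	     projection and split below. -->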
<layer id="60" name="__module.encoder.layer.0.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="61" name="__module.encoder.layer.0.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="169,x.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="62" name="Constant_6580646" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="170">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="63" name="__module.encoder.layer.0.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="171">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="64" name="Constant_7690733" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="32978264" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="65" name="Convert_7690734" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="66" name="Constant_7690735" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="34026840" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="67" name="__module.encoder.layer.0.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="68" name="__module.encoder.layer.0.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="69" name="Constant_6596046" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="34030936" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="70" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="174,x.5">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="71" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="34035032" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="72" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="34035036" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="73" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="34035032" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="74" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="34035036" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="75" name="__module.encoder.layer.0.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="76" name="__module.encoder.layer.0.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="77" name="__module.encoder.layer.0.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="178,x.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="78" name="Constant_6580671" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="179">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="79" name="__module.encoder.layer.0.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="180">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="80" name="Constant_7690737" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="34035040" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="81" name="Convert_7690738" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="82" name="Constant_7690739" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="35083616" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="83" name="__module.encoder.layer.0.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="84" name="__module.encoder.layer.0.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="85" name="Constant_6596047" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="35087712" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="86" name="__module.encoder.layer.0.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="183,x.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="87" name="__module.encoder.layer.0.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="88" name="__module.encoder.layer.0.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="187,x.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="89" name="Constant_6580696" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="188">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="90" name="__module.encoder.layer.0.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="189">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
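	<!-- Additive attention-mask construction (shared by all layers): attention_mask is
	     unsqueezed twice to [batch, 1, 1, seq], broadcast, and converted to f32;
	     aten::rsub inverts it (Multiply by a scalar constant, presumably 1.0, then
	     Subtract), and Select fills the masked positions with a scalar constant,
	     presumably a large negative value so they vanish in the softmax. -->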
<layer id="91" name="Constant_6596049" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="35091808" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="92" name="25" type="Const" version="opset1">
<data element_type="i64" shape="" offset="31909116" size="8" />
<output>
<port id="0" precision="I64" names="25" />
</output>
</layer>
<layer id="93" name="aten::unsqueeze/Unsqueeze" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64" />
</input>
<output>
<port id="2" precision="I64" names="26">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="94" name="27" type="Const" version="opset1">
<data element_type="i64" shape="" offset="35091812" size="8" />
<output>
<port id="0" precision="I64" names="27" />
</output>
</layer>
<layer id="95" name="aten::unsqueeze/Unsqueeze_1" type="Unsqueeze" version="opset1">
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64" />
</input>
<output>
<port id="2" precision="I64" names="28,33">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="96" name="Constant_6596617" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909108" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="97" name="Constant_6596484" type="Const" version="opset1">
<data element_type="i64" shape="" offset="31909108" size="8" />
<output>
<port id="0" precision="I64" />
</output>
</layer>
<layer id="98" name="Gather_6596485" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="I64" names="13,15">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="99" name="Constant_6593455" type="Const" version="opset1">
<data element_type="i64" shape="1" offset="31909116" size="8" />
<output>
<port id="0" precision="I64">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="100" name="Constant_6596619" type="Const" version="opset1">
<data element_type="i64" shape="2" offset="35091820" size="16" />
<output>
<port id="0" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="101" name="Constant_6596620" type="Const" version="opset1">
<data element_type="i64" shape="" offset="31909108" size="8" />
<output>
<port id="0" precision="I64" />
</output>
</layer>
<layer id="102" name="Gather_6596621" type="Gather" version="opset8">
<data batch_dims="0" />
<input>
<port id="0" precision="I64">
<dim>2</dim>
</port>
<port id="1" precision="I64">
<dim>2</dim>
</port>
<port id="2" precision="I64" />
</input>
<output>
<port id="3" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="103" name="prim::ListConstruct/Concat" type="Concat" version="opset1">
<data axis="0" />
<input>
<port id="0" precision="I64">
<dim>1</dim>
</port>
<port id="1" precision="I64">
<dim>1</dim>
</port>
<port id="2" precision="I64">
<dim>2</dim>
</port>
</input>
<output>
<port id="3" precision="I64" names="35">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="104" name="aten::expand/Broadcast" type="Broadcast" version="opset3">
<data mode="bidirectional" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="I64" names="37">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="105" name="aten::to/Convert" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I64">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="42">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="106" name="Constant_6596048" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1, 1" offset="35091808" size="4" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="107" name="aten::rsub/Multiply" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="108" name="aten::rsub/Subtract" type="Subtract" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
<dim>1</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="45,inverted_mask">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="109" name="aten::to/Convert_1" type="Convert" version="opset1">
<data destination_type="boolean" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="1" precision="BOOL" names="50">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
<layer id="110" name="aten::masked_fill/ConvertLike" type="Const" version="opset1">
<data element_type="f32" shape="" offset="35091836" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="111" name="aten::masked_fill/Select" type="Select" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="BOOL">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="3" precision="FP32" names="52">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</output>
</layer>
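	<!-- Attention itself is the fused ScaledDotProductAttention op (opset13,
	     causal="false") over Q, K, V with the additive mask as the fourth input; with no
	     explicit scale input, the op applies the default 1/sqrt(head_dim) scaling. -->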
<layer id="112" name="__module.encoder.layer.0.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="190,attn_output.1">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
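	<!-- Merge heads: Transpose back to [batch, seq, 16, 64] and Reshape to
	     [batch, seq, 1024], then SmoothQuant + FakeQuantize ahead of the attention output
	     projection (another quantized 1024 -> 1024 linear). -->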
<layer id="113" name="__module.encoder.layer.0.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="114" name="__module.encoder.layer.0.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="191,attn_output.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="115" name="Constant_6596493" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="116" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="193">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="117" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="35091880" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="118" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="119" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="35095976" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="120" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="35095980" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="121" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="35095976" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="122" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="35095980" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="123" name="__module.encoder.layer.0.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="124" name="Constant_7690741" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="35095984" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="125" name="Convert_7690742" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="126" name="Constant_7690743" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="36144560" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="127" name="__module.encoder.layer.0.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="128" name="__module.encoder.layer.0.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="129" name="Constant_6596050" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="36148656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="130" name="__module.encoder.layer.0.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="199,input.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="131" name="__module.encoder.layer.0.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="201">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
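	<!-- The Add above is the first residual connection (attention output plus block
	     input); below, attention.output.LayerNorm (same MVN decomposition) and another
	     SmoothQuant/FakeQuantize pair precede the feed-forward block. -->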
<layer id="132" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="133" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="134" name="Constant_6596051" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="36152752" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="135" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="136" name="Constant_6596052" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="36156848" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="137" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="205,input_tensor.1">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="138" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="36160944" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="139" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="140" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="36165040" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="141" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="36165044" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="142" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="36165040" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="143" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="36165044" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="144" name="__module.encoder.layer.0.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
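	<!-- Feed-forward block: the intermediate dense expands 1024 -> 4096 (int8 weights
	     with per-row scales), a bias Add follows, then exact-ERF Gelu (opset7),
	     re-quantization, and an output dense projecting 4096 -> 1024. -->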
<layer id="145" name="Constant_7690745" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="36165048" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="146" name="Convert_7690746" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="147" name="Constant_7690747" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="40359352" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="148" name="__module.encoder.layer.0.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="149" name="__module.encoder.layer.0.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="150" name="Constant_6596053" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="40375736" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="151" name="__module.encoder.layer.0.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="210">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="152" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="211">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="153" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="40392120" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="154" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="155" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="40408504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="156" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="40408508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="157" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="40408504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="158" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="40408508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="159" name="__module.encoder.layer.0.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="160" name="Constant_7690749" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="40408512" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="161" name="Convert_7690750" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="162" name="Constant_7690751" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="44602816" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="163" name="__module.encoder.layer.0.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="164" name="__module.encoder.layer.0.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="165" name="Constant_6596054" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="44606912" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="166" name="__module.encoder.layer.0.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="217,input.5">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="167" name="__module.encoder.layer.0.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="219">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
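	<!-- Second residual add; the output LayerNorm below closes encoder layer 0. The rest
	     of the file repeats this attention + FFN subgraph for the remaining encoder
	     layers (presumably 24 in total, matching BERT-large), followed by the model
	     output. -->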
<layer id="168" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="169" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="170" name="Constant_6596055" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="44611008" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="171" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="172" name="Constant_6596056" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="44615104" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="173" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="223,hidden_states.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
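	<!-- Note: layers 168-173 decompose aten::layer_norm -- the I32 axis constant feeds an MVN (eps ~= 1e-12, eps_mode="INSIDE_SQRT") that normalizes over the last (1024-wide) axis, followed by per-channel gamma (Multiply) and beta (Add) constants. -->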
<layer id="174" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="44619200" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="175" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="176" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="44623296" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="177" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="44623300" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="178" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="44623296" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="179" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="44623300" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="180" name="__module.encoder.layer.0.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
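	<!-- Note: layers 174-180 appear to be NNCF's SmoothQuant activation path -- a per-channel smoothing scale (Multiply) rebalances the LayerNorm output before a 256-level FakeQuantize whose input and output ranges reference the same scalar constants (identical offsets), i.e. an INT8 quantize/dequantize simulation with coinciding ranges. -->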
<layer id="181" name="Constant_7690753" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="44623304" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="182" name="Convert_7690754" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="183" name="Constant_7690755" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="45671880" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="184" name="__module.encoder.layer.1.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="185" name="__module.encoder.layer.1.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="186" name="Constant_6596057" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="45675976" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="187" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="236,x.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="188" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="45680072" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="189" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="45680076" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="190" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="45680072" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="191" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="45680076" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="192" name="__module.encoder.layer.1.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="193" name="__module.encoder.layer.1.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="194" name="__module.encoder.layer.1.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="240,x.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="195" name="Constant_6580878" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="241">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="196" name="__module.encoder.layer.1.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="242">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
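	<!-- Note: layers 193-196 split the 1024-wide query projection into heads -- Reshape to [batch, seq, 16, 64], then permute to [batch, 16, seq, 64], i.e. 16 attention heads of 64 dims each; the key and value branches below repeat the same view/permute pair. -->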
<layer id="197" name="Constant_7690757" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="45680080" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="198" name="Convert_7690758" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="199" name="Constant_7690759" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="46728656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="200" name="__module.encoder.layer.1.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="201" name="__module.encoder.layer.1.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="202" name="Constant_6596058" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="46732752" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="203" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="245,x.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="204" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="46736848" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="205" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="46736852" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="206" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="46736848" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="207" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="46736852" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="208" name="__module.encoder.layer.1.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="209" name="__module.encoder.layer.1.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="210" name="__module.encoder.layer.1.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="249,x.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="211" name="Constant_6580901" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="250">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="212" name="__module.encoder.layer.1.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="251">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="213" name="Constant_7690761" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="46736856" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="214" name="Convert_7690762" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="215" name="Constant_7690763" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="47785432" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="216" name="__module.encoder.layer.1.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="217" name="__module.encoder.layer.1.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="218" name="Constant_6596059" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="47789528" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="219" name="__module.encoder.layer.1.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="254,x.21">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="220" name="__module.encoder.layer.1.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="221" name="__module.encoder.layer.1.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="258,x.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="222" name="Constant_6580924" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="259">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="223" name="__module.encoder.layer.1.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="260">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="224" name="__module.encoder.layer.1.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="261,attn_output.5">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
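	<!-- Note: layer 224 fuses the attention core into a single ScaledDotProductAttention (opset13, causal="false") -- ports 0-2 take the permuted Q/K/V tensors and port 3 a broadcastable [batch, 1, seq, seq] mask, presumably derived from the attention_mask input earlier in the graph. -->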
<layer id="225" name="__module.encoder.layer.1.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="226" name="__module.encoder.layer.1.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="262,attn_output.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="227" name="Constant_6596494" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="228" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="264">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
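	<!-- Note: layers 225-228 invert the head split -- transpose back to [batch, seq, 16, 64] and reshape to [batch, seq, 1024], merging the 16 heads before the attention output projection. -->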
<layer id="229" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="47793624" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="230" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="231" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="47797720" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="232" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="47797724" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="233" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="47797720" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="234" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="47797724" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="235" name="__module.encoder.layer.1.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="236" name="Constant_7690765" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="47797728" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="237" name="Convert_7690766" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="238" name="Constant_7690767" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="48846304" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="239" name="__module.encoder.layer.1.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="240" name="__module.encoder.layer.1.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="241" name="Constant_6596060" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="48850400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="242" name="__module.encoder.layer.1.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="270,input.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="243" name="__module.encoder.layer.1.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="272">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="244" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="245" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="246" name="Constant_6596061" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="48854496" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="247" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="248" name="Constant_6596062" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="48858592" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="249" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="276,input_tensor.3">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="250" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="48862688" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="251" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="252" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="48866784" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="253" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="48866788" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="254" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="48866784" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="255" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="48866788" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="256" name="__module.encoder.layer.1.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="257" name="Constant_7690769" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="48866792" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="258" name="Convert_7690770" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="259" name="Constant_7690771" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="53061096" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="260" name="__module.encoder.layer.1.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="261" name="__module.encoder.layer.1.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="262" name="Constant_6596063" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="53077480" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="263" name="__module.encoder.layer.1.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="281">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="264" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="282">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
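	<!-- Note: layers 257-264 form the feed-forward block of encoder layer 1 -- a 1024-to-4096 intermediate linear followed by Gelu with approximation_mode="ERF" (consistent with BERT's exact erf-based GELU); the 4096-to-1024 output projection follows below. -->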
<layer id="265" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="53093864" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="266" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="267" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="53110248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="268" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="53110252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="269" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="53110248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="270" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="53110252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="271" name="__module.encoder.layer.1.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="272" name="Constant_7690773" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="53110256" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="273" name="Convert_7690774" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="274" name="Constant_7690775" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="57304560" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="275" name="__module.encoder.layer.1.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="276" name="__module.encoder.layer.1.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="277" name="Constant_6596064" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="57308656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="278" name="__module.encoder.layer.1.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="288,input.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="279" name="__module.encoder.layer.1.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="290">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="280" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="281" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="282" name="Constant_6596065" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="57312752" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="283" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="284" name="Constant_6596066" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="57316848" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="285" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="294,hidden_states.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
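	<!-- Note: layers 279-285 close encoder layer 1 -- the residual Add feeds the MVN-based LayerNorm producing hidden_states.13; the layers that follow repeat the same quantize/attention/FFN structure for encoder layer 2. -->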
<layer id="286" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="57320944" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="287" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="288" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="57325040" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="289" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="57325044" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="290" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="57325040" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="291" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="57325044" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="292" name="__module.encoder.layer.1.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="293" name="Constant_7690777" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="57325048" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="294" name="Convert_7690778" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="295" name="Constant_7690779" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="58373624" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="296" name="__module.encoder.layer.2.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="297" name="__module.encoder.layer.2.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="298" name="Constant_6596067" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="58377720" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="299" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="307,x.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="300" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="58381816" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="301" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="58381820" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="302" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="58381816" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="303" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="58381820" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="304" name="__module.encoder.layer.2.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="305" name="__module.encoder.layer.2.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="306" name="__module.encoder.layer.2.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="311,x.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="307" name="Constant_6581104" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="312">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="308" name="__module.encoder.layer.2.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="313">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="309" name="Constant_7690781" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="58381824" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="310" name="Convert_7690782" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="311" name="Constant_7690783" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="59430400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="312" name="__module.encoder.layer.2.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="313" name="__module.encoder.layer.2.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="314" name="Constant_6596068" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="59434496" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="315" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="316,x.29">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="316" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="59438592" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="317" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="59438596" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="318" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="59438592" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="319" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="59438596" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="320" name="__module.encoder.layer.2.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="321" name="__module.encoder.layer.2.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="322" name="__module.encoder.layer.2.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="320,x.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="323" name="Constant_6581127" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="321">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="324" name="__module.encoder.layer.2.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="322">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="325" name="Constant_7690785" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="59438600" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="326" name="Convert_7690786" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="327" name="Constant_7690787" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="60487176" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="328" name="__module.encoder.layer.2.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="329" name="__module.encoder.layer.2.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="330" name="Constant_6596069" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="60491272" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="331" name="__module.encoder.layer.2.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="325,x.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="332" name="__module.encoder.layer.2.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="333" name="__module.encoder.layer.2.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="329,x.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="334" name="Constant_6581150" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="330">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="335" name="__module.encoder.layer.2.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="331">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="336" name="__module.encoder.layer.2.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="332,attn_output.9">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="337" name="__module.encoder.layer.2.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="338" name="__module.encoder.layer.2.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="333,attn_output.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="339" name="Constant_6596495" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="340" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="335">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="341" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="60495368" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="342" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="343" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="60499464" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="344" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="60499468" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="345" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="60499464" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="346" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="60499468" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="347" name="__module.encoder.layer.2.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="348" name="Constant_7690789" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="60499472" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="349" name="Convert_7690790" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="350" name="Constant_7690791" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="61548048" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="351" name="__module.encoder.layer.2.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="352" name="__module.encoder.layer.2.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="353" name="Constant_6596070" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="61552144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="354" name="__module.encoder.layer.2.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="341,input.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="355" name="__module.encoder.layer.2.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="343">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="356" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="357" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="358" name="Constant_6596071" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="61556240" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="359" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="360" name="Constant_6596072" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="61560336" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="361" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="347,input_tensor.5">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="362" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="61564432" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="363" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="364" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="61568528" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="365" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="61568532" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="366" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="61568528" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="367" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="61568532" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="368" name="__module.encoder.layer.2.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="369" name="Constant_7690793" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="61568536" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="370" name="Convert_7690794" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="371" name="Constant_7690795" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="65762840" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="372" name="__module.encoder.layer.2.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="373" name="__module.encoder.layer.2.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="374" name="Constant_6596073" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="65779224" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="375" name="__module.encoder.layer.2.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="352">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="376" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="353">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="377" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="65795608" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="378" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="379" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="65811992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="380" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="65811996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="381" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="65811992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="382" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="65811996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="383" name="__module.encoder.layer.2.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="384" name="Constant_7690797" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="65812000" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="385" name="Convert_7690798" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="386" name="Constant_7690799" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="70006304" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="387" name="__module.encoder.layer.2.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="388" name="__module.encoder.layer.2.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="389" name="Constant_6596074" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="70010400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="390" name="__module.encoder.layer.2.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="359,input.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="391" name="__module.encoder.layer.2.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="361">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="392" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="393" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="394" name="Constant_6596075" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="70014496" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="395" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="396" name="Constant_6596076" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="70018592" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="397" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="365,hidden_states.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="398" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="70022688" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="399" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="400" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="70026784" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="401" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="70026788" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="402" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="70026784" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="403" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="70026788" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="404" name="__module.encoder.layer.2.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="405" name="Constant_7690801" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="70026792" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="406" name="Convert_7690802" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="407" name="Constant_7690803" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="71075368" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="408" name="__module.encoder.layer.3.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="409" name="__module.encoder.layer.3.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="410" name="Constant_6596077" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="71079464" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="411" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="378,x.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="412" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="71083560" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="413" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="71083564" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="414" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="71083560" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="415" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="71083564" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="416" name="__module.encoder.layer.3.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="417" name="__module.encoder.layer.3.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="418" name="__module.encoder.layer.3.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="382,x.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="419" name="Constant_6581330" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="383">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="420" name="__module.encoder.layer.3.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="384">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="421" name="Constant_7690805" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="71083568" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="422" name="Convert_7690806" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="423" name="Constant_7690807" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="72132144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="424" name="__module.encoder.layer.3.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="425" name="__module.encoder.layer.3.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="426" name="Constant_6596078" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="72136240" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="427" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="387,x.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="428" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="72140336" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="429" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="72140340" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="430" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="72140336" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="431" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="72140340" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="432" name="__module.encoder.layer.3.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="433" name="__module.encoder.layer.3.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="434" name="__module.encoder.layer.3.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="391,x.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="435" name="Constant_6581353" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="392">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="436" name="__module.encoder.layer.3.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="393">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="437" name="Constant_7690809" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="72140344" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="438" name="Convert_7690810" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="439" name="Constant_7690811" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="73188920" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="440" name="__module.encoder.layer.3.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="441" name="__module.encoder.layer.3.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="442" name="Constant_6596079" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="73193016" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="443" name="__module.encoder.layer.3.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="396,x.45">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="444" name="__module.encoder.layer.3.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="445" name="__module.encoder.layer.3.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="400,x.47">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="446" name="Constant_6581376" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="401">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="447" name="__module.encoder.layer.3.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="402">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="448" name="__module.encoder.layer.3.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="403,attn_output.13">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="449" name="__module.encoder.layer.3.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="450" name="__module.encoder.layer.3.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="404,attn_output.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="451" name="Constant_6596496" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="452" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="406">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="453" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="73197112" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="454" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="455" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="73201208" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="456" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="73201212" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="457" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="73201208" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="458" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="73201212" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="459" name="__module.encoder.layer.3.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="460" name="Constant_7690813" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="73201216" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="461" name="Convert_7690814" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="462" name="Constant_7690815" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="74249792" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="463" name="__module.encoder.layer.3.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="464" name="__module.encoder.layer.3.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="465" name="Constant_6596080" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="74253888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="466" name="__module.encoder.layer.3.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="412,input.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="467" name="__module.encoder.layer.3.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="414">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="468" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="469" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="470" name="Constant_6596081" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="74257984" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="471" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="472" name="Constant_6596082" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="74262080" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="473" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="418,input_tensor.7">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="474" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="74266176" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="475" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="476" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="74270272" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="477" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="74270276" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="478" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="74270272" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="479" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="74270276" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="480" name="__module.encoder.layer.3.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="481" name="Constant_7690817" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="74270280" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="482" name="Convert_7690818" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="483" name="Constant_7690819" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="78464584" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="484" name="__module.encoder.layer.3.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="485" name="__module.encoder.layer.3.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="486" name="Constant_6596083" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="78480968" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="487" name="__module.encoder.layer.3.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="423">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="488" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="424">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="489" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="78497352" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="490" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="491" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="78513736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="492" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="78513740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="493" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="78513736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="494" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="78513740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="495" name="__module.encoder.layer.3.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="496" name="Constant_7690821" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="78513744" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="497" name="Convert_7690822" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="498" name="Constant_7690823" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="82708048" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="499" name="__module.encoder.layer.3.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="500" name="__module.encoder.layer.3.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="501" name="Constant_6596084" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="82712144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="502" name="__module.encoder.layer.3.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="430,input.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="503" name="__module.encoder.layer.3.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="432">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="504" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="505" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="506" name="Constant_6596085" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="82716240" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="507" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="508" name="Constant_6596086" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="82720336" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="509" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="436,hidden_states.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="510" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="82724432" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="511" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="512" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="82728528" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="513" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="82728532" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="514" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="82728528" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="515" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="82728532" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="516" name="__module.encoder.layer.3.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="517" name="Constant_7690825" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="82728536" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="518" name="Convert_7690826" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="519" name="Constant_7690827" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="83777112" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="520" name="__module.encoder.layer.4.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="521" name="__module.encoder.layer.4.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="522" name="Constant_6596087" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="83781208" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="523" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="449,x.49">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="524" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="83785304" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="525" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="83785308" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="526" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="83785304" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="527" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="83785308" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="528" name="__module.encoder.layer.4.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="529" name="__module.encoder.layer.4.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="530" name="__module.encoder.layer.4.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="453,x.51">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="531" name="Constant_6581556" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="454">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="532" name="__module.encoder.layer.4.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="455">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="533" name="Constant_7690829" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="83785312" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="534" name="Convert_7690830" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="535" name="Constant_7690831" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="84833888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="536" name="__module.encoder.layer.4.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="537" name="__module.encoder.layer.4.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="538" name="Constant_6596088" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="84837984" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="539" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="458,x.53">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="540" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="84842080" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="541" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="84842084" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="542" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="84842080" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="543" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="84842084" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="544" name="__module.encoder.layer.4.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="545" name="__module.encoder.layer.4.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="546" name="__module.encoder.layer.4.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="462,x.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="547" name="Constant_6581579" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="463">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="548" name="__module.encoder.layer.4.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="464">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="549" name="Constant_7690833" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="84842088" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="550" name="Convert_7690834" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="551" name="Constant_7690835" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="85890664" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="552" name="__module.encoder.layer.4.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="553" name="__module.encoder.layer.4.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="554" name="Constant_6596089" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="85894760" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="555" name="__module.encoder.layer.4.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="467,x.57">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="556" name="__module.encoder.layer.4.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="557" name="__module.encoder.layer.4.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="471,x.59">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="558" name="Constant_6581602" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="472">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="559" name="__module.encoder.layer.4.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="473">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="560" name="__module.encoder.layer.4.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="474,attn_output.17">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="561" name="__module.encoder.layer.4.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="562" name="__module.encoder.layer.4.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="475,attn_output.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="563" name="Constant_6596497" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="564" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="477">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="565" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="85898856" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="566" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="567" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="85902952" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="568" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="85902956" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="569" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="85902952" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="570" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="85902956" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="571" name="__module.encoder.layer.4.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="572" name="Constant_7690837" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="85902960" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="573" name="Convert_7690838" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="574" name="Constant_7690839" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="86951536" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="575" name="__module.encoder.layer.4.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="576" name="__module.encoder.layer.4.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="577" name="Constant_6596090" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="86955632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="578" name="__module.encoder.layer.4.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="483,input.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="579" name="__module.encoder.layer.4.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="485">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="580" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="581" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="582" name="Constant_6596091" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="86959728" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="583" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="584" name="Constant_6596092" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="86963824" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="585" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="489,input_tensor.9">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="586" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="86967920" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="587" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="588" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="86972016" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="589" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="86972020" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="590" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="86972016" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="591" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="86972020" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="592" name="__module.encoder.layer.4.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="593" name="Constant_7690841" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="86972024" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="594" name="Convert_7690842" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="595" name="Constant_7690843" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="91166328" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="596" name="__module.encoder.layer.4.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="597" name="__module.encoder.layer.4.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="598" name="Constant_6596093" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="91182712" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="599" name="__module.encoder.layer.4.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="494">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="600" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="495">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="601" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="91199096" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="602" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="603" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="91215480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="604" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="91215484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="605" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="91215480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="606" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="91215484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="607" name="__module.encoder.layer.4.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="608" name="Constant_7690845" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="91215488" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="609" name="Convert_7690846" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="610" name="Constant_7690847" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="95409792" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="611" name="__module.encoder.layer.4.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="612" name="__module.encoder.layer.4.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="613" name="Constant_6596094" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="95413888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="614" name="__module.encoder.layer.4.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="501,input.21">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="615" name="__module.encoder.layer.4.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="503">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="616" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="617" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="618" name="Constant_6596095" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="95417984" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="619" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="620" name="Constant_6596096" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="95422080" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="621" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="507,hidden_states.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="622" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="95426176" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="623" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="624" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="95430272" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="625" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="95430276" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="626" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="95430272" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="627" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="95430276" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="628" name="__module.encoder.layer.4.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="629" name="Constant_7690849" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="95430280" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="630" name="Convert_7690850" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="631" name="Constant_7690851" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="96478856" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="632" name="__module.encoder.layer.5.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="633" name="__module.encoder.layer.5.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="634" name="Constant_6596097" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="96482952" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="635" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="520,x.61">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="636" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="96487048" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="637" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="96487052" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="638" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="96487048" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="639" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="96487052" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="640" name="__module.encoder.layer.5.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="641" name="__module.encoder.layer.5.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="642" name="__module.encoder.layer.5.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="524,x.63">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="643" name="Constant_6581782" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="525">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="644" name="__module.encoder.layer.5.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="526">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="645" name="Constant_7690853" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="96487056" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="646" name="Convert_7690854" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="647" name="Constant_7690855" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="97535632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="648" name="__module.encoder.layer.5.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="649" name="__module.encoder.layer.5.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="650" name="Constant_6596098" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="97539728" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="651" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="529,x.65">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="652" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="97543824" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="653" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="97543828" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="654" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="97543824" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="655" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="97543828" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="656" name="__module.encoder.layer.5.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="657" name="__module.encoder.layer.5.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="658" name="__module.encoder.layer.5.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="533,x.67">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="659" name="Constant_6581805" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="534">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="660" name="__module.encoder.layer.5.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="535">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="661" name="Constant_7690857" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="97543832" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="662" name="Convert_7690858" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="663" name="Constant_7690859" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="98592408" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="664" name="__module.encoder.layer.5.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
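	<!--
	  Recurring weight-dequantization pattern (here for the layer.5 value
	  projection): Const(i8, 1024x1024) -> Convert(f32) -> Multiply by a
	  per-output-channel scale Const(f32, 1024x1) named
	  ".../MatMul/fq_weights_1". Effectively W[i, j] = scale[i] * q[i, j],
	  so each output channel carries its own int8 scale; the MatMul below
	  then consumes W with transpose_b="true".
	-->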
<layer id="665" name="__module.encoder.layer.5.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="666" name="Constant_6596099" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="98596504" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="667" name="__module.encoder.layer.5.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="538,x.69">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="668" name="__module.encoder.layer.5.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="669" name="__module.encoder.layer.5.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="542,x.71">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="670" name="Constant_6581828" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="543">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="671" name="__module.encoder.layer.5.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="544">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
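	<!--
	  Multi-head split for Q, K and V: 1024 hidden = 16 heads x 64 dims per
	  head. Each projection output [batch, seq, 1024] is reshaped via the
	  shared i64 Const (offset 32978200) to [batch, seq, 16, 64] and permuted
	  (offset 32978232) to [batch, 16, seq, 64] before attention.
	-->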
<layer id="672" name="__module.encoder.layer.5.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="545,attn_output.21">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
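	<!--
	  ScaledDotProductAttention (opset13, causal="false") fuses
	      softmax(Q K^T / sqrt(64) + mask) V
	  over the head-split tensors (default scale 1/sqrt(head_dim)); port 3 is
	  the additive attention mask, with a broadcastable head dim of 1 and
	  dynamic batch/seq dims. The result is transposed back to
	  [batch, seq, 16, 64] and reshaped to [batch, seq, 1024] below.
	-->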
<layer id="673" name="__module.encoder.layer.5.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="674" name="__module.encoder.layer.5.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="546,attn_output.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="675" name="Constant_6596498" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="676" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="548">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="677" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="98600600" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="678" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="679" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="98604696" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="680" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="98604700" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="681" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="98604696" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="682" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="98604700" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="683" name="__module.encoder.layer.5.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
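	<!--
	  "nncf_smooth_quant" pattern: a per-channel Multiply (scale shape
	  [1, 1, 1024]) rescales the activations before the next quantized
	  linear, followed by a scalar FakeQuantize. This is consistent with
	  NNCF's SmoothQuant, which divides activation outliers by a per-channel
	  factor and presumably folds the inverse factor into the following
	  layer's weight scales so the int8 ranges are easier to cover.
	-->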
<layer id="684" name="Constant_7690861" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="98604704" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="685" name="Convert_7690862" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="686" name="Constant_7690863" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="99653280" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="687" name="__module.encoder.layer.5.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="688" name="__module.encoder.layer.5.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="689" name="Constant_6596100" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="99657376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="690" name="__module.encoder.layer.5.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="554,input.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="691" name="__module.encoder.layer.5.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="556">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="692" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="693" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
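	<!--
	  LayerNorm is decomposed as MVN + affine: MVN normalizes over the axis
	  selected by the I32 Const at offset 31909124 (the last, 1024-wide axis)
	  with eps ~= 1e-12, eps_mode="INSIDE_SQRT", i.e.
	  (x - mean) / sqrt(var + eps); the Multiply and Add that follow apply
	  the learned gamma and beta.
	-->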
<layer id="694" name="Constant_6596101" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="99661472" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="695" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="696" name="Constant_6596102" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="99665568" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="697" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="560,input_tensor.11">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="698" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="99669664" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="699" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="700" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="99673760" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="701" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="99673764" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="702" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="99673760" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="703" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="99673764" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="704" name="__module.encoder.layer.5.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="705" name="Constant_7690865" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="99673768" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="706" name="Convert_7690866" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="707" name="Constant_7690867" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="103868072" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="708" name="__module.encoder.layer.5.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="709" name="__module.encoder.layer.5.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="710" name="Constant_6596103" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="103884456" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="711" name="__module.encoder.layer.5.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="565">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="712" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="566">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
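	<!--
	  Feed-forward block: dense 1024 -> 4096 with bias, exact-erf GELU
	  (approximation_mode="ERF": gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))),
	  then dense 4096 -> 1024, residual add and LayerNorm below.
	-->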
<layer id="713" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="103900840" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="714" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="715" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="103917224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="716" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="103917228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="717" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="103917224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="718" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="103917228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="719" name="__module.encoder.layer.5.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="720" name="Constant_7690869" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="103917232" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="721" name="Convert_7690870" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="722" name="Constant_7690871" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="108111536" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="723" name="__module.encoder.layer.5.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="724" name="__module.encoder.layer.5.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="725" name="Constant_6596104" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="108115632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="726" name="__module.encoder.layer.5.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="572,input.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="727" name="__module.encoder.layer.5.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="574">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="728" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="729" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="730" name="Constant_6596105" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="108119728" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="731" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="732" name="Constant_6596106" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="108123824" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="733" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="578,hidden_states.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="734" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="108127920" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="735" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="736" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="108132016" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="737" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="108132020" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="738" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="108132016" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="739" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="108132020" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="740" name="__module.encoder.layer.5.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
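	<!--
	  End of encoder layer 5. Layer 6 (layer id 741 onward) repeats the same
	  quantized block: Q/K/V projections, scaled dot-product attention,
	  output dense, LayerNorm, intermediate GELU FFN, each with the
	  weight-dequantization, smooth-quant and FakeQuantize patterns
	  annotated above.
	-->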
<layer id="741" name="Constant_7690873" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="108132024" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="742" name="Convert_7690874" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="743" name="Constant_7690875" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="109180600" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="744" name="__module.encoder.layer.6.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="745" name="__module.encoder.layer.6.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="746" name="Constant_6596107" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="109184696" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="747" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="591,x.73">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="748" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="109188792" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="749" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="109188796" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="750" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="109188792" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="751" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="109188796" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="752" name="__module.encoder.layer.6.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="753" name="__module.encoder.layer.6.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="754" name="__module.encoder.layer.6.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="595,x.75">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="755" name="Constant_6582008" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="596">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="756" name="__module.encoder.layer.6.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="597">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="757" name="Constant_7690877" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="109188800" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="758" name="Convert_7690878" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="759" name="Constant_7690879" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="110237376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="760" name="__module.encoder.layer.6.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="761" name="__module.encoder.layer.6.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="762" name="Constant_6596108" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="110241472" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="763" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="600,x.77">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="764" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="110245568" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="765" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="110245572" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="766" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="110245568" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="767" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="110245572" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="768" name="__module.encoder.layer.6.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="769" name="__module.encoder.layer.6.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="770" name="__module.encoder.layer.6.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="604,x.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="771" name="Constant_6582031" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="605">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="772" name="__module.encoder.layer.6.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="606">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="773" name="Constant_7690881" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="110245576" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="774" name="Convert_7690882" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="775" name="Constant_7690883" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="111294152" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="776" name="__module.encoder.layer.6.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="777" name="__module.encoder.layer.6.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="778" name="Constant_6596109" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="111298248" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="779" name="__module.encoder.layer.6.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="609,x.81">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="780" name="__module.encoder.layer.6.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="781" name="__module.encoder.layer.6.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="613,x.83">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="782" name="Constant_6582054" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="614">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="783" name="__module.encoder.layer.6.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="615">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="784" name="__module.encoder.layer.6.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="616,attn_output.25">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="785" name="__module.encoder.layer.6.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="786" name="__module.encoder.layer.6.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="617,attn_output.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="787" name="Constant_6596499" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="788" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="619">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="789" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="111302344" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="790" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="791" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="111306440" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="792" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="111306444" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="793" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="111306440" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="794" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="111306444" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="795" name="__module.encoder.layer.6.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="796" name="Constant_7690885" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="111306448" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="797" name="Convert_7690886" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="798" name="Constant_7690887" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="112355024" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="799" name="__module.encoder.layer.6.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="800" name="__module.encoder.layer.6.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="801" name="Constant_6596110" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="112359120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="802" name="__module.encoder.layer.6.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="625,input.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="803" name="__module.encoder.layer.6.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="627">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="804" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="805" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="806" name="Constant_6596111" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="112363216" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="807" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="808" name="Constant_6596112" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="112367312" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="809" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="631,input_tensor.13">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="810" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="112371408" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="811" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="812" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="112375504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="813" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="112375508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="814" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="112375504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="815" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="112375508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="816" name="__module.encoder.layer.6.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="817" name="Constant_7690889" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="112375512" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="818" name="Convert_7690890" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="819" name="Constant_7690891" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="116569816" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="820" name="__module.encoder.layer.6.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
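	<!-- Pattern note: layers 817-820 form the weight-dequantization subgraph for the
	     intermediate dense MatMul: an INT8 [4096,1024] constant is converted to FP32 and
	     multiplied by a per-output-channel [4096,1] scale, i.e. W[i,j] = scale[i] * q[i,j].
	     Since the MatMul below uses transpose_b="true", each scaled row is one output
	     channel of the 1024 -> 4096 expansion. -->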
<layer id="821" name="__module.encoder.layer.6.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="822" name="Constant_6596113" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="116586200" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="823" name="__module.encoder.layer.6.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="636">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="824" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="637">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
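	<!-- approximation_mode="ERF" selects the exact erf-based GELU rather than the tanh
	     approximation, matching the intermediate activation of a BERT-style encoder. -->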
<layer id="825" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="116602584" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="826" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="827" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="116618968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="828" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="116618972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="829" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="116618968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="830" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="116618972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="831" name="__module.encoder.layer.6.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="832" name="Constant_7690893" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="116618976" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="833" name="Convert_7690894" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="834" name="Constant_7690895" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="120813280" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="835" name="__module.encoder.layer.6.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="836" name="__module.encoder.layer.6.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="837" name="Constant_6596114" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="120817376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="838" name="__module.encoder.layer.6.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="643,input.29">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="839" name="__module.encoder.layer.6.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="645">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="840" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="841" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="842" name="Constant_6596115" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="120821472" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="843" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="844" name="Constant_6596116" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="120825568" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="845" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="649,hidden_states.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
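	<!-- Pattern note: aten::layer_norm is lowered to MVN (normalizing over the axis given
	     by the shared i32 constant at offset 31909124, presumably the last/1024-dim axis,
	     with eps ~1e-12 applied inside the sqrt) followed by a Multiply (gamma) and an
	     Add (beta). The "hidden_states.43" output here is the final hidden state of
	     encoder layer 6 and the input to layer 7 below. -->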
<layer id="846" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="120829664" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="847" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="848" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="120833760" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="849" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="120833764" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="850" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="120833760" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="851" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="120833764" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="852" name="__module.encoder.layer.6.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="853" name="Constant_7690897" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="120833768" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="854" name="Convert_7690898" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="855" name="Constant_7690899" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="121882344" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="856" name="__module.encoder.layer.7.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="857" name="__module.encoder.layer.7.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="858" name="Constant_6596117" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="121886440" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="859" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="662,x.85">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="860" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="121890536" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="861" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="121890540" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="862" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="121890536" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="863" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="121890540" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="864" name="__module.encoder.layer.7.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="865" name="__module.encoder.layer.7.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="866" name="__module.encoder.layer.7.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="666,x.87">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="867" name="Constant_6582234" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="667">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="868" name="__module.encoder.layer.7.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="668">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
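	<!-- Pattern note: this Reshape/Transpose pair splits the 1024-wide query into
	     16 heads of size 64 ([B,S,1024] -> [B,S,16,64] -> [B,16,S,64]). The shape and
	     permutation constants (offsets 32978200 / 32978232) are shared across all encoder
	     layers, and the same pair is repeated below for the key and value paths. -->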
<layer id="869" name="Constant_7690901" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="121890544" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="870" name="Convert_7690902" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="871" name="Constant_7690903" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="122939120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="872" name="__module.encoder.layer.7.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="873" name="__module.encoder.layer.7.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="874" name="Constant_6596118" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="122943216" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="875" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="671,x.89">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="876" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="122947312" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="877" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="122947316" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="878" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="122947312" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="879" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="122947316" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="880" name="__module.encoder.layer.7.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="881" name="__module.encoder.layer.7.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="882" name="__module.encoder.layer.7.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="675,x.91">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="883" name="Constant_6582257" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="676">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="884" name="__module.encoder.layer.7.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="677">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="885" name="Constant_7690905" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="122947320" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="886" name="Convert_7690906" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="887" name="Constant_7690907" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="123995896" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="888" name="__module.encoder.layer.7.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="889" name="__module.encoder.layer.7.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="890" name="Constant_6596119" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="123999992" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="891" name="__module.encoder.layer.7.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="680,x.93">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="892" name="__module.encoder.layer.7.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="893" name="__module.encoder.layer.7.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="684,x.95">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="894" name="Constant_6582280" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="685">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="895" name="__module.encoder.layer.7.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="686">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="896" name="__module.encoder.layer.7.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="687,attn_output.29">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
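	<!-- The fused opset13 ScaledDotProductAttention consumes Q, K, V in
	     [batch, heads, seq, head_dim] layout plus an attention mask on port 3 that is
	     broadcastable over the 16 heads ([-1,1,-1,-1]); causal="false", as expected for
	     a bidirectional encoder. -->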
<layer id="897" name="__module.encoder.layer.7.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="898" name="__module.encoder.layer.7.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="688,attn_output.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="899" name="Constant_6596500" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="900" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="690">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
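	<!-- Pattern note: the Transpose above (despite the auto-generated
	     "ScatterElementsUpdate" name on its order constant) restores [B,S,16,64] layout,
	     and this Reshape merges the 16 heads back into a single 1024-dim representation
	     before the attention output dense. -->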
<layer id="901" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="124004088" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="902" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="903" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="124008184" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="904" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="124008188" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="905" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="124008184" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="906" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="124008188" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="907" name="__module.encoder.layer.7.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="908" name="Constant_7690909" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="124008192" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="909" name="Convert_7690910" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="910" name="Constant_7690911" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="125056768" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="911" name="__module.encoder.layer.7.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="912" name="__module.encoder.layer.7.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="913" name="Constant_6596120" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="125060864" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="914" name="__module.encoder.layer.7.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="696,input.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="915" name="__module.encoder.layer.7.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="698">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
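	<!-- Residual connection: this Add presumably sums the attention output projection
	     with the block's input hidden state, followed by LayerNorm below (post-LN, as in
	     BERT-style encoders); the mirror-image Add/LayerNorm pair closes the FFN half of
	     the block further down. -->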
<layer id="916" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="917" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="918" name="Constant_6596121" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="125064960" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="919" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="920" name="Constant_6596122" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="125069056" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="921" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="702,input_tensor.15">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="922" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="125073152" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="923" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="924" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="125077248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="925" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="125077252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="926" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="125077248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="927" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="125077252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="928" name="__module.encoder.layer.7.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="929" name="Constant_7690913" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="125077256" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="930" name="Convert_7690914" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="931" name="Constant_7690915" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="129271560" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="932" name="__module.encoder.layer.7.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="933" name="__module.encoder.layer.7.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="934" name="Constant_6596123" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="129287944" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="935" name="__module.encoder.layer.7.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="707">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="936" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="708">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="937" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="129304328" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="938" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="939" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="129320712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="940" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="129320716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="941" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="129320712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="942" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="129320716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="943" name="__module.encoder.layer.7.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="944" name="Constant_7690917" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="129320720" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="945" name="Convert_7690918" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="946" name="Constant_7690919" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="133515024" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="947" name="__module.encoder.layer.7.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="948" name="__module.encoder.layer.7.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="949" name="Constant_6596124" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="133519120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="950" name="__module.encoder.layer.7.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="714,input.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="951" name="__module.encoder.layer.7.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="716">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="952" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="953" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="954" name="Constant_6596125" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="133523216" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="955" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="956" name="Constant_6596126" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="133527312" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="957" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="720,hidden_states.49">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="958" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="133531408" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="959" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="960" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="133535504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="961" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="133535508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="962" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="133535504" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="963" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="133535508" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="964" name="__module.encoder.layer.7.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="965" name="Constant_7690921" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="133535512" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="966" name="Convert_7690922" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="967" name="Constant_7690923" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="134584088" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="968" name="__module.encoder.layer.8.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="969" name="__module.encoder.layer.8.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="970" name="Constant_6596127" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="134588184" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="971" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="733,x.97">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="972" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="134592280" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="973" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="134592284" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="974" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="134592280" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="975" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="134592284" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="976" name="__module.encoder.layer.8.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="977" name="__module.encoder.layer.8.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="978" name="__module.encoder.layer.8.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="737,x.99">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="979" name="Constant_6582460" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="738">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="980" name="__module.encoder.layer.8.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="739">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="981" name="Constant_7690925" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="134592288" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="982" name="Convert_7690926" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="983" name="Constant_7690927" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="135640864" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="984" name="__module.encoder.layer.8.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="985" name="__module.encoder.layer.8.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
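<!-- Annotation: this is the weight decompression pattern used throughout the model, consistent with NNCF channel-wise int8 weights: an i8 constant (1024 x 1024) is converted to f32 and multiplied by a per-output-channel scale vector (1024 x 1) in the fq_weights_1 node, and the MatMul then consumes the dequantized weights transposed (transpose_b="true"). -->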
<layer id="986" name="Constant_6596128" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="135644960" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="987" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="742,x.101">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="988" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="135649056" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="989" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="135649060" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="990" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="135649056" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="991" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="135649060" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="992" name="__module.encoder.layer.8.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="993" name="__module.encoder.layer.8.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="994" name="__module.encoder.layer.8.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="746,x.103">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="995" name="Constant_6582483" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="747">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="996" name="__module.encoder.layer.8.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="748">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="997" name="Constant_7690929" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="135649064" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="998" name="Convert_7690930" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="999" name="Constant_7690931" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="136697640" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1000" name="__module.encoder.layer.8.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1001" name="__module.encoder.layer.8.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1002" name="Constant_6596129" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="136701736" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1003" name="__module.encoder.layer.8.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="751,x.105">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1004" name="__module.encoder.layer.8.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1005" name="__module.encoder.layer.8.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="755,x.107">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1006" name="Constant_6582506" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="756">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1007" name="__module.encoder.layer.8.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="757">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1008" name="__module.encoder.layer.8.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="758,attn_output.33">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
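<!-- Annotation: ScaledDotProductAttention computes softmax(Q K^T * scale + mask) V per head with causal="false"; since no explicit scale input is wired, the opset13 default of 1 / sqrt(head_size) = 1 / sqrt(64) applies. Port 3 carries the broadcastable attention mask of shape [batch, 1, seq, seq]. -->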
<layer id="1009" name="__module.encoder.layer.8.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1010" name="__module.encoder.layer.8.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="759,attn_output.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1011" name="Constant_6596501" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1012" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="761">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
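<!-- Annotation: inverse of the head split above; the Transpose restores [batch, seq, 16, 64] and the Reshape merges the heads back into a single 1024-wide activation for the attention output projection. -->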
<layer id="1013" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="136705832" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1014" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1015" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="136709928" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1016" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="136709932" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1017" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="136709928" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1018" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="136709932" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1019" name="__module.encoder.layer.8.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
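<!-- Annotation: the nncf_smooth_quant node is a per-channel Multiply applied to the activations ahead of the quantized output projection, consistent with SmoothQuant-style rebalancing, where activation outliers are shifted into the already-rescaled int8 weights; the FakeQuantize that follows then quantizes the smoothed activations to 8 bits. -->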
<layer id="1020" name="Constant_7690933" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="136709936" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1021" name="Convert_7690934" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1022" name="Constant_7690935" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="137758512" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1023" name="__module.encoder.layer.8.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1024" name="__module.encoder.layer.8.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1025" name="Constant_6596130" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="137762608" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1026" name="__module.encoder.layer.8.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="767,input.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1027" name="__module.encoder.layer.8.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="769">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1028" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1029" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1030" name="Constant_6596131" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="137766704" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1031" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1032" name="Constant_6596132" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="137770800" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1033" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="773,input_tensor.17">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
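<!-- Annotation: residual Add (layer 1027) followed by the post-attention LayerNorm, decomposed from aten::layer_norm into MVN (mean/variance normalization over the last axis, eps about 1e-12, INSIDE_SQRT) plus a learned gamma Multiply and beta Add. -->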
<layer id="1034" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="137774896" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1035" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1036" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="137778992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1037" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="137778996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1038" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="137778992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1039" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="137778996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1040" name="__module.encoder.layer.8.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1041" name="Constant_7690937" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="137779000" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1042" name="Convert_7690938" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1043" name="Constant_7690939" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="141973304" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1044" name="__module.encoder.layer.8.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1045" name="__module.encoder.layer.8.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1046" name="Constant_6596133" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="141989688" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1047" name="__module.encoder.layer.8.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="778">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1048" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="779">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
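<!-- Annotation: feed-forward block of encoder layer 8, a 1024 to 4096 linear followed by exact-erf GELU (approximation_mode="ERF"), i.e. gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))). -->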
<layer id="1049" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="142006072" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1050" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1051" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="142022456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1052" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="142022460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1053" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="142022456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1054" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="142022460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1055" name="__module.encoder.layer.8.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1056" name="Constant_7690941" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="142022464" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1057" name="Convert_7690942" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1058" name="Constant_7690943" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="146216768" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1059" name="__module.encoder.layer.8.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1060" name="__module.encoder.layer.8.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1061" name="Constant_6596134" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="146220864" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1062" name="__module.encoder.layer.8.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="785,input.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1063" name="__module.encoder.layer.8.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="787">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1064" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1065" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1066" name="Constant_6596135" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="146224960" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1067" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1068" name="Constant_6596136" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="146229056" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1069" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="791,hidden_states.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
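<!-- Annotation: the second residual Add and output LayerNorm close encoder layer 8; the result (tensor names "791,hidden_states.55") feeds the smooth-quant / FakeQuantize pair below and then encoder layer 9. -->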
<layer id="1070" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="146233152" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1071" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1072" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="146237248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1073" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="146237252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1074" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="146237248" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1075" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="146237252" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1076" name="__module.encoder.layer.8.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
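<!-- Annotation: encoder layer 9 starts here and repeats the layer-8 pattern with its own weight offsets and quantization ranges: per-channel int8 query/key/value projections, head split to [batch, 16, seq, 64], ScaledDotProductAttention, head merge, smooth-quant plus FakeQuantize, output dense, residual plus LayerNorm, and the 4096-wide GELU feed-forward block. -->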
<layer id="1077" name="Constant_7690945" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="146237256" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1078" name="Convert_7690946" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1079" name="Constant_7690947" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="147285832" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1080" name="__module.encoder.layer.9.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1081" name="__module.encoder.layer.9.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1082" name="Constant_6596137" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="147289928" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1083" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="804,x.109">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1084" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="147294024" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1085" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="147294028" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1086" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="147294024" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1087" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="147294028" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1088" name="__module.encoder.layer.9.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1089" name="__module.encoder.layer.9.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1090" name="__module.encoder.layer.9.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="808,x.111">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1091" name="Constant_6582686" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="809">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1092" name="__module.encoder.layer.9.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="810">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1093" name="Constant_7690949" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="147294032" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1094" name="Convert_7690950" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1095" name="Constant_7690951" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="148342608" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1096" name="__module.encoder.layer.9.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1097" name="__module.encoder.layer.9.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1098" name="Constant_6596138" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="148346704" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1099" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="813,x.113">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1100" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="148350800" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1101" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="148350804" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1102" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="148350800" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1103" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="148350804" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1104" name="__module.encoder.layer.9.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1105" name="__module.encoder.layer.9.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1106" name="__module.encoder.layer.9.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="817,x.115">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1107" name="Constant_6582709" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="818">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1108" name="__module.encoder.layer.9.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="819">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1109" name="Constant_7690953" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="148350808" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1110" name="Convert_7690954" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1111" name="Constant_7690955" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="149399384" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1112" name="__module.encoder.layer.9.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1113" name="__module.encoder.layer.9.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1114" name="Constant_6596139" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="149403480" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1115" name="__module.encoder.layer.9.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="822,x.117">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1116" name="__module.encoder.layer.9.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1117" name="__module.encoder.layer.9.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="826,x.119">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1118" name="Constant_6582732" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="827">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1119" name="__module.encoder.layer.9.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="828">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1120" name="__module.encoder.layer.9.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="829,attn_output.37">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1121" name="__module.encoder.layer.9.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1122" name="__module.encoder.layer.9.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="830,attn_output.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1123" name="Constant_6596502" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1124" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="832">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1125" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="149407576" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1126" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1127" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="149411672" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1128" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="149411676" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1129" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="149411672" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1130" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="149411676" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1131" name="__module.encoder.layer.9.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1132" name="Constant_7690957" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="149411680" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1133" name="Convert_7690958" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1134" name="Constant_7690959" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="150460256" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1135" name="__module.encoder.layer.9.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1136" name="__module.encoder.layer.9.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1137" name="Constant_6596140" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="150464352" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1138" name="__module.encoder.layer.9.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="838,input.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1139" name="__module.encoder.layer.9.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="840">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1140" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1141" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1142" name="Constant_6596141" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="150468448" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1143" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1144" name="Constant_6596142" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="150472544" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1145" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="844,input_tensor.19">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
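	<!-- Layers 1146-1152: NNCF SmoothQuant (a fixed per-channel Multiply, scale [1, 1, 1024]) followed by an 8-bit activation FakeQuantize (levels=256). Input and output ranges point at the same constants (identical offsets), so the node simulates plain quantize-dequantize: roughly y = round((x - lo) / (hi - lo) * 255) / 255 * (hi - lo) + lo, with x clamped to [lo, hi]. -->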
<layer id="1146" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="150476640" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1147" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1148" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="150480736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1149" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="150480740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1150" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="150480736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1151" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="150480740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1152" name="__module.encoder.layer.9.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
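	<!-- Layers 1153-1156: int8 weight storage for layer.9.intermediate.dense. The [4096, 1024] i8 Const is dequantized in-graph (Convert to f32, then Multiply by a per-output-channel scale of shape [4096, 1]); the "/fq_weights_1" suffix marks this as an NNCF weight-quantization pattern. -->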
<layer id="1153" name="Constant_7690961" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="150480744" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1154" name="Convert_7690962" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1155" name="Constant_7690963" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="154675048" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1156" name="__module.encoder.layer.9.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1157" name="__module.encoder.layer.9.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1158" name="Constant_6596143" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="154691432" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1159" name="__module.encoder.layer.9.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="849">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
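	<!-- Layer 1160: exact GELU (approximation_mode="ERF"), i.e. gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))), the standard BERT intermediate activation. -->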
<layer id="1160" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="850">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1161" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="154707816" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1162" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1163" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="154724200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1164" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="154724204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1165" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="154724200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1166" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="154724204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1167" name="__module.encoder.layer.9.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
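	<!-- Layers 1168-1172: the FFN down-projection layer.9.output.dense, the same int8 dequant pattern with a [1024, 4096] weight, taking the 4096-wide GELU activations back to the 1024-wide hidden size. -->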
<layer id="1168" name="Constant_7690965" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="154724208" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1169" name="Convert_7690966" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1170" name="Constant_7690967" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="158918512" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1171" name="__module.encoder.layer.9.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1172" name="__module.encoder.layer.9.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1173" name="Constant_6596144" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="158922608" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1174" name="__module.encoder.layer.9.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="856,input.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1175" name="__module.encoder.layer.9.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="858">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1176" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1177" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1178" name="Constant_6596145" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="158926704" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1179" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1180" name="Constant_6596146" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="158930800" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1181" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="862,hidden_states.61">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1182" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="158934896" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1183" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1184" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="158938992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1185" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="158938996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1186" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="158938992" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1187" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="158938996" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1188" name="__module.encoder.layer.9.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
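	<!-- Encoder layer 10 begins here. Layers 1189-1200: int8 query projection (1024x1024) with bias, followed by its own activation FakeQuantize; the key and value projections repeat the same pattern below. -->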
<layer id="1189" name="Constant_7690969" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="158939000" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1190" name="Convert_7690970" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1191" name="Constant_7690971" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="159987576" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1192" name="__module.encoder.layer.10.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1193" name="__module.encoder.layer.10.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1194" name="Constant_6596147" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="159991672" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1195" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="875,x.121">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1196" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="159995768" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1197" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="159995772" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1198" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="159995768" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1199" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="159995772" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1200" name="__module.encoder.layer.10.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
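	<!-- Layers 1201-1204 split the query into attention heads: Reshape to [batch, seq, 16, 64] using a shared i64 shape Const, then Transpose to [batch, 16, seq, 64]; 16 heads x 64 dims = the 1024 hidden size. -->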
<layer id="1201" name="__module.encoder.layer.10.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1202" name="__module.encoder.layer.10.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="879,x.123">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1203" name="Constant_6582912" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="880">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1204" name="__module.encoder.layer.10.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="881">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1205" name="Constant_7690973" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="159995776" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1206" name="Convert_7690974" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1207" name="Constant_7690975" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="161044352" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1208" name="__module.encoder.layer.10.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1209" name="__module.encoder.layer.10.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1210" name="Constant_6596148" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="161048448" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1211" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="884,x.125">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1212" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="161052544" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1213" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="161052548" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1214" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="161052544" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1215" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="161052548" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1216" name="__module.encoder.layer.10.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1217" name="__module.encoder.layer.10.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1218" name="__module.encoder.layer.10.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="888,x.127">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1219" name="Constant_6582935" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="889">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1220" name="__module.encoder.layer.10.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="890">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1221" name="Constant_7690977" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="161052552" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1222" name="Convert_7690978" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1223" name="Constant_7690979" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="162101128" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1224" name="__module.encoder.layer.10.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1225" name="__module.encoder.layer.10.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1226" name="Constant_6596149" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="162105224" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1227" name="__module.encoder.layer.10.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="893,x.129">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1228" name="__module.encoder.layer.10.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1229" name="__module.encoder.layer.10.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="897,x.131">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1230" name="Constant_6582958" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="898">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1231" name="__module.encoder.layer.10.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="899">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
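	<!-- Layer 1232 fuses the attention core: ScaledDotProductAttention(query, key, value, mask) with causal=false. With no explicit scale input the default 1/sqrt(64) head scaling applies, and the FP32 mask input ([batch, 1, seq, seq]) is added to the attention scores before softmax. -->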
<layer id="1232" name="__module.encoder.layer.10.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="900,attn_output.41">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
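	<!-- Layers 1233-1236 merge the heads back: Transpose to [batch, seq, 16, 64] (permutation from an i32 Const), then Reshape to [batch, seq, 1024] (tensor 903) ahead of the attention output projection. -->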
<layer id="1233" name="__module.encoder.layer.10.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1234" name="__module.encoder.layer.10.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="901,attn_output.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1235" name="Constant_6596503" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1236" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="903">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1237" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="162109320" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1238" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1239" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="162113416" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1240" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="162113420" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1241" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="162113416" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1242" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="162113420" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1243" name="__module.encoder.layer.10.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
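	<!-- Layers 1244-1250: layer.10.attention.output.dense, the post-attention projection (1024x1024 int8 plus bias), mirroring the dequant pattern used throughout. -->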
<layer id="1244" name="Constant_7690981" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="162113424" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1245" name="Convert_7690982" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1246" name="Constant_7690983" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="163162000" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1247" name="__module.encoder.layer.10.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1248" name="__module.encoder.layer.10.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1249" name="Constant_6596150" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="163166096" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1250" name="__module.encoder.layer.10.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="909,input.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1251" name="__module.encoder.layer.10.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="911">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
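	<!-- Layer 1251 above adds the attention residual; layers 1252-1257 below apply layer.10.attention.output.LayerNorm in the same MVN/Multiply/Add decomposition as layer 9. -->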
<layer id="1252" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1253" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1254" name="Constant_6596151" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="163170192" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1255" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1256" name="Constant_6596152" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="163174288" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1257" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="915,input_tensor.21">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1258" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="163178384" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1259" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1260" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="163182480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1261" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="163182484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1262" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="163182480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1263" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="163182484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1264" name="__module.encoder.layer.10.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1265" name="Constant_7690985" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="163182488" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1266" name="Convert_7690986" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1267" name="Constant_7690987" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="167376792" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1268" name="__module.encoder.layer.10.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1269" name="__module.encoder.layer.10.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1270" name="Constant_6596153" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="167393176" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1271" name="__module.encoder.layer.10.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="920">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1272" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="921">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1273" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="167409560" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1274" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1275" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="167425944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1276" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="167425948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1277" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="167425944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1278" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="167425948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1279" name="__module.encoder.layer.10.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1280" name="Constant_7690989" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="167425952" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1281" name="Convert_7690990" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1282" name="Constant_7690991" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="171620256" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1283" name="__module.encoder.layer.10.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1284" name="__module.encoder.layer.10.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1285" name="Constant_6596154" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="171624352" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1286" name="__module.encoder.layer.10.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="927,input.45">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1287" name="__module.encoder.layer.10.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="929">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1288" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1289" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1290" name="Constant_6596155" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="171628448" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1291" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1292" name="Constant_6596156" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="171632544" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1293" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="933,hidden_states.67">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1294" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="171636640" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1295" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1296" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="171640736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1297" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="171640740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1298" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="171640736" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1299" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="171640740" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1300" name="__module.encoder.layer.10.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
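	<!-- Encoder layer 11 begins here; the layers shown below (1301 onward) repeat the int8 query-projection, FakeQuantize, and head-split pattern of the previous blocks. -->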
<layer id="1301" name="Constant_7690993" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="171640744" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1302" name="Convert_7690994" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1303" name="Constant_7690995" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="172689320" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1304" name="__module.encoder.layer.11.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1305" name="__module.encoder.layer.11.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1306" name="Constant_6596157" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="172693416" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1307" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="946,x.133">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1308" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="172697512" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1309" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="172697516" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1310" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="172697512" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1311" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="172697516" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1312" name="__module.encoder.layer.11.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1313" name="__module.encoder.layer.11.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1314" name="__module.encoder.layer.11.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="950,x.135">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1315" name="Constant_6583138" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="951">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1316" name="__module.encoder.layer.11.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="952">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1317" name="Constant_7690997" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="172697520" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1318" name="Convert_7690998" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1319" name="Constant_7690999" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="173746096" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1320" name="__module.encoder.layer.11.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1321" name="__module.encoder.layer.11.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1322" name="Constant_6596158" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="173750192" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1323" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="955,x.137">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1324" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="173754288" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1325" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="173754292" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1326" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="173754288" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1327" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="173754292" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1328" name="__module.encoder.layer.11.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1329" name="__module.encoder.layer.11.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1330" name="__module.encoder.layer.11.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="959,x.139">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1331" name="Constant_6583161" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="960">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1332" name="__module.encoder.layer.11.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="961">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1333" name="Constant_7691001" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="173754296" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1334" name="Convert_7691002" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1335" name="Constant_7691003" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="174802872" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1336" name="__module.encoder.layer.11.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1337" name="__module.encoder.layer.11.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1338" name="Constant_6596159" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="174806968" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1339" name="__module.encoder.layer.11.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="964,x.141">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1340" name="__module.encoder.layer.11.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1341" name="__module.encoder.layer.11.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="968,x.143">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1342" name="Constant_6583184" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="969">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1343" name="__module.encoder.layer.11.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="970">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1344" name="__module.encoder.layer.11.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="971,attn_output.45">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1345" name="__module.encoder.layer.11.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1346" name="__module.encoder.layer.11.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="972,attn_output.47">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1347" name="Constant_6596504" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1348" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="974">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1349" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="174811064" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1350" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1351" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="174815160" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1352" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="174815164" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1353" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="174815160" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1354" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="174815164" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1355" name="__module.encoder.layer.11.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1356" name="Constant_7691005" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="174815168" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1357" name="Convert_7691006" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1358" name="Constant_7691007" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="175863744" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1359" name="__module.encoder.layer.11.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1360" name="__module.encoder.layer.11.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1361" name="Constant_6596160" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="175867840" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1362" name="__module.encoder.layer.11.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="980,input.47">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1363" name="__module.encoder.layer.11.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="982">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1364" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1365" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1366" name="Constant_6596161" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="175871936" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1367" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1368" name="Constant_6596162" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="175876032" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1369" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="986,input_tensor.23">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1370" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="175880128" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1371" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1372" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="175884224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1373" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="175884228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1374" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="175884224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1375" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="175884228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1376" name="__module.encoder.layer.11.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1377" name="Constant_7691009" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="175884232" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1378" name="Convert_7691010" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1379" name="Constant_7691011" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="180078536" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1380" name="__module.encoder.layer.11.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1381" name="__module.encoder.layer.11.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1382" name="Constant_6596163" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="180094920" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1383" name="__module.encoder.layer.11.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="991">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1384" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="992">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1385" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="180111304" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1386" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1387" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="180127688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1388" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="180127692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1389" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="180127688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1390" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="180127692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1391" name="__module.encoder.layer.11.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1392" name="Constant_7691013" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="180127696" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1393" name="Convert_7691014" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1394" name="Constant_7691015" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="184322000" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1395" name="__module.encoder.layer.11.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1396" name="__module.encoder.layer.11.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1397" name="Constant_6596164" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="184326096" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1398" name="__module.encoder.layer.11.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="998,input.49">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1399" name="__module.encoder.layer.11.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1000">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1400" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1401" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1402" name="Constant_6596165" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="184330192" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1403" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1404" name="Constant_6596166" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="184334288" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1405" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1004,hidden_states.73">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1406" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="184338384" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1407" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1408" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="184342480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1409" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="184342484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1410" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="184342480" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1411" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="184342484" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1412" name="__module.encoder.layer.11.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1413" name="Constant_7691017" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="184342488" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1414" name="Convert_7691018" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1415" name="Constant_7691019" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="185391064" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1416" name="__module.encoder.layer.12.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1417" name="__module.encoder.layer.12.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1418" name="Constant_6596167" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="185395160" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1419" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1017,x.145">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1420" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="185399256" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1421" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="185399260" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1422" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="185399256" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1423" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="185399260" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1424" name="__module.encoder.layer.12.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1425" name="__module.encoder.layer.12.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1426" name="__module.encoder.layer.12.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1021,x.147">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1427" name="Constant_6583364" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1022">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1428" name="__module.encoder.layer.12.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1023">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1429" name="Constant_7691021" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="185399264" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1430" name="Convert_7691022" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1431" name="Constant_7691023" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="186447840" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1432" name="__module.encoder.layer.12.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1433" name="__module.encoder.layer.12.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1434" name="Constant_6596168" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="186451936" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1435" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1026,x.149">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1436" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="186456032" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1437" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="186456036" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1438" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="186456032" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1439" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="186456036" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1440" name="__module.encoder.layer.12.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1441" name="__module.encoder.layer.12.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1442" name="__module.encoder.layer.12.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1030,x.151">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1443" name="Constant_6583387" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1031">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1444" name="__module.encoder.layer.12.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1032">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1445" name="Constant_7691025" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="186456040" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1446" name="Convert_7691026" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1447" name="Constant_7691027" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="187504616" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1448" name="__module.encoder.layer.12.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1449" name="__module.encoder.layer.12.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1450" name="Constant_6596169" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="187508712" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1451" name="__module.encoder.layer.12.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1035,x.153">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1452" name="__module.encoder.layer.12.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1453" name="__module.encoder.layer.12.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1039,x.155">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1454" name="Constant_6583410" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1040">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1455" name="__module.encoder.layer.12.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1041">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1456" name="__module.encoder.layer.12.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1042,attn_output.49">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1457" name="__module.encoder.layer.12.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1458" name="__module.encoder.layer.12.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1043,attn_output.51">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1459" name="Constant_6596505" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1460" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1045">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1461" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="187512808" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1462" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1463" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="187516904" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1464" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="187516908" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1465" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="187516904" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1466" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="187516908" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1467" name="__module.encoder.layer.12.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1468" name="Constant_7691029" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="187516912" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1469" name="Convert_7691030" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1470" name="Constant_7691031" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="188565488" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1471" name="__module.encoder.layer.12.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1472" name="__module.encoder.layer.12.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1473" name="Constant_6596170" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="188569584" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1474" name="__module.encoder.layer.12.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1051,input.51">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1475" name="__module.encoder.layer.12.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1053">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1476" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1477" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1478" name="Constant_6596171" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="188573680" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1479" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1480" name="Constant_6596172" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="188577776" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1481" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1057,input_tensor.25">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1482" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="188581872" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1483" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1484" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="188585968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1485" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="188585972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1486" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="188585968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1487" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="188585972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1488" name="__module.encoder.layer.12.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1489" name="Constant_7691033" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="188585976" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1490" name="Convert_7691034" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1491" name="Constant_7691035" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="192780280" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1492" name="__module.encoder.layer.12.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1493" name="__module.encoder.layer.12.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1494" name="Constant_6596173" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="192796664" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1495" name="__module.encoder.layer.12.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1062">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1496" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1063">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1497" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="192813048" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1498" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1499" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="192829432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1500" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="192829436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1501" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="192829432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1502" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="192829436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1503" name="__module.encoder.layer.12.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1504" name="Constant_7691037" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="192829440" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1505" name="Convert_7691038" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1506" name="Constant_7691039" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="197023744" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1507" name="__module.encoder.layer.12.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1508" name="__module.encoder.layer.12.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1509" name="Constant_6596174" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="197027840" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1510" name="__module.encoder.layer.12.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1069,input.53">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1511" name="__module.encoder.layer.12.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1071">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1512" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1513" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1514" name="Constant_6596175" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="197031936" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1515" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1516" name="Constant_6596176" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="197036032" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1517" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1075,hidden_states.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1518" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="197040128" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1519" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1520" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="197044224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1521" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="197044228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1522" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="197044224" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1523" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="197044228" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1524" name="__module.encoder.layer.12.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1525" name="Constant_7691041" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="197044232" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1526" name="Convert_7691042" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1527" name="Constant_7691043" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="198092808" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1528" name="__module.encoder.layer.13.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1529" name="__module.encoder.layer.13.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1530" name="Constant_6596177" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="198096904" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1531" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1088,x.157">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1532" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="198101000" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1533" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="198101004" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1534" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="198101000" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1535" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="198101004" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1536" name="__module.encoder.layer.13.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1537" name="__module.encoder.layer.13.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1538" name="__module.encoder.layer.13.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1092,x.159">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1539" name="Constant_6583590" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1093">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1540" name="__module.encoder.layer.13.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1094">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1541" name="Constant_7691045" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="198101008" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1542" name="Convert_7691046" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1543" name="Constant_7691047" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="199149584" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1544" name="__module.encoder.layer.13.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1545" name="__module.encoder.layer.13.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1546" name="Constant_6596178" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="199153680" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1547" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1097,x.161">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1548" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="199157776" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1549" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="199157780" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1550" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="199157776" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1551" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="199157780" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1552" name="__module.encoder.layer.13.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1553" name="__module.encoder.layer.13.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1554" name="__module.encoder.layer.13.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1101,x.163">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1555" name="Constant_6583613" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1102">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1556" name="__module.encoder.layer.13.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1103">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1557" name="Constant_7691049" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="199157784" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1558" name="Convert_7691050" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1559" name="Constant_7691051" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="200206360" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1560" name="__module.encoder.layer.13.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1561" name="__module.encoder.layer.13.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1562" name="Constant_6596179" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="200210456" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1563" name="__module.encoder.layer.13.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1106,x.165">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1564" name="__module.encoder.layer.13.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1565" name="__module.encoder.layer.13.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1110,x.167">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1566" name="Constant_6583636" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1111">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1567" name="__module.encoder.layer.13.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1112">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1568" name="__module.encoder.layer.13.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1113,attn_output.53">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1569" name="__module.encoder.layer.13.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1570" name="__module.encoder.layer.13.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1114,attn_output.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1571" name="Constant_6596506" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1572" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1116">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1573" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="200214552" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1574" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1575" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="200218648" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1576" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="200218652" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1577" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="200218648" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1578" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="200218652" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1579" name="__module.encoder.layer.13.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1580" name="Constant_7691053" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="200218656" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1581" name="Convert_7691054" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1582" name="Constant_7691055" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="201267232" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1583" name="__module.encoder.layer.13.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1584" name="__module.encoder.layer.13.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1585" name="Constant_6596180" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="201271328" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1586" name="__module.encoder.layer.13.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1122,input.55">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1587" name="__module.encoder.layer.13.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1124">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1588" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1589" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1590" name="Constant_6596181" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="201275424" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1591" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1592" name="Constant_6596182" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="201279520" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1593" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1128,input_tensor.27">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1594" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="201283616" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1595" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1596" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="201287712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1597" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="201287716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1598" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="201287712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1599" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="201287716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1600" name="__module.encoder.layer.13.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1601" name="Constant_7691057" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="201287720" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1602" name="Convert_7691058" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1603" name="Constant_7691059" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="205482024" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1604" name="__module.encoder.layer.13.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1605" name="__module.encoder.layer.13.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1606" name="Constant_6596183" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="205498408" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1607" name="__module.encoder.layer.13.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1133">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1608" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1134">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1609" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="205514792" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1610" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1611" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="205531176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1612" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="205531180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1613" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="205531176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1614" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="205531180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1615" name="__module.encoder.layer.13.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1616" name="Constant_7691061" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="205531184" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1617" name="Convert_7691062" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1618" name="Constant_7691063" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="209725488" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1619" name="__module.encoder.layer.13.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1620" name="__module.encoder.layer.13.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1621" name="Constant_6596184" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="209729584" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1622" name="__module.encoder.layer.13.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1140,input.57">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1623" name="__module.encoder.layer.13.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1142">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1624" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1625" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1626" name="Constant_6596185" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="209733680" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1627" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1628" name="Constant_6596186" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="209737776" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1629" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1146,hidden_states.85">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
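	<!-- Encoder layer 13 ends here: residual Add + LayerNorm (MVN with eps ~1e-12, INSIDE_SQRT, followed by the affine Multiply/Add) produces hidden_states.85. -->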
<layer id="1630" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="209741872" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1631" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1632" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="209745968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1633" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="209745972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1634" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="209745968" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1635" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="209745972" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1636" name="__module.encoder.layer.13.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
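	<!-- Activation quantization pattern: the four scalar Consts above feed identical input/output ranges into a 256-level FakeQuantize, i.e. an 8-bit quantize-dequantize of the SmoothQuant-scaled activations. Encoder layer 14 self-attention begins below; each projection stores its weights as an i8 Const dequantized by Convert plus a per-output-channel f32 scale (the fq_weights_1 Multiply) before the MatMul. -->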
<layer id="1637" name="Constant_7691065" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="209745976" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1638" name="Convert_7691066" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1639" name="Constant_7691067" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="210794552" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1640" name="__module.encoder.layer.14.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1641" name="__module.encoder.layer.14.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1642" name="Constant_6596187" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="210798648" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1643" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1159,x.169">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1644" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="210802744" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1645" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="210802748" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1646" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="210802744" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1647" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="210802748" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1648" name="__module.encoder.layer.14.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1649" name="__module.encoder.layer.14.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1650" name="__module.encoder.layer.14.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1163,x.171">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1651" name="Constant_6583816" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1164">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1652" name="__module.encoder.layer.14.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1165">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
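	<!-- Query states reshaped to [batch, seq, 16, 64] and permuted to [batch, heads, seq, head_dim]; the key and value branches below repeat the same Reshape/Transpose pattern. -->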
<layer id="1653" name="Constant_7691069" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="210802752" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1654" name="Convert_7691070" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1655" name="Constant_7691071" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="211851328" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1656" name="__module.encoder.layer.14.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1657" name="__module.encoder.layer.14.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1658" name="Constant_6596188" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="211855424" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1659" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1168,x.173">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1660" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="211859520" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1661" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="211859524" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1662" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="211859520" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1663" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="211859524" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1664" name="__module.encoder.layer.14.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1665" name="__module.encoder.layer.14.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1666" name="__module.encoder.layer.14.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1172,x.175">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1667" name="Constant_6583839" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1173">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1668" name="__module.encoder.layer.14.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1174">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1669" name="Constant_7691073" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="211859528" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1670" name="Convert_7691074" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1671" name="Constant_7691075" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="212908104" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1672" name="__module.encoder.layer.14.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1673" name="__module.encoder.layer.14.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1674" name="Constant_6596189" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="212912200" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1675" name="__module.encoder.layer.14.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1177,x.177">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1676" name="__module.encoder.layer.14.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1677" name="__module.encoder.layer.14.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1181,x.179">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1678" name="Constant_6583862" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1182">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1679" name="__module.encoder.layer.14.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1183">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
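	<!-- Fused attention: ScaledDotProductAttention (causal=false) consumes Q/K/V in [batch, 16, seq, 64] plus a broadcastable attention-mask tensor on port 3. -->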
<layer id="1680" name="__module.encoder.layer.14.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1184,attn_output.57">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1681" name="__module.encoder.layer.14.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1682" name="__module.encoder.layer.14.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1185,attn_output.59">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1683" name="Constant_6596507" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1684" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1187">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
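	<!-- Attention output is transposed back and flattened to [batch, seq, 1024]; a SmoothQuant Multiply and FakeQuantize follow before the attention output projection (attention.output.dense). -->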
<layer id="1685" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="212916296" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1686" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1687" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="212920392" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1688" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="212920396" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1689" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="212920392" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1690" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="212920396" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1691" name="__module.encoder.layer.14.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1692" name="Constant_7691077" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="212920400" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1693" name="Convert_7691078" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1694" name="Constant_7691079" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="213968976" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1695" name="__module.encoder.layer.14.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1696" name="__module.encoder.layer.14.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1697" name="Constant_6596190" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="213973072" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1698" name="__module.encoder.layer.14.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1193,input.59">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1699" name="__module.encoder.layer.14.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1195">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1700" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1701" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1702" name="Constant_6596191" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="213977168" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1703" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1704" name="Constant_6596192" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="213981264" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1705" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1199,input_tensor.29">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1706" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="213985360" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1707" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1708" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="213989456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1709" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="213989460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1710" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="213989456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1711" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="213989460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1712" name="__module.encoder.layer.14.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
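	<!-- Feed-forward block of layer 14: a 4096x1024 i8 weight expands the hidden size from 1024 to 4096, followed by Gelu (ERF approximation) and the usual SmoothQuant/FakeQuantize pair before the 1024x4096 down-projection. -->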
<layer id="1713" name="Constant_7691081" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="213989464" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1714" name="Convert_7691082" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1715" name="Constant_7691083" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="218183768" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1716" name="__module.encoder.layer.14.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1717" name="__module.encoder.layer.14.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1718" name="Constant_6596193" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="218200152" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1719" name="__module.encoder.layer.14.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1204">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1720" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1205">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1721" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="218216536" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1722" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1723" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="218232920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1724" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="218232924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1725" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="218232920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1726" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="218232924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1727" name="__module.encoder.layer.14.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1728" name="Constant_7691085" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="218232928" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1729" name="Convert_7691086" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1730" name="Constant_7691087" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="222427232" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1731" name="__module.encoder.layer.14.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1732" name="__module.encoder.layer.14.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1733" name="Constant_6596194" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="222431328" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1734" name="__module.encoder.layer.14.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1211,input.61">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1735" name="__module.encoder.layer.14.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1213">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1736" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1737" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1738" name="Constant_6596195" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="222435424" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1739" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1740" name="Constant_6596196" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="222439520" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1741" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1217,hidden_states.91">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
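	<!-- Encoder layer 14 ends here (hidden_states.91); the same SmoothQuant scale / FakeQuantize / dequantized-i8 MatMul sequence repeats for layer 15 below. -->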
<layer id="1742" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="222443616" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1743" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1744" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="222447712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1745" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="222447716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1746" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="222447712" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1747" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="222447716" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1748" name="__module.encoder.layer.14.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
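	<!-- Encoder layer 15 self-attention: structurally identical to layers 13-14; only the weight and scale offsets into the accompanying .bin file differ. -->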
<layer id="1749" name="Constant_7691089" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="222447720" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1750" name="Convert_7691090" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1751" name="Constant_7691091" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="223496296" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1752" name="__module.encoder.layer.15.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1753" name="__module.encoder.layer.15.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1754" name="Constant_6596197" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="223500392" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1755" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1230,x.181">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1756" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="223504488" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1757" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="223504492" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1758" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="223504488" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1759" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="223504492" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1760" name="__module.encoder.layer.15.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1761" name="__module.encoder.layer.15.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1762" name="__module.encoder.layer.15.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1234,x.183">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1763" name="Constant_6584042" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1235">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1764" name="__module.encoder.layer.15.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1236">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1765" name="Constant_7691093" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="223504496" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1766" name="Convert_7691094" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1767" name="Constant_7691095" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="224553072" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1768" name="__module.encoder.layer.15.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1769" name="__module.encoder.layer.15.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1770" name="Constant_6596198" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="224557168" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1771" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1239,x.185">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1772" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="224561264" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1773" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="224561268" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1774" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="224561264" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1775" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="224561268" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1776" name="__module.encoder.layer.15.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1777" name="__module.encoder.layer.15.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1778" name="__module.encoder.layer.15.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1243,x.187">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1779" name="Constant_6584065" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1244">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1780" name="__module.encoder.layer.15.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1245">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1781" name="Constant_7691097" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="224561272" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1782" name="Convert_7691098" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1783" name="Constant_7691099" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="225609848" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1784" name="__module.encoder.layer.15.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1785" name="__module.encoder.layer.15.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1786" name="Constant_6596199" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="225613944" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1787" name="__module.encoder.layer.15.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1248,x.189">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1788" name="__module.encoder.layer.15.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1789" name="__module.encoder.layer.15.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1252,x.191">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1790" name="Constant_6584088" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1253">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1791" name="__module.encoder.layer.15.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1254">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1792" name="__module.encoder.layer.15.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1255,attn_output.61">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1793" name="__module.encoder.layer.15.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1794" name="__module.encoder.layer.15.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1256,attn_output.63">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1795" name="Constant_6596508" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1796" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1258">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1797" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="225618040" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1798" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1799" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="225622136" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1800" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="225622140" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1801" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="225622136" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1802" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="225622140" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1803" name="__module.encoder.layer.15.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1804" name="Constant_7691101" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="225622144" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1805" name="Convert_7691102" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1806" name="Constant_7691103" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="226670720" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1807" name="__module.encoder.layer.15.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1808" name="__module.encoder.layer.15.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1809" name="Constant_6596200" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="226674816" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1810" name="__module.encoder.layer.15.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1264,input.63">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1811" name="__module.encoder.layer.15.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1266">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1812" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1813" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1814" name="Constant_6596201" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="226678912" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1815" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1816" name="Constant_6596202" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="226683008" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1817" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1270,input_tensor.31">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1818" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="226687104" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1819" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1820" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="226691200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1821" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="226691204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1822" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="226691200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1823" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="226691204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1824" name="__module.encoder.layer.15.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1825" name="Constant_7691105" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="226691208" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1826" name="Convert_7691106" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1827" name="Constant_7691107" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="230885512" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1828" name="__module.encoder.layer.15.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1829" name="__module.encoder.layer.15.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1830" name="Constant_6596203" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="230901896" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1831" name="__module.encoder.layer.15.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1275">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1832" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1276">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1833" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="230918280" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1834" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1835" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="230934664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1836" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="230934668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1837" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="230934664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1838" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="230934668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1839" name="__module.encoder.layer.15.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1840" name="Constant_7691109" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="230934672" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1841" name="Convert_7691110" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1842" name="Constant_7691111" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="235128976" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1843" name="__module.encoder.layer.15.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1844" name="__module.encoder.layer.15.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1845" name="Constant_6596204" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="235133072" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1846" name="__module.encoder.layer.15.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1282,input.65">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1847" name="__module.encoder.layer.15.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1284">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1848" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1849" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1850" name="Constant_6596205" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="235137168" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1851" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1852" name="Constant_6596206" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="235141264" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1853" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1288,hidden_states.97">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1854" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="235145360" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1855" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1856" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="235149456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1857" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="235149460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1858" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="235149456" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1859" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="235149460" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1860" name="__module.encoder.layer.15.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1861" name="Constant_7691113" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="235149464" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1862" name="Convert_7691114" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1863" name="Constant_7691115" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="236198040" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1864" name="__module.encoder.layer.16.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1865" name="__module.encoder.layer.16.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1866" name="Constant_6596207" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="236202136" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1867" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1301,x.193">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1868" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="236206232" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1869" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="236206236" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1870" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="236206232" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1871" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="236206236" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1872" name="__module.encoder.layer.16.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1873" name="__module.encoder.layer.16.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1874" name="__module.encoder.layer.16.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1305,x.195">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1875" name="Constant_6584268" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1306">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1876" name="__module.encoder.layer.16.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1307">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1877" name="Constant_7691117" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="236206240" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1878" name="Convert_7691118" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1879" name="Constant_7691119" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="237254816" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1880" name="__module.encoder.layer.16.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1881" name="__module.encoder.layer.16.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1882" name="Constant_6596208" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="237258912" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1883" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1310,x.197">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1884" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="237263008" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1885" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="237263012" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1886" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="237263008" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1887" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="237263012" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1888" name="__module.encoder.layer.16.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1889" name="__module.encoder.layer.16.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1890" name="__module.encoder.layer.16.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1314,x.199">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1891" name="Constant_6584291" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1315">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1892" name="__module.encoder.layer.16.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1316">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1893" name="Constant_7691121" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="237263016" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1894" name="Convert_7691122" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1895" name="Constant_7691123" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="238311592" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1896" name="__module.encoder.layer.16.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1897" name="__module.encoder.layer.16.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1898" name="Constant_6596209" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="238315688" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1899" name="__module.encoder.layer.16.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1319,x.201">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1900" name="__module.encoder.layer.16.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1901" name="__module.encoder.layer.16.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1323,x.203">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1902" name="Constant_6584314" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1324">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1903" name="__module.encoder.layer.16.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1325">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1904" name="__module.encoder.layer.16.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1326,attn_output.65">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1905" name="__module.encoder.layer.16.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1906" name="__module.encoder.layer.16.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1327,attn_output.67">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1907" name="Constant_6596509" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="1908" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1329">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1909" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="238319784" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1910" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1911" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="238323880" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1912" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="238323884" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1913" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="238323880" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1914" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="238323884" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1915" name="__module.encoder.layer.16.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1916" name="Constant_7691125" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="238323888" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1917" name="Convert_7691126" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1918" name="Constant_7691127" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="239372464" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1919" name="__module.encoder.layer.16.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1920" name="__module.encoder.layer.16.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1921" name="Constant_6596210" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="239376560" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1922" name="__module.encoder.layer.16.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1335,input.67">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1923" name="__module.encoder.layer.16.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1337">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1924" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1925" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1926" name="Constant_6596211" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="239380656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1927" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1928" name="Constant_6596212" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="239384752" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1929" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1341,input_tensor.33">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1930" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="239388848" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1931" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1932" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="239392944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1933" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="239392948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1934" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="239392944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1935" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="239392948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1936" name="__module.encoder.layer.16.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1937" name="Constant_7691129" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="239392952" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1938" name="Convert_7691130" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1939" name="Constant_7691131" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="243587256" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1940" name="__module.encoder.layer.16.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1941" name="__module.encoder.layer.16.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1942" name="Constant_6596213" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="243603640" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1943" name="__module.encoder.layer.16.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1346">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1944" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1347">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1945" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="243620024" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1946" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1947" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="243636408" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1948" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="243636412" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1949" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="243636408" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1950" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="243636412" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1951" name="__module.encoder.layer.16.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1952" name="Constant_7691133" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="243636416" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1953" name="Convert_7691134" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1954" name="Constant_7691135" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="247830720" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1955" name="__module.encoder.layer.16.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="1956" name="__module.encoder.layer.16.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1957" name="Constant_6596214" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="247834816" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1958" name="__module.encoder.layer.16.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1353,input.69">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1959" name="__module.encoder.layer.16.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1355">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1960" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1961" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1962" name="Constant_6596215" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="247838912" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1963" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1964" name="Constant_6596216" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="247843008" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1965" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1359,hidden_states.103">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1966" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="247847104" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1967" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1968" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="247851200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1969" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="247851204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1970" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="247851200" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1971" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="247851204" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1972" name="__module.encoder.layer.16.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1973" name="Constant_7691137" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="247851208" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1974" name="Convert_7691138" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1975" name="Constant_7691139" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="248899784" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1976" name="__module.encoder.layer.17.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1977" name="__module.encoder.layer.17.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1978" name="Constant_6596217" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="248903880" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1979" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1372,x.205">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1980" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="248907976" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1981" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="248907980" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1982" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="248907976" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1983" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="248907980" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1984" name="__module.encoder.layer.17.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1985" name="__module.encoder.layer.17.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1986" name="__module.encoder.layer.17.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1376,x.207">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1987" name="Constant_6584494" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1377">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="1988" name="__module.encoder.layer.17.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1378">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="1989" name="Constant_7691141" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="248907984" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1990" name="Convert_7691142" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1991" name="Constant_7691143" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="249956560" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1992" name="__module.encoder.layer.17.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1993" name="__module.encoder.layer.17.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1994" name="Constant_6596218" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="249960656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1995" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1381,x.209">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="1996" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="249964752" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1997" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="249964756" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1998" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="249964752" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="1999" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="249964756" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2000" name="__module.encoder.layer.17.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2001" name="__module.encoder.layer.17.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2002" name="__module.encoder.layer.17.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1385,x.211">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2003" name="Constant_6584517" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1386">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2004" name="__module.encoder.layer.17.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1387">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2005" name="Constant_7691145" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="249964760" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2006" name="Convert_7691146" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2007" name="Constant_7691147" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="251013336" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2008" name="__module.encoder.layer.17.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2009" name="__module.encoder.layer.17.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2010" name="Constant_6596219" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="251017432" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2011" name="__module.encoder.layer.17.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1390,x.213">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2012" name="__module.encoder.layer.17.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2013" name="__module.encoder.layer.17.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1394,x.215">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2014" name="Constant_6584540" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1395">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2015" name="__module.encoder.layer.17.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1396">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2016" name="__module.encoder.layer.17.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1397,attn_output.69">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2017" name="__module.encoder.layer.17.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2018" name="__module.encoder.layer.17.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1398,attn_output.71">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2019" name="Constant_6596510" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2020" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1400">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2021" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="251021528" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2022" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2023" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="251025624" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2024" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="251025628" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2025" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="251025624" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2026" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="251025628" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2027" name="__module.encoder.layer.17.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2028" name="Constant_7691149" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="251025632" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2029" name="Convert_7691150" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2030" name="Constant_7691151" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="252074208" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2031" name="__module.encoder.layer.17.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2032" name="__module.encoder.layer.17.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2033" name="Constant_6596220" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="252078304" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2034" name="__module.encoder.layer.17.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1406,input.71">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2035" name="__module.encoder.layer.17.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1408">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2036" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2037" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2038" name="Constant_6596221" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="252082400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2039" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2040" name="Constant_6596222" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="252086496" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2041" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1412,input_tensor.35">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2042" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="252090592" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2043" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2044" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="252094688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2045" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="252094692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2046" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="252094688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2047" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="252094692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2048" name="__module.encoder.layer.17.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2049" name="Constant_7691153" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="252094696" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2050" name="Convert_7691154" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2051" name="Constant_7691155" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="256289000" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2052" name="__module.encoder.layer.17.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2053" name="__module.encoder.layer.17.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2054" name="Constant_6596223" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="256305384" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2055" name="__module.encoder.layer.17.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1417">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2056" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1418">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2057" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="256321768" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2058" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2059" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="256338152" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2060" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="256338156" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2061" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="256338152" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2062" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="256338156" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2063" name="__module.encoder.layer.17.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2064" name="Constant_7691157" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="256338160" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2065" name="Convert_7691158" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2066" name="Constant_7691159" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="260532464" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2067" name="__module.encoder.layer.17.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2068" name="__module.encoder.layer.17.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2069" name="Constant_6596224" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="260536560" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2070" name="__module.encoder.layer.17.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1424,input.73">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2071" name="__module.encoder.layer.17.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1426">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2072" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2073" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2074" name="Constant_6596225" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="260540656" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2075" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2076" name="Constant_6596226" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="260544752" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2077" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1430,hidden_states.109">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2078" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="260548848" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2079" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2080" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="260552944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2081" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="260552948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2082" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="260552944" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2083" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="260552948" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2084" name="__module.encoder.layer.17.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2085" name="Constant_7691161" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="260552952" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2086" name="Convert_7691162" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2087" name="Constant_7691163" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="261601528" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2088" name="__module.encoder.layer.18.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2089" name="__module.encoder.layer.18.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2090" name="Constant_6596227" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="261605624" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2091" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1443,x.217">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2092" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="261609720" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2093" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="261609724" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2094" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="261609720" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2095" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="261609724" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2096" name="__module.encoder.layer.18.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2097" name="__module.encoder.layer.18.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2098" name="__module.encoder.layer.18.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1447,x.219">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2099" name="Constant_6584720" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1448">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2100" name="__module.encoder.layer.18.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1449">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2101" name="Constant_7691165" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="261609728" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2102" name="Convert_7691166" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2103" name="Constant_7691167" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="262658304" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2104" name="__module.encoder.layer.18.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2105" name="__module.encoder.layer.18.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2106" name="Constant_6596228" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="262662400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2107" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1452,x.221">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2108" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="262666496" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2109" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="262666500" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2110" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="262666496" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2111" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="262666500" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2112" name="__module.encoder.layer.18.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2113" name="__module.encoder.layer.18.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2114" name="__module.encoder.layer.18.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1456,x.223">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2115" name="Constant_6584743" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1457">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2116" name="__module.encoder.layer.18.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1458">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
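	<!-- Value projection repeats the weight dequantization pattern: i8 Const (1024x1024), Convert to f32,
	     per output channel scale Multiply (the fq_weights_1 node), then MatMul, bias Add, and the same
	     reshape/permute into heads. -->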
<layer id="2117" name="Constant_7691169" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="262666504" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2118" name="Convert_7691170" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2119" name="Constant_7691171" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="263715080" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2120" name="__module.encoder.layer.18.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2121" name="__module.encoder.layer.18.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2122" name="Constant_6596229" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="263719176" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2123" name="__module.encoder.layer.18.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1461,x.225">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2124" name="__module.encoder.layer.18.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2125" name="__module.encoder.layer.18.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1465,x.227">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2126" name="Constant_6584766" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1466">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2127" name="__module.encoder.layer.18.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1467">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
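	<!-- Fused attention for encoder layer 18: ports 0/1/2 are query/key/value in [batch, 16, seq, 64];
	     port 3 is the f32 additive attention mask, broadcast across the 16 heads via its [batch, 1, -1, -1]
	     shape. causal="false", as expected for a bidirectional encoder. -->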
<layer id="2128" name="__module.encoder.layer.18.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1468,attn_output.73">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
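	<!-- Merge heads back: Transpose (order Const at offset 35091840) restores [batch, seq, 16, 64], then
	     Reshape with the 3-element shape Const (offset 35091856) collapses the last two axes into 1024. -->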
<layer id="2129" name="__module.encoder.layer.18.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2130" name="__module.encoder.layer.18.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1469,attn_output.75">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2131" name="Constant_6596511" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2132" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1471">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2133" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="263723272" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2134" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2135" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="263727368" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2136" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="263727372" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2137" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="263727368" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2138" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="263727372" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2139" name="__module.encoder.layer.18.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2140" name="Constant_7691173" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="263727376" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2141" name="Convert_7691174" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2142" name="Constant_7691175" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="264775952" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2143" name="__module.encoder.layer.18.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2144" name="__module.encoder.layer.18.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2145" name="Constant_6596230" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="264780048" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2146" name="__module.encoder.layer.18.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1477,input.75">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
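	<!-- Residual connection and normalization: the dense output is added to the attention block input, and
	     layer_norm is decomposed into MVN (reduction axis from the I32 Const, eps ~= 1e-12, INSIDE_SQRT)
	     followed by an elementwise Multiply (gamma) and Add (beta). -->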
<layer id="2147" name="__module.encoder.layer.18.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1479">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2148" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2149" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2150" name="Constant_6596231" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="264784144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2151" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2152" name="Constant_6596232" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="264788240" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2153" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1483,input_tensor.37">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
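	<!-- Feed forward block of layer 18: SmoothQuant scale plus FakeQuantize on the LayerNorm output, int8
	     up projection 1024 -> 4096 (i8 weights with per row f32 scales), bias Add, then Gelu with
	     approximation_mode="ERF" (exact erf based GELU rather than the tanh approximation). -->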
<layer id="2154" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="264792336" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2155" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2156" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="264796432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2157" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="264796436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2158" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="264796432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2159" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="264796436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2160" name="__module.encoder.layer.18.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2161" name="Constant_7691177" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="264796440" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2162" name="Convert_7691178" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2163" name="Constant_7691179" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="268990744" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2164" name="__module.encoder.layer.18.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2165" name="__module.encoder.layer.18.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2166" name="Constant_6596233" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="269007128" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2167" name="__module.encoder.layer.18.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1488">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2168" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1489">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
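	<!-- The GELU output passes through another SmoothQuant Multiply and 256 level FakeQuantize before the
	     int8 down projection 4096 -> 1024, whose bias Add and residual follow. -->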
<layer id="2169" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="269023512" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2170" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2171" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="269039896" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2172" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="269039900" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2173" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="269039896" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2174" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="269039900" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2175" name="__module.encoder.layer.18.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2176" name="Constant_7691181" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="269039904" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2177" name="Convert_7691182" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2178" name="Constant_7691183" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="273234208" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2179" name="__module.encoder.layer.18.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2180" name="__module.encoder.layer.18.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2181" name="Constant_6596234" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="273238304" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2182" name="__module.encoder.layer.18.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1495,input.77">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2183" name="__module.encoder.layer.18.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1497">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2184" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2185" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2186" name="Constant_6596235" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="273242400" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2187" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2188" name="Constant_6596236" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="273246496" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2189" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1501,hidden_states.115">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
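	<!-- End of encoder layer 18: the second residual Add plus LayerNorm produce hidden_states.115, which
	     feeds layer 19 through the SmoothQuant/FakeQuantize pair below. -->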
<layer id="2190" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="273250592" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2191" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2192" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="273254688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2193" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="273254692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2194" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="273254688" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2195" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="273254692" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2196" name="__module.encoder.layer.18.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
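	<!-- Encoder layer 19 (layer ids 2197 onward, module names encoder.layer.19.*) repeats the layer 18
	     pattern exactly: quantized Q/K/V projections, ScaledDotProductAttention, output projection with
	     residual add and LayerNorm, and the 4096 wide GELU feed forward block; only the weight, bias, and
	     quantization constant offsets differ. For orientation, a minimal sketch of how an IR like this is
	     typically consumed (the file path is a placeholder, and this usage is an assumption about standard
	     OpenVINO practice, not something stated by this file):

	         import openvino as ov                       # OpenVINO Python API
	         core = ov.Core()
	         model = core.read_model("model.xml")        # hypothetical path; expects the matching .bin
	         compiled = core.compile_model(model, "CPU")
	         # call `compiled` with int64 token tensors matching the graph's dynamic [-1, -1] inputs
	-->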
<layer id="2197" name="Constant_7691185" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="273254696" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2198" name="Convert_7691186" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2199" name="Constant_7691187" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="274303272" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2200" name="__module.encoder.layer.19.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2201" name="__module.encoder.layer.19.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2202" name="Constant_6596237" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="274307368" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2203" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1514,x.229">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2204" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="274311464" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2205" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="274311468" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2206" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="274311464" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2207" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="274311468" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2208" name="__module.encoder.layer.19.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2209" name="__module.encoder.layer.19.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2210" name="__module.encoder.layer.19.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1518,x.231">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2211" name="Constant_6584946" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1519">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2212" name="__module.encoder.layer.19.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1520">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2213" name="Constant_7691189" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="274311472" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2214" name="Convert_7691190" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2215" name="Constant_7691191" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="275360048" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2216" name="__module.encoder.layer.19.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2217" name="__module.encoder.layer.19.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2218" name="Constant_6596238" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="275364144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2219" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1523,x.233">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2220" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="275368240" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2221" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="275368244" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2222" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="275368240" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2223" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="275368244" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2224" name="__module.encoder.layer.19.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2225" name="__module.encoder.layer.19.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2226" name="__module.encoder.layer.19.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1527,x.235">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2227" name="Constant_6584969" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1528">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2228" name="__module.encoder.layer.19.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1529">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2229" name="Constant_7691193" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="275368248" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2230" name="Convert_7691194" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2231" name="Constant_7691195" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="276416824" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2232" name="__module.encoder.layer.19.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2233" name="__module.encoder.layer.19.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2234" name="Constant_6596239" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="276420920" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2235" name="__module.encoder.layer.19.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1532,x.237">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2236" name="__module.encoder.layer.19.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2237" name="__module.encoder.layer.19.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1536,x.239">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2238" name="Constant_6584992" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1537">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2239" name="__module.encoder.layer.19.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1538">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2240" name="__module.encoder.layer.19.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1539,attn_output.77">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2241" name="__module.encoder.layer.19.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2242" name="__module.encoder.layer.19.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1540,attn_output.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2243" name="Constant_6596512" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2244" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1542">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2245" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="276425016" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2246" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2247" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="276429112" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2248" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="276429116" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2249" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="276429112" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2250" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="276429116" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2251" name="__module.encoder.layer.19.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2252" name="Constant_7691197" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="276429120" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2253" name="Convert_7691198" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2254" name="Constant_7691199" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="277477696" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2255" name="__module.encoder.layer.19.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2256" name="__module.encoder.layer.19.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2257" name="Constant_6596240" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="277481792" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2258" name="__module.encoder.layer.19.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1548,input.79">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2259" name="__module.encoder.layer.19.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1550">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2260" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2261" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2262" name="Constant_6596241" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="277485888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2263" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2264" name="Constant_6596242" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="277489984" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2265" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1554,input_tensor.39">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2266" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="277494080" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2267" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2268" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="277498176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2269" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="277498180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2270" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="277498176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2271" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="277498180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2272" name="__module.encoder.layer.19.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2273" name="Constant_7691201" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="277498184" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2274" name="Convert_7691202" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2275" name="Constant_7691203" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="281692488" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2276" name="__module.encoder.layer.19.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
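		<!-- annotation: weight dequantization chain. The weight is stored as int8 (Constant_7691201,
		     shape [4096,1024]), converted to f32, then multiplied by a per-output-channel scale of shape
		     [4096,1]; the resulting "fq_weights" tensor is the dequantized weight consumed by the MatMul
		     below with transpose_b="true", i.e. y = x matmul W^T. -->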
<layer id="2277" name="__module.encoder.layer.19.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2278" name="Constant_6596243" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="281708872" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2279" name="__module.encoder.layer.19.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1559">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2280" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1560">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
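		<!-- annotation: approximation_mode="ERF" selects the exact erf-based GELU used by BERT rather
		     than the tanh approximation. -->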
<layer id="2281" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="281725256" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2282" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2283" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="281741640" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2284" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="281741644" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2285" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="281741640" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2286" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="281741644" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2287" name="__module.encoder.layer.19.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2288" name="Constant_7691205" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="281741648" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2289" name="Convert_7691206" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2290" name="Constant_7691207" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="285935952" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2291" name="__module.encoder.layer.19.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2292" name="__module.encoder.layer.19.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2293" name="Constant_6596244" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="285940048" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2294" name="__module.encoder.layer.19.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1566,input.81">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2295" name="__module.encoder.layer.19.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1568">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2296" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2297" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2298" name="Constant_6596245" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="285944144" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2299" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2300" name="Constant_6596246" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="285948240" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2301" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1572,hidden_states.121">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
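		<!-- annotation: LayerNorm is decomposed into MVN (mean/variance normalization over the last
		     axis, eps close to 1e-12, eps_mode INSIDE_SQRT) followed by an elementwise Multiply (gamma)
		     and Add (beta), both broadcast from shape [1,1,1024]. -->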
<layer id="2302" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="285952336" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2303" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2304" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="285956432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2305" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="285956436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2306" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="285956432" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2307" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="285956436" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2308" name="__module.encoder.layer.19.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2309" name="Constant_7691209" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="285956440" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2310" name="Convert_7691210" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2311" name="Constant_7691211" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="287005016" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2312" name="__module.encoder.layer.20.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2313" name="__module.encoder.layer.20.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2314" name="Constant_6596247" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="287009112" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2315" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1585,x.241">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2316" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="287013208" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2317" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="287013212" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2318" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="287013208" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2319" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="287013212" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2320" name="__module.encoder.layer.20.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2321" name="__module.encoder.layer.20.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2322" name="__module.encoder.layer.20.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1589,x.243">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2323" name="Constant_6585172" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1590">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2324" name="__module.encoder.layer.20.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1591">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
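		<!-- annotation: the Reshape/Transpose pair splits the 1024-dim projection into 16 heads of size
		     64 and moves the head axis forward: [batch, seq, 1024] to [batch, seq, 16, 64] to
		     [batch, 16, seq, 64]. The shape and permutation Consts reuse the shared blob offsets
		     32978200 and 32978232, so every encoder layer reads the same constants. -->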
<layer id="2325" name="Constant_7691213" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="287013216" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2326" name="Convert_7691214" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2327" name="Constant_7691215" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="288061792" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2328" name="__module.encoder.layer.20.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2329" name="__module.encoder.layer.20.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2330" name="Constant_6596248" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="288065888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2331" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1594,x.245">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2332" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="288069984" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2333" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="288069988" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2334" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="288069984" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2335" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="288069988" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2336" name="__module.encoder.layer.20.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2337" name="__module.encoder.layer.20.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2338" name="__module.encoder.layer.20.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1598,x.247">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2339" name="Constant_6585195" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1599">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2340" name="__module.encoder.layer.20.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1600">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2341" name="Constant_7691217" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="288069992" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2342" name="Convert_7691218" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2343" name="Constant_7691219" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="289118568" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2344" name="__module.encoder.layer.20.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2345" name="__module.encoder.layer.20.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2346" name="Constant_6596249" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="289122664" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2347" name="__module.encoder.layer.20.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1603,x.249">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2348" name="__module.encoder.layer.20.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2349" name="__module.encoder.layer.20.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1607,x.251">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2350" name="Constant_6585218" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1608">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2351" name="__module.encoder.layer.20.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1609">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2352" name="__module.encoder.layer.20.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1610,attn_output.81">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
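		<!-- annotation: attention itself is the fused opset13 ScaledDotProductAttention with
		     causal="false"; ports 0, 1 and 2 take Q, K, V as [batch, 16, seq, 64] and port 3 takes the
		     additive attention mask with broadcastable shape [-1, 1, -1, -1]. -->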
<layer id="2353" name="__module.encoder.layer.20.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2354" name="__module.encoder.layer.20.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1611,attn_output.83">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2355" name="Constant_6596513" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2356" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1613">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
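		<!-- annotation: inverse of the head split. The Transpose restores [batch, seq, 16, 64] and the
		     Reshape merges the heads back into a single 1024-dim hidden vector before the attention
		     output projection. -->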
<layer id="2357" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="289126760" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2358" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2359" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="289130856" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2360" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="289130860" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2361" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="289130856" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2362" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="289130860" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2363" name="__module.encoder.layer.20.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2364" name="Constant_7691221" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="289130864" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2365" name="Convert_7691222" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2366" name="Constant_7691223" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="290179440" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2367" name="__module.encoder.layer.20.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2368" name="__module.encoder.layer.20.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2369" name="Constant_6596250" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="290183536" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2370" name="__module.encoder.layer.20.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1619,input.83">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2371" name="__module.encoder.layer.20.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1621">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2372" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2373" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2374" name="Constant_6596251" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="290187632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2375" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2376" name="Constant_6596252" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="290191728" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2377" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1625,input_tensor.41">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2378" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="290195824" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2379" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2380" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="290199920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2381" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="290199924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2382" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="290199920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2383" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="290199924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2384" name="__module.encoder.layer.20.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2385" name="Constant_7691225" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="290199928" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2386" name="Convert_7691226" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2387" name="Constant_7691227" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="294394232" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2388" name="__module.encoder.layer.20.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2389" name="__module.encoder.layer.20.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2390" name="Constant_6596253" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="294410616" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2391" name="__module.encoder.layer.20.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1630">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2392" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1631">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2393" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="294427000" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2394" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2395" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="294443384" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2396" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="294443388" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2397" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="294443384" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2398" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="294443388" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2399" name="__module.encoder.layer.20.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2400" name="Constant_7691229" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="294443392" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2401" name="Convert_7691230" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2402" name="Constant_7691231" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="298637696" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2403" name="__module.encoder.layer.20.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2404" name="__module.encoder.layer.20.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2405" name="Constant_6596254" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="298641792" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2406" name="__module.encoder.layer.20.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1637,input.85">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2407" name="__module.encoder.layer.20.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1639">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2408" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2409" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2410" name="Constant_6596255" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="298645888" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2411" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2412" name="Constant_6596256" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="298649984" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2413" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1643,hidden_states.127">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2414" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="298654080" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2415" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2416" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="298658176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2417" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="298658180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2418" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="298658176" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2419" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="298658180" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2420" name="__module.encoder.layer.20.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2421" name="Constant_7691233" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="298658184" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2422" name="Convert_7691234" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2423" name="Constant_7691235" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="299706760" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2424" name="__module.encoder.layer.21.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2425" name="__module.encoder.layer.21.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2426" name="Constant_6596257" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="299710856" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2427" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1656,x.253">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2428" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="299714952" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2429" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="299714956" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2430" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="299714952" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2431" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="299714956" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2432" name="__module.encoder.layer.21.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2433" name="__module.encoder.layer.21.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2434" name="__module.encoder.layer.21.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1660,x.255">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2435" name="Constant_6585398" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1661">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2436" name="__module.encoder.layer.21.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1662">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
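		<!-- Weight handling for the key projection, the same pattern as every qint8
		     linear in this IR: an I8 Const [1024, 1024] is Convert-ed to FP32 and
		     multiplied by a per-output-channel scale [1024, 1] (the fq_weights
		     Multiply), then consumed by a MatMul with transpose_b="true". -->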
<layer id="2437" name="Constant_7691237" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="299714960" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2438" name="Convert_7691238" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2439" name="Constant_7691239" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="300763536" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2440" name="__module.encoder.layer.21.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2441" name="__module.encoder.layer.21.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2442" name="Constant_6596258" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="300767632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2443" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1665,x.257">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2444" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="300771728" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2445" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="300771732" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2446" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="300771728" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2447" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="300771732" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2448" name="__module.encoder.layer.21.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2449" name="__module.encoder.layer.21.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2450" name="__module.encoder.layer.21.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1669,x.259">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2451" name="Constant_6585421" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1670">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2452" name="__module.encoder.layer.21.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1671">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2453" name="Constant_7691241" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="300771736" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2454" name="Convert_7691242" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2455" name="Constant_7691243" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="301820312" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2456" name="__module.encoder.layer.21.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2457" name="__module.encoder.layer.21.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2458" name="Constant_6596259" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="301824408" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2459" name="__module.encoder.layer.21.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1674,x.261">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
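		<!-- Note that, unlike the query and key outputs, the value activation is not
		     re-quantized after its bias Add; it flows through view/permute straight
		     into the attention node in FP32. -->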
<layer id="2460" name="__module.encoder.layer.21.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2461" name="__module.encoder.layer.21.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1678,x.263">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2462" name="Constant_6585444" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1679">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2463" name="__module.encoder.layer.21.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1680">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
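		<!-- Q, K and V (each [batch, 16, seq, 64]) meet in ScaledDotProductAttention
		     (opset13, causal="false"). The fourth input is the additive attention mask
		     broadcast as [batch, 1, seq, seq], presumably derived once from the
		     attention_mask input earlier in the graph. -->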
<layer id="2464" name="__module.encoder.layer.21.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1681,attn_output.85">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
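		<!-- The attention output is transposed back to [batch, seq, 16, 64]
		     (permutation Const at offset 35091840) and reshaped to [batch, seq, 1024],
		     re-merging the 16 heads before the output projection. -->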
<layer id="2465" name="__module.encoder.layer.21.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2466" name="__module.encoder.layer.21.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1682,attn_output.87">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2467" name="Constant_6596514" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2468" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1684">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
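		<!-- nncf_smooth_quant: a per-channel Multiply inserted by NNCF's SmoothQuant
		     pass. It rescales activations channel-wise ahead of the 8-bit FakeQuantize
		     and the quantized dense layer that follow; the matching inverse factor is
		     expected to be folded into that layer's weight scales, so the FP32 result
		     is unchanged while outlier channels quantize with less error. -->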
<layer id="2469" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="301828504" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2470" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2471" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="301832600" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2472" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="301832604" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2473" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="301832600" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2474" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="301832604" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2475" name="__module.encoder.layer.21.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2476" name="Constant_7691245" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="301832608" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2477" name="Convert_7691246" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2478" name="Constant_7691247" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="302881184" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2479" name="__module.encoder.layer.21.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2480" name="__module.encoder.layer.21.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2481" name="Constant_6596260" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="302885280" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2482" name="__module.encoder.layer.21.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1690,input.87">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2483" name="__module.encoder.layer.21.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1692">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
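		<!-- aten::layer_norm is decomposed into three ops: MVN (opset6) normalizes the
		     last axis (axis Const at offset 31909124) with eps of roughly 1e-12 applied
		     inside the square root, then a Multiply applies the learned gamma and an
		     Add applies the beta, both [1, 1, 1024]. -->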
<layer id="2484" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2485" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2486" name="Constant_6596261" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="302889376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2487" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2488" name="Constant_6596262" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="302893472" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2489" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1696,input_tensor.43">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2490" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="302897568" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2491" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2492" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="302901664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2493" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="302901668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2494" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="302901664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2495" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="302901668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2496" name="__module.encoder.layer.21.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
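		<!-- Feed-forward block of layer 21: the intermediate dense expands 1024 to 4096
		     (I8 weights [4096, 1024], per-row scales [4096, 1]) and is followed by a
		     Gelu with approximation_mode="ERF", i.e. the exact erf-based GELU rather
		     than the tanh approximation. -->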
<layer id="2497" name="Constant_7691249" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="302901672" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2498" name="Convert_7691250" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2499" name="Constant_7691251" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="307095976" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2500" name="__module.encoder.layer.21.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2501" name="__module.encoder.layer.21.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2502" name="Constant_6596263" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="307112360" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2503" name="__module.encoder.layer.21.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1701">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2504" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1702">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2505" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="307128744" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2506" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2507" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="307145128" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2508" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="307145132" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2509" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="307145128" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2510" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="307145132" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2511" name="__module.encoder.layer.21.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2512" name="Constant_7691253" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="307145136" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2513" name="Convert_7691254" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2514" name="Constant_7691255" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="311339440" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2515" name="__module.encoder.layer.21.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2516" name="__module.encoder.layer.21.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2517" name="Constant_6596264" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="311343536" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2518" name="__module.encoder.layer.21.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1708,input.89">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2519" name="__module.encoder.layer.21.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1710">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
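		<!-- Second residual Add and the closing LayerNorm of encoder layer 21; its
		     output (names="1714,hidden_states.133") is the hidden state consumed by
		     layer 22. -->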
<layer id="2520" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2521" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2522" name="Constant_6596265" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="311347632" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2523" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2524" name="Constant_6596266" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="311351728" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2525" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1714,hidden_states.133">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2526" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="311355824" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2527" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2528" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="311359920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2529" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="311359924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2530" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="311359920" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2531" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="311359924" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2532" name="__module.encoder.layer.21.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
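		<!-- Encoder layer 22 starts here and, as far as this section shows, repeats the
		     layer-21 structure node for node: quantized Q/K/V projections, SDPA, output
		     dense with residual and LayerNorm, then the 1024/4096/1024 feed-forward.
		     Only the weight/scale offsets and tensor names differ. -->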
<layer id="2533" name="Constant_7691257" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="311359928" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2534" name="Convert_7691258" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2535" name="Constant_7691259" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="312408504" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2536" name="__module.encoder.layer.22.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2537" name="__module.encoder.layer.22.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2538" name="Constant_6596267" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="312412600" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2539" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1727,x.265">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2540" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="312416696" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2541" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="312416700" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2542" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="312416696" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2543" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="312416700" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2544" name="__module.encoder.layer.22.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2545" name="__module.encoder.layer.22.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2546" name="__module.encoder.layer.22.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1731,x.267">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2547" name="Constant_6585624" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1732">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2548" name="__module.encoder.layer.22.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1733">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2549" name="Constant_7691261" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="312416704" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2550" name="Convert_7691262" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2551" name="Constant_7691263" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="313465280" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2552" name="__module.encoder.layer.22.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2553" name="__module.encoder.layer.22.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2554" name="Constant_6596268" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="313469376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2555" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1736,x.269">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2556" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="313473472" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2557" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="313473476" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2558" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="313473472" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2559" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="313473476" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2560" name="__module.encoder.layer.22.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2561" name="__module.encoder.layer.22.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2562" name="__module.encoder.layer.22.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1740,x.271">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2563" name="Constant_6585647" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1741">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2564" name="__module.encoder.layer.22.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1742">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2565" name="Constant_7691265" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="313473480" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2566" name="Convert_7691266" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2567" name="Constant_7691267" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="314522056" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2568" name="__module.encoder.layer.22.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2569" name="__module.encoder.layer.22.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2570" name="Constant_6596269" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="314526152" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2571" name="__module.encoder.layer.22.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1745,x.273">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2572" name="__module.encoder.layer.22.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2573" name="__module.encoder.layer.22.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1749,x.275">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2574" name="Constant_6585670" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1750">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2575" name="__module.encoder.layer.22.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1751">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2576" name="__module.encoder.layer.22.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1752,attn_output.89">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2577" name="__module.encoder.layer.22.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2578" name="__module.encoder.layer.22.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1753,attn_output.91">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2579" name="Constant_6596515" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2580" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1755">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2581" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="314530248" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2582" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2583" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="314534344" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2584" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="314534348" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2585" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="314534344" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2586" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="314534348" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2587" name="__module.encoder.layer.22.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2588" name="Constant_7691269" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="314534352" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2589" name="Convert_7691270" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2590" name="Constant_7691271" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="315582928" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2591" name="__module.encoder.layer.22.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2592" name="__module.encoder.layer.22.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2593" name="Constant_6596270" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="315587024" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2594" name="__module.encoder.layer.22.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1761,input.91">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2595" name="__module.encoder.layer.22.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1763">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2596" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2597" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2598" name="Constant_6596271" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="315591120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2599" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2600" name="Constant_6596272" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="315595216" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2601" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1767,input_tensor.45">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2602" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="315599312" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2603" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2604" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="315603408" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2605" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="315603412" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2606" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="315603408" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2607" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="315603412" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2608" name="__module.encoder.layer.22.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2609" name="Constant_7691273" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="315603416" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2610" name="Convert_7691274" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2611" name="Constant_7691275" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="319797720" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2612" name="__module.encoder.layer.22.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2613" name="__module.encoder.layer.22.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2614" name="Constant_6596273" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="319814104" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2615" name="__module.encoder.layer.22.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1772">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2616" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1773">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2617" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="319830488" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2618" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2619" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="319846872" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2620" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="319846876" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2621" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="319846872" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2622" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="319846876" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2623" name="__module.encoder.layer.22.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2624" name="Constant_7691277" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="319846880" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2625" name="Convert_7691278" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2626" name="Constant_7691279" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="324041184" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2627" name="__module.encoder.layer.22.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2628" name="__module.encoder.layer.22.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2629" name="Constant_6596274" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="324045280" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2630" name="__module.encoder.layer.22.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1779,input.93">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2631" name="__module.encoder.layer.22.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1781">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2632" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2633" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2634" name="Constant_6596275" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="324049376" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2635" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2636" name="Constant_6596276" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="324053472" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2637" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1785,hidden_states.139">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2638" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="324057568" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2639" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2640" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="324061664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2641" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="324061668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2642" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="324061664" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2643" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="324061668" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2644" name="__module.encoder.layer.22.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2645" name="Constant_7691281" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="324061672" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2646" name="Convert_7691282" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2647" name="Constant_7691283" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="325110248" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2648" name="__module.encoder.layer.23.attention.self.query/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2649" name="__module.encoder.layer.23.attention.self.query/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2650" name="Constant_6596277" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="325114344" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2651" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1798,x.277">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2652" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="325118440" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2653" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="325118444" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2654" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="325118440" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2655" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="325118444" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2656" name="__module.encoder.layer.23.attention.self.query/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2657" name="__module.encoder.layer.23.attention.self/prim::ListConstruct/Concat" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2658" name="__module.encoder.layer.23.attention.self/aten::view/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1802,x.279">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2659" name="Constant_6585850" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1803">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2660" name="__module.encoder.layer.23.attention.self/aten::permute/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1804">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2661" name="Constant_7691285" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="325118448" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2662" name="Convert_7691286" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2663" name="Constant_7691287" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="326167024" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2664" name="__module.encoder.layer.23.attention.self.key/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2665" name="__module.encoder.layer.23.attention.self.key/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2666" name="Constant_6596278" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="326171120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2667" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1807,x.281">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2668" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="326175216" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2669" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="326175220" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2670" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="326175216" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2671" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="326175220" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2672" name="__module.encoder.layer.23.attention.self.key/aten::linear/Add/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2673" name="__module.encoder.layer.23.attention.self/prim::ListConstruct/Concat_1" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2674" name="__module.encoder.layer.23.attention.self/aten::view/Reshape_1" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1811,x.283">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2675" name="Constant_6585873" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1812">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2676" name="__module.encoder.layer.23.attention.self/aten::permute/Transpose_1" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1813">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2677" name="Constant_7691289" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="326175224" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2678" name="Convert_7691290" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2679" name="Constant_7691291" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="327223800" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2680" name="__module.encoder.layer.23.attention.self.value/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2681" name="__module.encoder.layer.23.attention.self.value/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2682" name="Constant_6596279" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="327227896" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2683" name="__module.encoder.layer.23.attention.self.value/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1816,x.285">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2684" name="__module.encoder.layer.23.attention.self/prim::ListConstruct/Concat_2" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978200" size="32" />
<output>
<port id="0" precision="I64">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2685" name="__module.encoder.layer.23.attention.self/aten::view/Reshape_2" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1820,x">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2686" name="Constant_6585896" type="Const" version="opset1">
<data element_type="i64" shape="4" offset="32978232" size="32" />
<output>
<port id="0" precision="I64" names="1821">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2687" name="__module.encoder.layer.23.attention.self/aten::permute/Transpose_2" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1822">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2688" name="__module.encoder.layer.23.attention.self/aten::scaled_dot_product_attention/ScaledDotProductAttention" type="ScaledDotProductAttention" version="opset13">
<data causal="false" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="3" precision="FP32">
<dim>-1</dim>
<dim>1</dim>
<dim>-1</dim>
<dim>-1</dim>
</port>
</input>
<output>
<port id="4" precision="FP32" names="1823,attn_output.93">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2689" name="__module.encoder.layer.23.attention.self/aten::transpose/ScatterElementsUpdate" type="Const" version="opset1">
<data element_type="i32" shape="4" offset="35091840" size="16" />
<output>
<port id="0" precision="I32">
<dim>4</dim>
</port>
</output>
</layer>
<layer id="2690" name="__module.encoder.layer.23.attention.self/aten::transpose/Transpose" type="Transpose" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>16</dim>
<dim>-1</dim>
<dim>64</dim>
</port>
<port id="1" precision="I32">
<dim>4</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1824,attn_output">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
</output>
</layer>
<layer id="2691" name="Constant_6596516" type="Const" version="opset1">
<data element_type="i64" shape="3" offset="35091856" size="24" />
<output>
<port id="0" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="2692" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape" type="Reshape" version="opset1">
<data special_zero="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>16</dim>
<dim>64</dim>
</port>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1826">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2693" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="327231992" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2694" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2695" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="327236088" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2696" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="327236092" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2697" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="327236088" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2698" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="327236092" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2699" name="__module.encoder.layer.23.attention.self/aten::reshape/Reshape_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2700" name="Constant_7691293" type="Const" version="opset1">
<data element_type="i8" shape="1024, 1024" offset="327236096" size="1048576" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2701" name="Convert_7691294" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2702" name="Constant_7691295" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="328284672" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2703" name="__module.encoder.layer.23.attention.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2704" name="__module.encoder.layer.23.attention.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2705" name="Constant_6596280" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="328288768" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2706" name="__module.encoder.layer.23.attention.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1832,input.95">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2707" name="__module.encoder.layer.23.attention.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1834">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2708" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2709" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2710" name="Constant_6596281" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="328292864" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2711" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2712" name="Constant_6596282" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="328296960" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2713" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1838,input_tensor">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2714" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="328301056" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2715" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2716" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="328305152" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2717" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="328305156" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2718" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="328305152" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2719" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="328305156" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2720" name="__module.encoder.layer.23.attention.output.LayerNorm/aten::layer_norm/Add_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2721" name="Constant_7691297" type="Const" version="opset1">
<data element_type="i8" shape="4096, 1024" offset="328305160" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2722" name="Convert_7691298" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2723" name="Constant_7691299" type="Const" version="opset1">
<data element_type="f32" shape="4096, 1" offset="332499464" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2724" name="__module.encoder.layer.23.intermediate.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2725" name="__module.encoder.layer.23.intermediate.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>4096</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2726" name="Constant_6596283" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="332515848" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2727" name="__module.encoder.layer.23.intermediate.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1843">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2728" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu" type="Gelu" version="opset7">
<data approximation_mode="ERF" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32" names="1844">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2729" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/scale" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 4096" offset="332532232" size="16384" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2730" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2731" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="332548616" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2732" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/input_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="332548620" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2733" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_low" type="Const" version="opset1">
<data element_type="f32" shape="" offset="332548616" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2734" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0/output_high" type="Const" version="opset1">
<data element_type="f32" shape="" offset="332548620" size="4" />
<output>
<port id="0" precision="FP32" />
</output>
</layer>
<layer id="2735" name="__module.encoder.layer.23.intermediate.intermediate_act_fn/aten::gelu/Gelu_0_0/nncf_smooth_quant/fq_output_0" type="FakeQuantize" version="opset1">
<data levels="256" auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32" />
<port id="2" precision="FP32" />
<port id="3" precision="FP32" />
<port id="4" precision="FP32" />
</input>
<output>
<port id="5" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2736" name="Constant_7691301" type="Const" version="opset1">
<data element_type="i8" shape="1024, 4096" offset="332548624" size="4194304" />
<output>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2737" name="Convert_7691302" type="Convert" version="opset1">
<data destination_type="f32" />
<input>
<port id="0" precision="I8">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2738" name="Constant_7691303" type="Const" version="opset1">
<data element_type="f32" shape="1024, 1" offset="336742928" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2739" name="__module.encoder.layer.23.output.dense/aten::linear/MatMul/fq_weights_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</output>
</layer>
<layer id="2740" name="__module.encoder.layer.23.output.dense/aten::linear/MatMul" type="MatMul" version="opset1">
<data transpose_a="false" transpose_b="true" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>4096</dim>
</port>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>4096</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2741" name="Constant_6596284" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="336747024" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2742" name="__module.encoder.layer.23.output.dense/aten::linear/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1850,input">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2743" name="__module.encoder.layer.23.output/aten::add/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="1852">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2744" name="__module.encoder.layer.23.output.LayerNorm/aten::layer_norm/Multiply" type="Const" version="opset1">
<data element_type="i32" shape="1" offset="31909124" size="4" />
<output>
<port id="0" precision="I32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2745" name="__module.encoder.layer.23.output.LayerNorm/aten::layer_norm/MVN" type="MVN" version="opset6">
<data eps="9.999999960041972e-13" normalize_variance="true" eps_mode="INSIDE_SQRT" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="I32">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2746" name="Constant_6596285" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="336751120" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2747" name="__module.encoder.layer.23.output.LayerNorm/aten::layer_norm/Multiply_1" type="Multiply" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2748" name="Constant_6596286" type="Const" version="opset1">
<data element_type="f32" shape="1, 1, 1024" offset="336755216" size="4096" />
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2749" name="__module.encoder.layer.23.output.LayerNorm/aten::layer_norm/Add" type="Add" version="opset1">
<data auto_broadcast="numpy" />
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>1024</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="last_hidden_state">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="2750" name="Result_6589048" type="Result" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>-1</dim>
<dim>-1</dim>
<dim>1024</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="14" to-port="0" />
<edge from-layer="1" from-port="0" to-layer="93" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="7" to-port="0" />
<edge from-layer="2" from-port="0" to-layer="24" to-port="0" />
<edge from-layer="3" from-port="0" to-layer="4" to-port="0" />
<edge from-layer="4" from-port="1" to-layer="6" to-port="0" />
<edge from-layer="5" from-port="0" to-layer="6" to-port="1" />
<edge from-layer="6" from-port="2" to-layer="9" to-port="0" />
<edge from-layer="7" from-port="1" to-layer="9" to-port="1" />
<edge from-layer="8" from-port="0" to-layer="9" to-port="2" />
<edge from-layer="9" from-port="3" to-layer="17" to-port="0" />
<edge from-layer="10" from-port="0" to-layer="11" to-port="0" />
<edge from-layer="11" from-port="1" to-layer="13" to-port="0" />
<edge from-layer="12" from-port="0" to-layer="13" to-port="1" />
<edge from-layer="13" from-port="2" to-layer="16" to-port="0" />
<edge from-layer="14" from-port="1" to-layer="16" to-port="1" />
<edge from-layer="15" from-port="0" to-layer="16" to-port="2" />
<edge from-layer="16" from-port="3" to-layer="17" to-port="1" />
<edge from-layer="17" from-port="2" to-layer="34" to-port="0" />
<edge from-layer="18" from-port="0" to-layer="19" to-port="0" />
<edge from-layer="19" from-port="1" to-layer="21" to-port="0" />
<edge from-layer="20" from-port="0" to-layer="21" to-port="1" />
<edge from-layer="21" from-port="2" to-layer="33" to-port="0" />
<edge from-layer="22" from-port="0" to-layer="30" to-port="0" />
<edge from-layer="23" from-port="0" to-layer="30" to-port="1" />
<edge from-layer="24" from-port="1" to-layer="102" to-port="0" />
<edge from-layer="24" from-port="1" to-layer="98" to-port="0" />
<edge from-layer="24" from-port="1" to-layer="27" to-port="0" />
<edge from-layer="25" from-port="0" to-layer="27" to-port="1" />
<edge from-layer="26" from-port="0" to-layer="27" to-port="2" />
<edge from-layer="27" from-port="3" to-layer="30" to-port="2" />
<edge from-layer="28" from-port="0" to-layer="30" to-port="3" />
<edge from-layer="29" from-port="0" to-layer="30" to-port="4" />
<edge from-layer="30" from-port="5" to-layer="31" to-port="0" />
<edge from-layer="31" from-port="1" to-layer="33" to-port="1" />
<edge from-layer="32" from-port="0" to-layer="33" to-port="2" />
<edge from-layer="33" from-port="3" to-layer="34" to-port="1" />
<edge from-layer="34" from-port="2" to-layer="36" to-port="0" />
<edge from-layer="35" from-port="0" to-layer="36" to-port="1" />
<edge from-layer="36" from-port="2" to-layer="38" to-port="0" />
<edge from-layer="37" from-port="0" to-layer="38" to-port="1" />
<edge from-layer="38" from-port="2" to-layer="40" to-port="0" />
<edge from-layer="39" from-port="0" to-layer="40" to-port="1" />
<edge from-layer="40" from-port="2" to-layer="42" to-port="0" />
<edge from-layer="40" from-port="2" to-layer="131" to-port="1" />
<edge from-layer="41" from-port="0" to-layer="42" to-port="1" />
<edge from-layer="42" from-port="2" to-layer="47" to-port="0" />
<edge from-layer="43" from-port="0" to-layer="47" to-port="1" />
<edge from-layer="44" from-port="0" to-layer="47" to-port="2" />
<edge from-layer="45" from-port="0" to-layer="47" to-port="3" />
<edge from-layer="46" from-port="0" to-layer="47" to-port="4" />
<edge from-layer="47" from-port="5" to-layer="84" to-port="0" />
<edge from-layer="47" from-port="5" to-layer="68" to-port="0" />
<edge from-layer="47" from-port="5" to-layer="52" to-port="0" />
<edge from-layer="48" from-port="0" to-layer="49" to-port="0" />
<edge from-layer="49" from-port="1" to-layer="51" to-port="0" />
<edge from-layer="50" from-port="0" to-layer="51" to-port="1" />
<edge from-layer="51" from-port="2" to-layer="52" to-port="1" />
<edge from-layer="52" from-port="2" to-layer="54" to-port="0" />
<edge from-layer="53" from-port="0" to-layer="54" to-port="1" />
<edge from-layer="54" from-port="2" to-layer="59" to-port="0" />
<edge from-layer="55" from-port="0" to-layer="59" to-port="1" />
<edge from-layer="56" from-port="0" to-layer="59" to-port="2" />
<edge from-layer="57" from-port="0" to-layer="59" to-port="3" />
<edge from-layer="58" from-port="0" to-layer="59" to-port="4" />
<edge from-layer="59" from-port="5" to-layer="61" to-port="0" />
<edge from-layer="60" from-port="0" to-layer="61" to-port="1" />
<edge from-layer="61" from-port="2" to-layer="63" to-port="0" />
<edge from-layer="62" from-port="0" to-layer="63" to-port="1" />
<edge from-layer="63" from-port="2" to-layer="112" to-port="0" />
<edge from-layer="64" from-port="0" to-layer="65" to-port="0" />
<edge from-layer="65" from-port="1" to-layer="67" to-port="0" />
<edge from-layer="66" from-port="0" to-layer="67" to-port="1" />
<edge from-layer="67" from-port="2" to-layer="68" to-port="1" />
<edge from-layer="68" from-port="2" to-layer="70" to-port="0" />
<edge from-layer="69" from-port="0" to-layer="70" to-port="1" />
<edge from-layer="70" from-port="2" to-layer="75" to-port="0" />
<edge from-layer="71" from-port="0" to-layer="75" to-port="1" />
<edge from-layer="72" from-port="0" to-layer="75" to-port="2" />
<edge from-layer="73" from-port="0" to-layer="75" to-port="3" />
<edge from-layer="74" from-port="0" to-layer="75" to-port="4" />
<edge from-layer="75" from-port="5" to-layer="77" to-port="0" />
<edge from-layer="76" from-port="0" to-layer="77" to-port="1" />
<edge from-layer="77" from-port="2" to-layer="79" to-port="0" />
<edge from-layer="78" from-port="0" to-layer="79" to-port="1" />
<edge from-layer="79" from-port="2" to-layer="112" to-port="1" />
<edge from-layer="80" from-port="0" to-layer="81" to-port="0" />
<edge from-layer="81" from-port="1" to-layer="83" to-port="0" />
<edge from-layer="82" from-port="0" to-layer="83" to-port="1" />
<edge from-layer="83" from-port="2" to-layer="84" to-port="1" />
<edge from-layer="84" from-port="2" to-layer="86" to-port="0" />
<edge from-layer="85" from-port="0" to-layer="86" to-port="1" />
<edge from-layer="86" from-port="2" to-layer="88" to-port="0" />
<edge from-layer="87" from-port="0" to-layer="88" to-port="1" />
<edge from-layer="88" from-port="2" to-layer="90" to-port="0" />
<edge from-layer="89" from-port="0" to-layer="90" to-port="1" />
<edge from-layer="90" from-port="2" to-layer="112" to-port="2" />
<edge from-layer="91" from-port="0" to-layer="108" to-port="0" />
<edge from-layer="92" from-port="0" to-layer="93" to-port="1" />
<edge from-layer="93" from-port="2" to-layer="95" to-port="0" />
<edge from-layer="94" from-port="0" to-layer="95" to-port="1" />
<edge from-layer="95" from-port="2" to-layer="104" to-port="0" />
<edge from-layer="96" from-port="0" to-layer="98" to-port="1" />
<edge from-layer="97" from-port="0" to-layer="98" to-port="2" />
<edge from-layer="98" from-port="3" to-layer="103" to-port="0" />
<edge from-layer="99" from-port="0" to-layer="103" to-port="1" />
<edge from-layer="100" from-port="0" to-layer="102" to-port="1" />
<edge from-layer="101" from-port="0" to-layer="102" to-port="2" />
<edge from-layer="102" from-port="3" to-layer="103" to-port="2" />
<edge from-layer="103" from-port="3" to-layer="104" to-port="1" />
<edge from-layer="104" from-port="2" to-layer="105" to-port="0" />
<edge from-layer="105" from-port="1" to-layer="107" to-port="0" />
<edge from-layer="106" from-port="0" to-layer="107" to-port="1" />
<edge from-layer="107" from-port="2" to-layer="108" to-port="1" />
<edge from-layer="108" from-port="2" to-layer="111" to-port="2" />
<edge from-layer="108" from-port="2" to-layer="109" to-port="0" />
<edge from-layer="109" from-port="1" to-layer="111" to-port="0" />
<edge from-layer="110" from-port="0" to-layer="111" to-port="1" />
<edge from-layer="111" from-port="3" to-layer="224" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="336" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="448" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="784" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="560" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="672" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1456" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1568" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1680" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1792" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1904" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2016" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2128" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2240" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2352" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2464" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2576" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="2688" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1344" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1232" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1120" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="1008" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="896" to-port="3" />
<edge from-layer="111" from-port="3" to-layer="112" to-port="3" />
<edge from-layer="112" from-port="4" to-layer="114" to-port="0" />
<edge from-layer="113" from-port="0" to-layer="114" to-port="1" />
<edge from-layer="114" from-port="2" to-layer="116" to-port="0" />
<edge from-layer="115" from-port="0" to-layer="116" to-port="1" />
<edge from-layer="116" from-port="2" to-layer="118" to-port="0" />
<edge from-layer="117" from-port="0" to-layer="118" to-port="1" />
<edge from-layer="118" from-port="2" to-layer="123" to-port="0" />
<edge from-layer="119" from-port="0" to-layer="123" to-port="1" />
<edge from-layer="120" from-port="0" to-layer="123" to-port="2" />
<edge from-layer="121" from-port="0" to-layer="123" to-port="3" />
<edge from-layer="122" from-port="0" to-layer="123" to-port="4" />
<edge from-layer="123" from-port="5" to-layer="128" to-port="0" />
<edge from-layer="124" from-port="0" to-layer="125" to-port="0" />
<edge from-layer="125" from-port="1" to-layer="127" to-port="0" />
<edge from-layer="126" from-port="0" to-layer="127" to-port="1" />
<edge from-layer="127" from-port="2" to-layer="128" to-port="1" />
<edge from-layer="128" from-port="2" to-layer="130" to-port="0" />
<edge from-layer="129" from-port="0" to-layer="130" to-port="1" />
<edge from-layer="130" from-port="2" to-layer="131" to-port="0" />
<edge from-layer="131" from-port="2" to-layer="133" to-port="0" />
<edge from-layer="132" from-port="0" to-layer="133" to-port="1" />
<edge from-layer="133" from-port="2" to-layer="135" to-port="0" />
<edge from-layer="134" from-port="0" to-layer="135" to-port="1" />
<edge from-layer="135" from-port="2" to-layer="137" to-port="0" />
<edge from-layer="136" from-port="0" to-layer="137" to-port="1" />
<edge from-layer="137" from-port="2" to-layer="167" to-port="1" />
<edge from-layer="137" from-port="2" to-layer="139" to-port="0" />
<edge from-layer="138" from-port="0" to-layer="139" to-port="1" />
<edge from-layer="139" from-port="2" to-layer="144" to-port="0" />
<edge from-layer="140" from-port="0" to-layer="144" to-port="1" />
<edge from-layer="141" from-port="0" to-layer="144" to-port="2" />
<edge from-layer="142" from-port="0" to-layer="144" to-port="3" />
<edge from-layer="143" from-port="0" to-layer="144" to-port="4" />
<edge from-layer="144" from-port="5" to-layer="149" to-port="0" />
<edge from-layer="145" from-port="0" to-layer="146" to-port="0" />
<edge from-layer="146" from-port="1" to-layer="148" to-port="0" />
<edge from-layer="147" from-port="0" to-layer="148" to-port="1" />
<edge from-layer="148" from-port="2" to-layer="149" to-port="1" />
<edge from-layer="149" from-port="2" to-layer="151" to-port="0" />
<edge from-layer="150" from-port="0" to-layer="151" to-port="1" />
<edge from-layer="151" from-port="2" to-layer="152" to-port="0" />
<edge from-layer="152" from-port="1" to-layer="154" to-port="0" />
<edge from-layer="153" from-port="0" to-layer="154" to-port="1" />
<edge from-layer="154" from-port="2" to-layer="159" to-port="0" />
<edge from-layer="155" from-port="0" to-layer="159" to-port="1" />
<edge from-layer="156" from-port="0" to-layer="159" to-port="2" />
<edge from-layer="157" from-port="0" to-layer="159" to-port="3" />
<edge from-layer="158" from-port="0" to-layer="159" to-port="4" />
<edge from-layer="159" from-port="5" to-layer="164" to-port="0" />
<edge from-layer="160" from-port="0" to-layer="161" to-port="0" />
<edge from-layer="161" from-port="1" to-layer="163" to-port="0" />
<edge from-layer="162" from-port="0" to-layer="163" to-port="1" />
<edge from-layer="163" from-port="2" to-layer="164" to-port="1" />
<edge from-layer="164" from-port="2" to-layer="166" to-port="0" />
<edge from-layer="165" from-port="0" to-layer="166" to-port="1" />
<edge from-layer="166" from-port="2" to-layer="167" to-port="0" />
<edge from-layer="167" from-port="2" to-layer="169" to-port="0" />
<edge from-layer="168" from-port="0" to-layer="169" to-port="1" />
<edge from-layer="169" from-port="2" to-layer="171" to-port="0" />
<edge from-layer="170" from-port="0" to-layer="171" to-port="1" />
<edge from-layer="171" from-port="2" to-layer="173" to-port="0" />
<edge from-layer="172" from-port="0" to-layer="173" to-port="1" />
<edge from-layer="173" from-port="2" to-layer="243" to-port="1" />
<edge from-layer="173" from-port="2" to-layer="175" to-port="0" />
<edge from-layer="174" from-port="0" to-layer="175" to-port="1" />
<edge from-layer="175" from-port="2" to-layer="180" to-port="0" />
<edge from-layer="176" from-port="0" to-layer="180" to-port="1" />
<edge from-layer="177" from-port="0" to-layer="180" to-port="2" />
<edge from-layer="178" from-port="0" to-layer="180" to-port="3" />
<edge from-layer="179" from-port="0" to-layer="180" to-port="4" />
<edge from-layer="180" from-port="5" to-layer="185" to-port="0" />
<edge from-layer="180" from-port="5" to-layer="217" to-port="0" />
<edge from-layer="180" from-port="5" to-layer="201" to-port="0" />
<edge from-layer="181" from-port="0" to-layer="182" to-port="0" />
<edge from-layer="182" from-port="1" to-layer="184" to-port="0" />
<edge from-layer="183" from-port="0" to-layer="184" to-port="1" />
<edge from-layer="184" from-port="2" to-layer="185" to-port="1" />
<edge from-layer="185" from-port="2" to-layer="187" to-port="0" />
<edge from-layer="186" from-port="0" to-layer="187" to-port="1" />
<edge from-layer="187" from-port="2" to-layer="192" to-port="0" />
<edge from-layer="188" from-port="0" to-layer="192" to-port="1" />
<edge from-layer="189" from-port="0" to-layer="192" to-port="2" />
<edge from-layer="190" from-port="0" to-layer="192" to-port="3" />
<edge from-layer="191" from-port="0" to-layer="192" to-port="4" />
<edge from-layer="192" from-port="5" to-layer="194" to-port="0" />
<edge from-layer="193" from-port="0" to-layer="194" to-port="1" />
<edge from-layer="194" from-port="2" to-layer="196" to-port="0" />
<edge from-layer="195" from-port="0" to-layer="196" to-port="1" />
<edge from-layer="196" from-port="2" to-layer="224" to-port="0" />
<edge from-layer="197" from-port="0" to-layer="198" to-port="0" />
<edge from-layer="198" from-port="1" to-layer="200" to-port="0" />
<edge from-layer="199" from-port="0" to-layer="200" to-port="1" />
<edge from-layer="200" from-port="2" to-layer="201" to-port="1" />
<edge from-layer="201" from-port="2" to-layer="203" to-port="0" />
<edge from-layer="202" from-port="0" to-layer="203" to-port="1" />
<edge from-layer="203" from-port="2" to-layer="208" to-port="0" />
<edge from-layer="204" from-port="0" to-layer="208" to-port="1" />
<edge from-layer="205" from-port="0" to-layer="208" to-port="2" />
<edge from-layer="206" from-port="0" to-layer="208" to-port="3" />
<edge from-layer="207" from-port="0" to-layer="208" to-port="4" />
<edge from-layer="208" from-port="5" to-layer="210" to-port="0" />
<edge from-layer="209" from-port="0" to-layer="210" to-port="1" />
<edge from-layer="210" from-port="2" to-layer="212" to-port="0" />
<edge from-layer="211" from-port="0" to-layer="212" to-port="1" />
<edge from-layer="212" from-port="2" to-layer="224" to-port="1" />
<edge from-layer="213" from-port="0" to-layer="214" to-port="0" />
<edge from-layer="214" from-port="1" to-layer="216" to-port="0" />
<edge from-layer="215" from-port="0" to-layer="216" to-port="1" />
<edge from-layer="216" from-port="2" to-layer="217" to-port="1" />
<edge from-layer="217" from-port="2" to-layer="219" to-port="0" />
<edge from-layer="218" from-port="0" to-layer="219" to-port="1" />
<edge from-layer="219" from-port="2" to-layer="221" to-port="0" />
<edge from-layer="220" from-port="0" to-layer="221" to-port="1" />
<edge from-layer="221" from-port="2" to-layer="223" to-port="0" />
<edge from-layer="222" from-port="0" to-layer="223" to-port="1" />
<edge from-layer="223" from-port="2" to-layer="224" to-port="2" />
<edge from-layer="224" from-port="4" to-layer="226" to-port="0" />
<edge from-layer="225" from-port="0" to-layer="226" to-port="1" />
<edge from-layer="226" from-port="2" to-layer="228" to-port="0" />
<edge from-layer="227" from-port="0" to-layer="228" to-port="1" />
<edge from-layer="228" from-port="2" to-layer="230" to-port="0" />
<edge from-layer="229" from-port="0" to-layer="230" to-port="1" />
<edge from-layer="230" from-port="2" to-layer="235" to-port="0" />
<edge from-layer="231" from-port="0" to-layer="235" to-port="1" />
<edge from-layer="232" from-port="0" to-layer="235" to-port="2" />
<edge from-layer="233" from-port="0" to-layer="235" to-port="3" />
<edge from-layer="234" from-port="0" to-layer="235" to-port="4" />
<edge from-layer="235" from-port="5" to-layer="240" to-port="0" />
<edge from-layer="236" from-port="0" to-layer="237" to-port="0" />
<edge from-layer="237" from-port="1" to-layer="239" to-port="0" />
<edge from-layer="238" from-port="0" to-layer="239" to-port="1" />
<edge from-layer="239" from-port="2" to-layer="240" to-port="1" />
<edge from-layer="240" from-port="2" to-layer="242" to-port="0" />
<edge from-layer="241" from-port="0" to-layer="242" to-port="1" />
<edge from-layer="242" from-port="2" to-layer="243" to-port="0" />
<edge from-layer="243" from-port="2" to-layer="245" to-port="0" />
<edge from-layer="244" from-port="0" to-layer="245" to-port="1" />
<edge from-layer="245" from-port="2" to-layer="247" to-port="0" />
<edge from-layer="246" from-port="0" to-layer="247" to-port="1" />
<edge from-layer="247" from-port="2" to-layer="249" to-port="0" />
<edge from-layer="248" from-port="0" to-layer="249" to-port="1" />
<edge from-layer="249" from-port="2" to-layer="279" to-port="1" />
<edge from-layer="249" from-port="2" to-layer="251" to-port="0" />
<edge from-layer="250" from-port="0" to-layer="251" to-port="1" />
<edge from-layer="251" from-port="2" to-layer="256" to-port="0" />
<edge from-layer="252" from-port="0" to-layer="256" to-port="1" />
<edge from-layer="253" from-port="0" to-layer="256" to-port="2" />
<edge from-layer="254" from-port="0" to-layer="256" to-port="3" />
<edge from-layer="255" from-port="0" to-layer="256" to-port="4" />
<edge from-layer="256" from-port="5" to-layer="261" to-port="0" />
<edge from-layer="257" from-port="0" to-layer="258" to-port="0" />
<edge from-layer="258" from-port="1" to-layer="260" to-port="0" />
<edge from-layer="259" from-port="0" to-layer="260" to-port="1" />
<edge from-layer="260" from-port="2" to-layer="261" to-port="1" />
<edge from-layer="261" from-port="2" to-layer="263" to-port="0" />
<edge from-layer="262" from-port="0" to-layer="263" to-port="1" />
<edge from-layer="263" from-port="2" to-layer="264" to-port="0" />
<edge from-layer="264" from-port="1" to-layer="266" to-port="0" />
<edge from-layer="265" from-port="0" to-layer="266" to-port="1" />
<edge from-layer="266" from-port="2" to-layer="271" to-port="0" />
<edge from-layer="267" from-port="0" to-layer="271" to-port="1" />
<edge from-layer="268" from-port="0" to-layer="271" to-port="2" />
<edge from-layer="269" from-port="0" to-layer="271" to-port="3" />
<edge from-layer="270" from-port="0" to-layer="271" to-port="4" />
<edge from-layer="271" from-port="5" to-layer="276" to-port="0" />
<edge from-layer="272" from-port="0" to-layer="273" to-port="0" />
<edge from-layer="273" from-port="1" to-layer="275" to-port="0" />
<edge from-layer="274" from-port="0" to-layer="275" to-port="1" />
<edge from-layer="275" from-port="2" to-layer="276" to-port="1" />
<edge from-layer="276" from-port="2" to-layer="278" to-port="0" />
<edge from-layer="277" from-port="0" to-layer="278" to-port="1" />
<edge from-layer="278" from-port="2" to-layer="279" to-port="0" />
<edge from-layer="279" from-port="2" to-layer="281" to-port="0" />
<edge from-layer="280" from-port="0" to-layer="281" to-port="1" />
<edge from-layer="281" from-port="2" to-layer="283" to-port="0" />
<edge from-layer="282" from-port="0" to-layer="283" to-port="1" />
<edge from-layer="283" from-port="2" to-layer="285" to-port="0" />
<edge from-layer="284" from-port="0" to-layer="285" to-port="1" />
<edge from-layer="285" from-port="2" to-layer="355" to-port="1" />
<edge from-layer="285" from-port="2" to-layer="287" to-port="0" />
<edge from-layer="286" from-port="0" to-layer="287" to-port="1" />
<edge from-layer="287" from-port="2" to-layer="292" to-port="0" />
<edge from-layer="288" from-port="0" to-layer="292" to-port="1" />
<edge from-layer="289" from-port="0" to-layer="292" to-port="2" />
<edge from-layer="290" from-port="0" to-layer="292" to-port="3" />
<edge from-layer="291" from-port="0" to-layer="292" to-port="4" />
<edge from-layer="292" from-port="5" to-layer="313" to-port="0" />
<edge from-layer="292" from-port="5" to-layer="297" to-port="0" />
<edge from-layer="292" from-port="5" to-layer="329" to-port="0" />
<edge from-layer="293" from-port="0" to-layer="294" to-port="0" />
<edge from-layer="294" from-port="1" to-layer="296" to-port="0" />
<edge from-layer="295" from-port="0" to-layer="296" to-port="1" />
<edge from-layer="296" from-port="2" to-layer="297" to-port="1" />
<edge from-layer="297" from-port="2" to-layer="299" to-port="0" />
<edge from-layer="298" from-port="0" to-layer="299" to-port="1" />
<edge from-layer="299" from-port="2" to-layer="304" to-port="0" />
<edge from-layer="300" from-port="0" to-layer="304" to-port="1" />
<edge from-layer="301" from-port="0" to-layer="304" to-port="2" />
<edge from-layer="302" from-port="0" to-layer="304" to-port="3" />
<edge from-layer="303" from-port="0" to-layer="304" to-port="4" />
<edge from-layer="304" from-port="5" to-layer="306" to-port="0" />
<edge from-layer="305" from-port="0" to-layer="306" to-port="1" />
<edge from-layer="306" from-port="2" to-layer="308" to-port="0" />
<edge from-layer="307" from-port="0" to-layer="308" to-port="1" />
<edge from-layer="308" from-port="2" to-layer="336" to-port="0" />
<edge from-layer="309" from-port="0" to-layer="310" to-port="0" />
<edge from-layer="310" from-port="1" to-layer="312" to-port="0" />
<edge from-layer="311" from-port="0" to-layer="312" to-port="1" />
<edge from-layer="312" from-port="2" to-layer="313" to-port="1" />
<edge from-layer="313" from-port="2" to-layer="315" to-port="0" />
<edge from-layer="314" from-port="0" to-layer="315" to-port="1" />
<edge from-layer="315" from-port="2" to-layer="320" to-port="0" />
<edge from-layer="316" from-port="0" to-layer="320" to-port="1" />
<edge from-layer="317" from-port="0" to-layer="320" to-port="2" />
<edge from-layer="318" from-port="0" to-layer="320" to-port="3" />
<edge from-layer="319" from-port="0" to-layer="320" to-port="4" />
<edge from-layer="320" from-port="5" to-layer="322" to-port="0" />
<edge from-layer="321" from-port="0" to-layer="322" to-port="1" />
<edge from-layer="322" from-port="2" to-layer="324" to-port="0" />
<edge from-layer="323" from-port="0" to-layer="324" to-port="1" />
<edge from-layer="324" from-port="2" to-layer="336" to-port="1" />
<edge from-layer="325" from-port="0" to-layer="326" to-port="0" />
<edge from-layer="326" from-port="1" to-layer="328" to-port="0" />
<edge from-layer="327" from-port="0" to-layer="328" to-port="1" />
<edge from-layer="328" from-port="2" to-layer="329" to-port="1" />
<edge from-layer="329" from-port="2" to-layer="331" to-port="0" />
<edge from-layer="330" from-port="0" to-layer="331" to-port="1" />
<edge from-layer="331" from-port="2" to-layer="333" to-port="0" />
<edge from-layer="332" from-port="0" to-layer="333" to-port="1" />
<edge from-layer="333" from-port="2" to-layer="335" to-port="0" />
<edge from-layer="334" from-port="0" to-layer="335" to-port="1" />
<edge from-layer="335" from-port="2" to-layer="336" to-port="2" />
<edge from-layer="336" from-port="4" to-layer="338" to-port="0" />
<edge from-layer="337" from-port="0" to-layer="338" to-port="1" />
<edge from-layer="338" from-port="2" to-layer="340" to-port="0" />
<edge from-layer="339" from-port="0" to-layer="340" to-port="1" />
<edge from-layer="340" from-port="2" to-layer="342" to-port="0" />
<edge from-layer="341" from-port="0" to-layer="342" to-port="1" />
<edge from-layer="342" from-port="2" to-layer="347" to-port="0" />
<edge from-layer="343" from-port="0" to-layer="347" to-port="1" />
<edge from-layer="344" from-port="0" to-layer="347" to-port="2" />
<edge from-layer="345" from-port="0" to-layer="347" to-port="3" />
<edge from-layer="346" from-port="0" to-layer="347" to-port="4" />
<edge from-layer="347" from-port="5" to-layer="352" to-port="0" />
<edge from-layer="348" from-port="0" to-layer="349" to-port="0" />
<edge from-layer="349" from-port="1" to-layer="351" to-port="0" />
<edge from-layer="350" from-port="0" to-layer="351" to-port="1" />
<edge from-layer="351" from-port="2" to-layer="352" to-port="1" />
<edge from-layer="352" from-port="2" to-layer="354" to-port="0" />
<edge from-layer="353" from-port="0" to-layer="354" to-port="1" />
<edge from-layer="354" from-port="2" to-layer="355" to-port="0" />
<edge from-layer="355" from-port="2" to-layer="357" to-port="0" />
<edge from-layer="356" from-port="0" to-layer="357" to-port="1" />
<edge from-layer="357" from-port="2" to-layer="359" to-port="0" />
<edge from-layer="358" from-port="0" to-layer="359" to-port="1" />
<edge from-layer="359" from-port="2" to-layer="361" to-port="0" />
<edge from-layer="360" from-port="0" to-layer="361" to-port="1" />
<edge from-layer="361" from-port="2" to-layer="363" to-port="0" />
<edge from-layer="361" from-port="2" to-layer="391" to-port="1" />
<edge from-layer="362" from-port="0" to-layer="363" to-port="1" />
<edge from-layer="363" from-port="2" to-layer="368" to-port="0" />
<edge from-layer="364" from-port="0" to-layer="368" to-port="1" />
<edge from-layer="365" from-port="0" to-layer="368" to-port="2" />
<edge from-layer="366" from-port="0" to-layer="368" to-port="3" />
<edge from-layer="367" from-port="0" to-layer="368" to-port="4" />
<edge from-layer="368" from-port="5" to-layer="373" to-port="0" />
<edge from-layer="369" from-port="0" to-layer="370" to-port="0" />
<edge from-layer="370" from-port="1" to-layer="372" to-port="0" />
<edge from-layer="371" from-port="0" to-layer="372" to-port="1" />
<edge from-layer="372" from-port="2" to-layer="373" to-port="1" />
<edge from-layer="373" from-port="2" to-layer="375" to-port="0" />
<edge from-layer="374" from-port="0" to-layer="375" to-port="1" />
<edge from-layer="375" from-port="2" to-layer="376" to-port="0" />
<edge from-layer="376" from-port="1" to-layer="378" to-port="0" />
<edge from-layer="377" from-port="0" to-layer="378" to-port="1" />
<edge from-layer="378" from-port="2" to-layer="383" to-port="0" />
<edge from-layer="379" from-port="0" to-layer="383" to-port="1" />
<edge from-layer="380" from-port="0" to-layer="383" to-port="2" />
<edge from-layer="381" from-port="0" to-layer="383" to-port="3" />
<edge from-layer="382" from-port="0" to-layer="383" to-port="4" />
<edge from-layer="383" from-port="5" to-layer="388" to-port="0" />
<edge from-layer="384" from-port="0" to-layer="385" to-port="0" />
<edge from-layer="385" from-port="1" to-layer="387" to-port="0" />
<edge from-layer="386" from-port="0" to-layer="387" to-port="1" />
<edge from-layer="387" from-port="2" to-layer="388" to-port="1" />
<edge from-layer="388" from-port="2" to-layer="390" to-port="0" />
<edge from-layer="389" from-port="0" to-layer="390" to-port="1" />
<edge from-layer="390" from-port="2" to-layer="391" to-port="0" />
<edge from-layer="391" from-port="2" to-layer="393" to-port="0" />
<edge from-layer="392" from-port="0" to-layer="393" to-port="1" />
<edge from-layer="393" from-port="2" to-layer="395" to-port="0" />
<edge from-layer="394" from-port="0" to-layer="395" to-port="1" />
<edge from-layer="395" from-port="2" to-layer="397" to-port="0" />
<edge from-layer="396" from-port="0" to-layer="397" to-port="1" />
<edge from-layer="397" from-port="2" to-layer="399" to-port="0" />
<edge from-layer="397" from-port="2" to-layer="467" to-port="1" />
<edge from-layer="398" from-port="0" to-layer="399" to-port="1" />
<edge from-layer="399" from-port="2" to-layer="404" to-port="0" />
<edge from-layer="400" from-port="0" to-layer="404" to-port="1" />
<edge from-layer="401" from-port="0" to-layer="404" to-port="2" />
<edge from-layer="402" from-port="0" to-layer="404" to-port="3" />
<edge from-layer="403" from-port="0" to-layer="404" to-port="4" />
<edge from-layer="404" from-port="5" to-layer="441" to-port="0" />
<edge from-layer="404" from-port="5" to-layer="425" to-port="0" />
<edge from-layer="404" from-port="5" to-layer="409" to-port="0" />
<edge from-layer="405" from-port="0" to-layer="406" to-port="0" />
<edge from-layer="406" from-port="1" to-layer="408" to-port="0" />
<edge from-layer="407" from-port="0" to-layer="408" to-port="1" />
<edge from-layer="408" from-port="2" to-layer="409" to-port="1" />
<edge from-layer="409" from-port="2" to-layer="411" to-port="0" />
<edge from-layer="410" from-port="0" to-layer="411" to-port="1" />
<edge from-layer="411" from-port="2" to-layer="416" to-port="0" />
<edge from-layer="412" from-port="0" to-layer="416" to-port="1" />
<edge from-layer="413" from-port="0" to-layer="416" to-port="2" />
<edge from-layer="414" from-port="0" to-layer="416" to-port="3" />
<edge from-layer="415" from-port="0" to-layer="416" to-port="4" />
<edge from-layer="416" from-port="5" to-layer="418" to-port="0" />
<edge from-layer="417" from-port="0" to-layer="418" to-port="1" />
<edge from-layer="418" from-port="2" to-layer="420" to-port="0" />
<edge from-layer="419" from-port="0" to-layer="420" to-port="1" />
<edge from-layer="420" from-port="2" to-layer="448" to-port="0" />
<edge from-layer="421" from-port="0" to-layer="422" to-port="0" />
<edge from-layer="422" from-port="1" to-layer="424" to-port="0" />
<edge from-layer="423" from-port="0" to-layer="424" to-port="1" />
<edge from-layer="424" from-port="2" to-layer="425" to-port="1" />
<edge from-layer="425" from-port="2" to-layer="427" to-port="0" />
<edge from-layer="426" from-port="0" to-layer="427" to-port="1" />
<edge from-layer="427" from-port="2" to-layer="432" to-port="0" />
<edge from-layer="428" from-port="0" to-layer="432" to-port="1" />
<edge from-layer="429" from-port="0" to-layer="432" to-port="2" />
<edge from-layer="430" from-port="0" to-layer="432" to-port="3" />
<edge from-layer="431" from-port="0" to-layer="432" to-port="4" />
<edge from-layer="432" from-port="5" to-layer="434" to-port="0" />
<edge from-layer="433" from-port="0" to-layer="434" to-port="1" />
<edge from-layer="434" from-port="2" to-layer="436" to-port="0" />
<edge from-layer="435" from-port="0" to-layer="436" to-port="1" />
<edge from-layer="436" from-port="2" to-layer="448" to-port="1" />
<edge from-layer="437" from-port="0" to-layer="438" to-port="0" />
<edge from-layer="438" from-port="1" to-layer="440" to-port="0" />
<edge from-layer="439" from-port="0" to-layer="440" to-port="1" />
<edge from-layer="440" from-port="2" to-layer="441" to-port="1" />
<edge from-layer="441" from-port="2" to-layer="443" to-port="0" />
<edge from-layer="442" from-port="0" to-layer="443" to-port="1" />
<edge from-layer="443" from-port="2" to-layer="445" to-port="0" />
<edge from-layer="444" from-port="0" to-layer="445" to-port="1" />
<edge from-layer="445" from-port="2" to-layer="447" to-port="0" />
<edge from-layer="446" from-port="0" to-layer="447" to-port="1" />
<edge from-layer="447" from-port="2" to-layer="448" to-port="2" />
<edge from-layer="448" from-port="4" to-layer="450" to-port="0" />
<edge from-layer="449" from-port="0" to-layer="450" to-port="1" />
<edge from-layer="450" from-port="2" to-layer="452" to-port="0" />
<edge from-layer="451" from-port="0" to-layer="452" to-port="1" />
<edge from-layer="452" from-port="2" to-layer="454" to-port="0" />
<edge from-layer="453" from-port="0" to-layer="454" to-port="1" />
<edge from-layer="454" from-port="2" to-layer="459" to-port="0" />
<edge from-layer="455" from-port="0" to-layer="459" to-port="1" />
<edge from-layer="456" from-port="0" to-layer="459" to-port="2" />
<edge from-layer="457" from-port="0" to-layer="459" to-port="3" />
<edge from-layer="458" from-port="0" to-layer="459" to-port="4" />
<edge from-layer="459" from-port="5" to-layer="464" to-port="0" />
<edge from-layer="460" from-port="0" to-layer="461" to-port="0" />
<edge from-layer="461" from-port="1" to-layer="463" to-port="0" />
<edge from-layer="462" from-port="0" to-layer="463" to-port="1" />
<edge from-layer="463" from-port="2" to-layer="464" to-port="1" />
<edge from-layer="464" from-port="2" to-layer="466" to-port="0" />
<edge from-layer="465" from-port="0" to-layer="466" to-port="1" />
<edge from-layer="466" from-port="2" to-layer="467" to-port="0" />
<edge from-layer="467" from-port="2" to-layer="469" to-port="0" />
<edge from-layer="468" from-port="0" to-layer="469" to-port="1" />
<edge from-layer="469" from-port="2" to-layer="471" to-port="0" />
<edge from-layer="470" from-port="0" to-layer="471" to-port="1" />
<edge from-layer="471" from-port="2" to-layer="473" to-port="0" />
<edge from-layer="472" from-port="0" to-layer="473" to-port="1" />
<edge from-layer="473" from-port="2" to-layer="475" to-port="0" />
<edge from-layer="473" from-port="2" to-layer="503" to-port="1" />
<edge from-layer="474" from-port="0" to-layer="475" to-port="1" />
<edge from-layer="475" from-port="2" to-layer="480" to-port="0" />
<edge from-layer="476" from-port="0" to-layer="480" to-port="1" />
<edge from-layer="477" from-port="0" to-layer="480" to-port="2" />
<edge from-layer="478" from-port="0" to-layer="480" to-port="3" />
<edge from-layer="479" from-port="0" to-layer="480" to-port="4" />
<edge from-layer="480" from-port="5" to-layer="485" to-port="0" />
<edge from-layer="481" from-port="0" to-layer="482" to-port="0" />
<edge from-layer="482" from-port="1" to-layer="484" to-port="0" />
<edge from-layer="483" from-port="0" to-layer="484" to-port="1" />
<edge from-layer="484" from-port="2" to-layer="485" to-port="1" />
<edge from-layer="485" from-port="2" to-layer="487" to-port="0" />
<edge from-layer="486" from-port="0" to-layer="487" to-port="1" />
<edge from-layer="487" from-port="2" to-layer="488" to-port="0" />
<edge from-layer="488" from-port="1" to-layer="490" to-port="0" />
<edge from-layer="489" from-port="0" to-layer="490" to-port="1" />
<edge from-layer="490" from-port="2" to-layer="495" to-port="0" />
<edge from-layer="491" from-port="0" to-layer="495" to-port="1" />
<edge from-layer="492" from-port="0" to-layer="495" to-port="2" />
<edge from-layer="493" from-port="0" to-layer="495" to-port="3" />
<edge from-layer="494" from-port="0" to-layer="495" to-port="4" />
<edge from-layer="495" from-port="5" to-layer="500" to-port="0" />
<edge from-layer="496" from-port="0" to-layer="497" to-port="0" />
<edge from-layer="497" from-port="1" to-layer="499" to-port="0" />
<edge from-layer="498" from-port="0" to-layer="499" to-port="1" />
<edge from-layer="499" from-port="2" to-layer="500" to-port="1" />
<edge from-layer="500" from-port="2" to-layer="502" to-port="0" />
<edge from-layer="501" from-port="0" to-layer="502" to-port="1" />
<edge from-layer="502" from-port="2" to-layer="503" to-port="0" />
<edge from-layer="503" from-port="2" to-layer="505" to-port="0" />
<edge from-layer="504" from-port="0" to-layer="505" to-port="1" />
<edge from-layer="505" from-port="2" to-layer="507" to-port="0" />
<edge from-layer="506" from-port="0" to-layer="507" to-port="1" />
<edge from-layer="507" from-port="2" to-layer="509" to-port="0" />
<edge from-layer="508" from-port="0" to-layer="509" to-port="1" />
<edge from-layer="509" from-port="2" to-layer="579" to-port="1" />
<edge from-layer="509" from-port="2" to-layer="511" to-port="0" />
<edge from-layer="510" from-port="0" to-layer="511" to-port="1" />
<edge from-layer="511" from-port="2" to-layer="516" to-port="0" />
<edge from-layer="512" from-port="0" to-layer="516" to-port="1" />
<edge from-layer="513" from-port="0" to-layer="516" to-port="2" />
<edge from-layer="514" from-port="0" to-layer="516" to-port="3" />
<edge from-layer="515" from-port="0" to-layer="516" to-port="4" />
<edge from-layer="516" from-port="5" to-layer="521" to-port="0" />
<edge from-layer="516" from-port="5" to-layer="537" to-port="0" />
<edge from-layer="516" from-port="5" to-layer="553" to-port="0" />
<edge from-layer="517" from-port="0" to-layer="518" to-port="0" />
<edge from-layer="518" from-port="1" to-layer="520" to-port="0" />
<edge from-layer="519" from-port="0" to-layer="520" to-port="1" />
<edge from-layer="520" from-port="2" to-layer="521" to-port="1" />
<edge from-layer="521" from-port="2" to-layer="523" to-port="0" />
<edge from-layer="522" from-port="0" to-layer="523" to-port="1" />
<edge from-layer="523" from-port="2" to-layer="528" to-port="0" />
<edge from-layer="524" from-port="0" to-layer="528" to-port="1" />
<edge from-layer="525" from-port="0" to-layer="528" to-port="2" />
<edge from-layer="526" from-port="0" to-layer="528" to-port="3" />
<edge from-layer="527" from-port="0" to-layer="528" to-port="4" />
<edge from-layer="528" from-port="5" to-layer="530" to-port="0" />
<edge from-layer="529" from-port="0" to-layer="530" to-port="1" />
<edge from-layer="530" from-port="2" to-layer="532" to-port="0" />
<edge from-layer="531" from-port="0" to-layer="532" to-port="1" />
<edge from-layer="532" from-port="2" to-layer="560" to-port="0" />
<edge from-layer="533" from-port="0" to-layer="534" to-port="0" />
<edge from-layer="534" from-port="1" to-layer="536" to-port="0" />
<edge from-layer="535" from-port="0" to-layer="536" to-port="1" />
<edge from-layer="536" from-port="2" to-layer="537" to-port="1" />
<edge from-layer="537" from-port="2" to-layer="539" to-port="0" />
<edge from-layer="538" from-port="0" to-layer="539" to-port="1" />
<edge from-layer="539" from-port="2" to-layer="544" to-port="0" />
<edge from-layer="540" from-port="0" to-layer="544" to-port="1" />
<edge from-layer="541" from-port="0" to-layer="544" to-port="2" />
<edge from-layer="542" from-port="0" to-layer="544" to-port="3" />
<edge from-layer="543" from-port="0" to-layer="544" to-port="4" />
<edge from-layer="544" from-port="5" to-layer="546" to-port="0" />
<edge from-layer="545" from-port="0" to-layer="546" to-port="1" />
<edge from-layer="546" from-port="2" to-layer="548" to-port="0" />
<edge from-layer="547" from-port="0" to-layer="548" to-port="1" />
<edge from-layer="548" from-port="2" to-layer="560" to-port="1" />
<edge from-layer="549" from-port="0" to-layer="550" to-port="0" />
<edge from-layer="550" from-port="1" to-layer="552" to-port="0" />
<edge from-layer="551" from-port="0" to-layer="552" to-port="1" />
<edge from-layer="552" from-port="2" to-layer="553" to-port="1" />
<edge from-layer="553" from-port="2" to-layer="555" to-port="0" />
<edge from-layer="554" from-port="0" to-layer="555" to-port="1" />
<edge from-layer="555" from-port="2" to-layer="557" to-port="0" />
<edge from-layer="556" from-port="0" to-layer="557" to-port="1" />
<edge from-layer="557" from-port="2" to-layer="559" to-port="0" />
<edge from-layer="558" from-port="0" to-layer="559" to-port="1" />
<edge from-layer="559" from-port="2" to-layer="560" to-port="2" />
<edge from-layer="560" from-port="4" to-layer="562" to-port="0" />
<edge from-layer="561" from-port="0" to-layer="562" to-port="1" />
<edge from-layer="562" from-port="2" to-layer="564" to-port="0" />
<edge from-layer="563" from-port="0" to-layer="564" to-port="1" />
<edge from-layer="564" from-port="2" to-layer="566" to-port="0" />
<edge from-layer="565" from-port="0" to-layer="566" to-port="1" />
<edge from-layer="566" from-port="2" to-layer="571" to-port="0" />
<edge from-layer="567" from-port="0" to-layer="571" to-port="1" />
<edge from-layer="568" from-port="0" to-layer="571" to-port="2" />
<edge from-layer="569" from-port="0" to-layer="571" to-port="3" />
<edge from-layer="570" from-port="0" to-layer="571" to-port="4" />
<edge from-layer="571" from-port="5" to-layer="576" to-port="0" />
<edge from-layer="572" from-port="0" to-layer="573" to-port="0" />
<edge from-layer="573" from-port="1" to-layer="575" to-port="0" />
<edge from-layer="574" from-port="0" to-layer="575" to-port="1" />
<edge from-layer="575" from-port="2" to-layer="576" to-port="1" />
<edge from-layer="576" from-port="2" to-layer="578" to-port="0" />
<edge from-layer="577" from-port="0" to-layer="578" to-port="1" />
<edge from-layer="578" from-port="2" to-layer="579" to-port="0" />
<edge from-layer="579" from-port="2" to-layer="581" to-port="0" />
<edge from-layer="580" from-port="0" to-layer="581" to-port="1" />
<edge from-layer="581" from-port="2" to-layer="583" to-port="0" />
<edge from-layer="582" from-port="0" to-layer="583" to-port="1" />
<edge from-layer="583" from-port="2" to-layer="585" to-port="0" />
<edge from-layer="584" from-port="0" to-layer="585" to-port="1" />
<edge from-layer="585" from-port="2" to-layer="615" to-port="1" />
<edge from-layer="585" from-port="2" to-layer="587" to-port="0" />
<edge from-layer="586" from-port="0" to-layer="587" to-port="1" />
<edge from-layer="587" from-port="2" to-layer="592" to-port="0" />
<edge from-layer="588" from-port="0" to-layer="592" to-port="1" />
<edge from-layer="589" from-port="0" to-layer="592" to-port="2" />
<edge from-layer="590" from-port="0" to-layer="592" to-port="3" />
<edge from-layer="591" from-port="0" to-layer="592" to-port="4" />
<edge from-layer="592" from-port="5" to-layer="597" to-port="0" />
<edge from-layer="593" from-port="0" to-layer="594" to-port="0" />
<edge from-layer="594" from-port="1" to-layer="596" to-port="0" />
<edge from-layer="595" from-port="0" to-layer="596" to-port="1" />
<edge from-layer="596" from-port="2" to-layer="597" to-port="1" />
<edge from-layer="597" from-port="2" to-layer="599" to-port="0" />
<edge from-layer="598" from-port="0" to-layer="599" to-port="1" />
<edge from-layer="599" from-port="2" to-layer="600" to-port="0" />
<edge from-layer="600" from-port="1" to-layer="602" to-port="0" />
<edge from-layer="601" from-port="0" to-layer="602" to-port="1" />
<edge from-layer="602" from-port="2" to-layer="607" to-port="0" />
<edge from-layer="603" from-port="0" to-layer="607" to-port="1" />
<edge from-layer="604" from-port="0" to-layer="607" to-port="2" />
<edge from-layer="605" from-port="0" to-layer="607" to-port="3" />
<edge from-layer="606" from-port="0" to-layer="607" to-port="4" />
<edge from-layer="607" from-port="5" to-layer="612" to-port="0" />
<edge from-layer="608" from-port="0" to-layer="609" to-port="0" />
<edge from-layer="609" from-port="1" to-layer="611" to-port="0" />
<edge from-layer="610" from-port="0" to-layer="611" to-port="1" />
<edge from-layer="611" from-port="2" to-layer="612" to-port="1" />
<edge from-layer="612" from-port="2" to-layer="614" to-port="0" />
<edge from-layer="613" from-port="0" to-layer="614" to-port="1" />
<edge from-layer="614" from-port="2" to-layer="615" to-port="0" />
<edge from-layer="615" from-port="2" to-layer="617" to-port="0" />
<edge from-layer="616" from-port="0" to-layer="617" to-port="1" />
<edge from-layer="617" from-port="2" to-layer="619" to-port="0" />
<edge from-layer="618" from-port="0" to-layer="619" to-port="1" />
<edge from-layer="619" from-port="2" to-layer="621" to-port="0" />
<edge from-layer="620" from-port="0" to-layer="621" to-port="1" />
<edge from-layer="621" from-port="2" to-layer="623" to-port="0" />
<edge from-layer="621" from-port="2" to-layer="691" to-port="1" />
<edge from-layer="622" from-port="0" to-layer="623" to-port="1" />
<edge from-layer="623" from-port="2" to-layer="628" to-port="0" />
<edge from-layer="624" from-port="0" to-layer="628" to-port="1" />
<edge from-layer="625" from-port="0" to-layer="628" to-port="2" />
<edge from-layer="626" from-port="0" to-layer="628" to-port="3" />
<edge from-layer="627" from-port="0" to-layer="628" to-port="4" />
<edge from-layer="628" from-port="5" to-layer="665" to-port="0" />
<edge from-layer="628" from-port="5" to-layer="649" to-port="0" />
<edge from-layer="628" from-port="5" to-layer="633" to-port="0" />
<edge from-layer="629" from-port="0" to-layer="630" to-port="0" />
<edge from-layer="630" from-port="1" to-layer="632" to-port="0" />
<edge from-layer="631" from-port="0" to-layer="632" to-port="1" />
<edge from-layer="632" from-port="2" to-layer="633" to-port="1" />
<edge from-layer="633" from-port="2" to-layer="635" to-port="0" />
<edge from-layer="634" from-port="0" to-layer="635" to-port="1" />
<edge from-layer="635" from-port="2" to-layer="640" to-port="0" />
<edge from-layer="636" from-port="0" to-layer="640" to-port="1" />
<edge from-layer="637" from-port="0" to-layer="640" to-port="2" />
<edge from-layer="638" from-port="0" to-layer="640" to-port="3" />
<edge from-layer="639" from-port="0" to-layer="640" to-port="4" />
<edge from-layer="640" from-port="5" to-layer="642" to-port="0" />
<edge from-layer="641" from-port="0" to-layer="642" to-port="1" />
<edge from-layer="642" from-port="2" to-layer="644" to-port="0" />
<edge from-layer="643" from-port="0" to-layer="644" to-port="1" />
<edge from-layer="644" from-port="2" to-layer="672" to-port="0" />
<edge from-layer="645" from-port="0" to-layer="646" to-port="0" />
<edge from-layer="646" from-port="1" to-layer="648" to-port="0" />
<edge from-layer="647" from-port="0" to-layer="648" to-port="1" />
<edge from-layer="648" from-port="2" to-layer="649" to-port="1" />
<edge from-layer="649" from-port="2" to-layer="651" to-port="0" />
<edge from-layer="650" from-port="0" to-layer="651" to-port="1" />
<edge from-layer="651" from-port="2" to-layer="656" to-port="0" />
<edge from-layer="652" from-port="0" to-layer="656" to-port="1" />
<edge from-layer="653" from-port="0" to-layer="656" to-port="2" />
<edge from-layer="654" from-port="0" to-layer="656" to-port="3" />
<edge from-layer="655" from-port="0" to-layer="656" to-port="4" />
<edge from-layer="656" from-port="5" to-layer="658" to-port="0" />
<edge from-layer="657" from-port="0" to-layer="658" to-port="1" />
<edge from-layer="658" from-port="2" to-layer="660" to-port="0" />
<edge from-layer="659" from-port="0" to-layer="660" to-port="1" />
<edge from-layer="660" from-port="2" to-layer="672" to-port="1" />
<edge from-layer="661" from-port="0" to-layer="662" to-port="0" />
<edge from-layer="662" from-port="1" to-layer="664" to-port="0" />
<edge from-layer="663" from-port="0" to-layer="664" to-port="1" />
<edge from-layer="664" from-port="2" to-layer="665" to-port="1" />
<edge from-layer="665" from-port="2" to-layer="667" to-port="0" />
<edge from-layer="666" from-port="0" to-layer="667" to-port="1" />
<edge from-layer="667" from-port="2" to-layer="669" to-port="0" />
<edge from-layer="668" from-port="0" to-layer="669" to-port="1" />
<edge from-layer="669" from-port="2" to-layer="671" to-port="0" />
<edge from-layer="670" from-port="0" to-layer="671" to-port="1" />
<edge from-layer="671" from-port="2" to-layer="672" to-port="2" />
<edge from-layer="672" from-port="4" to-layer="674" to-port="0" />
<edge from-layer="673" from-port="0" to-layer="674" to-port="1" />
<edge from-layer="674" from-port="2" to-layer="676" to-port="0" />
<edge from-layer="675" from-port="0" to-layer="676" to-port="1" />
<edge from-layer="676" from-port="2" to-layer="678" to-port="0" />
<edge from-layer="677" from-port="0" to-layer="678" to-port="1" />
<edge from-layer="678" from-port="2" to-layer="683" to-port="0" />
<edge from-layer="679" from-port="0" to-layer="683" to-port="1" />
<edge from-layer="680" from-port="0" to-layer="683" to-port="2" />
<edge from-layer="681" from-port="0" to-layer="683" to-port="3" />
<edge from-layer="682" from-port="0" to-layer="683" to-port="4" />
<edge from-layer="683" from-port="5" to-layer="688" to-port="0" />
<edge from-layer="684" from-port="0" to-layer="685" to-port="0" />
<edge from-layer="685" from-port="1" to-layer="687" to-port="0" />
<edge from-layer="686" from-port="0" to-layer="687" to-port="1" />
<edge from-layer="687" from-port="2" to-layer="688" to-port="1" />
<edge from-layer="688" from-port="2" to-layer="690" to-port="0" />
<edge from-layer="689" from-port="0" to-layer="690" to-port="1" />
<edge from-layer="690" from-port="2" to-layer="691" to-port="0" />
<edge from-layer="691" from-port="2" to-layer="693" to-port="0" />
<edge from-layer="692" from-port="0" to-layer="693" to-port="1" />
<edge from-layer="693" from-port="2" to-layer="695" to-port="0" />
<edge from-layer="694" from-port="0" to-layer="695" to-port="1" />
<edge from-layer="695" from-port="2" to-layer="697" to-port="0" />
<edge from-layer="696" from-port="0" to-layer="697" to-port="1" />
<edge from-layer="697" from-port="2" to-layer="699" to-port="0" />
<edge from-layer="697" from-port="2" to-layer="727" to-port="1" />
<edge from-layer="698" from-port="0" to-layer="699" to-port="1" />
<edge from-layer="699" from-port="2" to-layer="704" to-port="0" />
<edge from-layer="700" from-port="0" to-layer="704" to-port="1" />
<edge from-layer="701" from-port="0" to-layer="704" to-port="2" />
<edge from-layer="702" from-port="0" to-layer="704" to-port="3" />
<edge from-layer="703" from-port="0" to-layer="704" to-port="4" />
<edge from-layer="704" from-port="5" to-layer="709" to-port="0" />
<edge from-layer="705" from-port="0" to-layer="706" to-port="0" />
<edge from-layer="706" from-port="1" to-layer="708" to-port="0" />
<edge from-layer="707" from-port="0" to-layer="708" to-port="1" />
<edge from-layer="708" from-port="2" to-layer="709" to-port="1" />
<edge from-layer="709" from-port="2" to-layer="711" to-port="0" />
<edge from-layer="710" from-port="0" to-layer="711" to-port="1" />
<edge from-layer="711" from-port="2" to-layer="712" to-port="0" />
<edge from-layer="712" from-port="1" to-layer="714" to-port="0" />
<edge from-layer="713" from-port="0" to-layer="714" to-port="1" />
<edge from-layer="714" from-port="2" to-layer="719" to-port="0" />
<edge from-layer="715" from-port="0" to-layer="719" to-port="1" />
<edge from-layer="716" from-port="0" to-layer="719" to-port="2" />
<edge from-layer="717" from-port="0" to-layer="719" to-port="3" />
<edge from-layer="718" from-port="0" to-layer="719" to-port="4" />
<edge from-layer="719" from-port="5" to-layer="724" to-port="0" />
<edge from-layer="720" from-port="0" to-layer="721" to-port="0" />
<edge from-layer="721" from-port="1" to-layer="723" to-port="0" />
<edge from-layer="722" from-port="0" to-layer="723" to-port="1" />
<edge from-layer="723" from-port="2" to-layer="724" to-port="1" />
<edge from-layer="724" from-port="2" to-layer="726" to-port="0" />
<edge from-layer="725" from-port="0" to-layer="726" to-port="1" />
<edge from-layer="726" from-port="2" to-layer="727" to-port="0" />
<edge from-layer="727" from-port="2" to-layer="729" to-port="0" />
<edge from-layer="728" from-port="0" to-layer="729" to-port="1" />
<edge from-layer="729" from-port="2" to-layer="731" to-port="0" />
<edge from-layer="730" from-port="0" to-layer="731" to-port="1" />
<edge from-layer="731" from-port="2" to-layer="733" to-port="0" />
<edge from-layer="732" from-port="0" to-layer="733" to-port="1" />
<edge from-layer="733" from-port="2" to-layer="735" to-port="0" />
<edge from-layer="733" from-port="2" to-layer="803" to-port="1" />
<edge from-layer="734" from-port="0" to-layer="735" to-port="1" />
<edge from-layer="735" from-port="2" to-layer="740" to-port="0" />
<edge from-layer="736" from-port="0" to-layer="740" to-port="1" />
<edge from-layer="737" from-port="0" to-layer="740" to-port="2" />
<edge from-layer="738" from-port="0" to-layer="740" to-port="3" />
<edge from-layer="739" from-port="0" to-layer="740" to-port="4" />
<edge from-layer="740" from-port="5" to-layer="777" to-port="0" />
<edge from-layer="740" from-port="5" to-layer="761" to-port="0" />
<edge from-layer="740" from-port="5" to-layer="745" to-port="0" />
<edge from-layer="741" from-port="0" to-layer="742" to-port="0" />
<edge from-layer="742" from-port="1" to-layer="744" to-port="0" />
<edge from-layer="743" from-port="0" to-layer="744" to-port="1" />
<edge from-layer="744" from-port="2" to-layer="745" to-port="1" />
<edge from-layer="745" from-port="2" to-layer="747" to-port="0" />
<edge from-layer="746" from-port="0" to-layer="747" to-port="1" />
<edge from-layer="747" from-port="2" to-layer="752" to-port="0" />
<edge from-layer="748" from-port="0" to-layer="752" to-port="1" />
<edge from-layer="749" from-port="0" to-layer="752" to-port="2" />
<edge from-layer="750" from-port="0" to-layer="752" to-port="3" />
<edge from-layer="751" from-port="0" to-layer="752" to-port="4" />
<edge from-layer="752" from-port="5" to-layer="754" to-port="0" />
<edge from-layer="753" from-port="0" to-layer="754" to-port="1" />
<edge from-layer="754" from-port="2" to-layer="756" to-port="0" />
<edge from-layer="755" from-port="0" to-layer="756" to-port="1" />
<edge from-layer="756" from-port="2" to-layer="784" to-port="0" />
<edge from-layer="757" from-port="0" to-layer="758" to-port="0" />
<edge from-layer="758" from-port="1" to-layer="760" to-port="0" />
<edge from-layer="759" from-port="0" to-layer="760" to-port="1" />
<edge from-layer="760" from-port="2" to-layer="761" to-port="1" />
<edge from-layer="761" from-port="2" to-layer="763" to-port="0" />
<edge from-layer="762" from-port="0" to-layer="763" to-port="1" />
<edge from-layer="763" from-port="2" to-layer="768" to-port="0" />
<edge from-layer="764" from-port="0" to-layer="768" to-port="1" />
<edge from-layer="765" from-port="0" to-layer="768" to-port="2" />
<edge from-layer="766" from-port="0" to-layer="768" to-port="3" />
<edge from-layer="767" from-port="0" to-layer="768" to-port="4" />
<edge from-layer="768" from-port="5" to-layer="770" to-port="0" />
<edge from-layer="769" from-port="0" to-layer="770" to-port="1" />
<edge from-layer="770" from-port="2" to-layer="772" to-port="0" />
<edge from-layer="771" from-port="0" to-layer="772" to-port="1" />
<edge from-layer="772" from-port="2" to-layer="784" to-port="1" />
<edge from-layer="773" from-port="0" to-layer="774" to-port="0" />
<edge from-layer="774" from-port="1" to-layer="776" to-port="0" />
<edge from-layer="775" from-port="0" to-layer="776" to-port="1" />
<edge from-layer="776" from-port="2" to-layer="777" to-port="1" />
<edge from-layer="777" from-port="2" to-layer="779" to-port="0" />
<edge from-layer="778" from-port="0" to-layer="779" to-port="1" />
<edge from-layer="779" from-port="2" to-layer="781" to-port="0" />
<edge from-layer="780" from-port="0" to-layer="781" to-port="1" />
<edge from-layer="781" from-port="2" to-layer="783" to-port="0" />
<edge from-layer="782" from-port="0" to-layer="783" to-port="1" />
<edge from-layer="783" from-port="2" to-layer="784" to-port="2" />
<edge from-layer="784" from-port="4" to-layer="786" to-port="0" />
<edge from-layer="785" from-port="0" to-layer="786" to-port="1" />
<edge from-layer="786" from-port="2" to-layer="788" to-port="0" />
<edge from-layer="787" from-port="0" to-layer="788" to-port="1" />
<edge from-layer="788" from-port="2" to-layer="790" to-port="0" />
<edge from-layer="789" from-port="0" to-layer="790" to-port="1" />
<edge from-layer="790" from-port="2" to-layer="795" to-port="0" />
<edge from-layer="791" from-port="0" to-layer="795" to-port="1" />
<edge from-layer="792" from-port="0" to-layer="795" to-port="2" />
<edge from-layer="793" from-port="0" to-layer="795" to-port="3" />
<edge from-layer="794" from-port="0" to-layer="795" to-port="4" />
<edge from-layer="795" from-port="5" to-layer="800" to-port="0" />
<edge from-layer="796" from-port="0" to-layer="797" to-port="0" />
<edge from-layer="797" from-port="1" to-layer="799" to-port="0" />
<edge from-layer="798" from-port="0" to-layer="799" to-port="1" />
<edge from-layer="799" from-port="2" to-layer="800" to-port="1" />
<edge from-layer="800" from-port="2" to-layer="802" to-port="0" />
<edge from-layer="801" from-port="0" to-layer="802" to-port="1" />
<edge from-layer="802" from-port="2" to-layer="803" to-port="0" />
<edge from-layer="803" from-port="2" to-layer="805" to-port="0" />
<edge from-layer="804" from-port="0" to-layer="805" to-port="1" />
<edge from-layer="805" from-port="2" to-layer="807" to-port="0" />
<edge from-layer="806" from-port="0" to-layer="807" to-port="1" />
<edge from-layer="807" from-port="2" to-layer="809" to-port="0" />
<edge from-layer="808" from-port="0" to-layer="809" to-port="1" />
<edge from-layer="809" from-port="2" to-layer="839" to-port="1" />
<edge from-layer="809" from-port="2" to-layer="811" to-port="0" />
<edge from-layer="810" from-port="0" to-layer="811" to-port="1" />
<edge from-layer="811" from-port="2" to-layer="816" to-port="0" />
<edge from-layer="812" from-port="0" to-layer="816" to-port="1" />
<edge from-layer="813" from-port="0" to-layer="816" to-port="2" />
<edge from-layer="814" from-port="0" to-layer="816" to-port="3" />
<edge from-layer="815" from-port="0" to-layer="816" to-port="4" />
<edge from-layer="816" from-port="5" to-layer="821" to-port="0" />
<edge from-layer="817" from-port="0" to-layer="818" to-port="0" />
<edge from-layer="818" from-port="1" to-layer="820" to-port="0" />
<edge from-layer="819" from-port="0" to-layer="820" to-port="1" />
<edge from-layer="820" from-port="2" to-layer="821" to-port="1" />
<edge from-layer="821" from-port="2" to-layer="823" to-port="0" />
<edge from-layer="822" from-port="0" to-layer="823" to-port="1" />
<edge from-layer="823" from-port="2" to-layer="824" to-port="0" />
<edge from-layer="824" from-port="1" to-layer="826" to-port="0" />
<edge from-layer="825" from-port="0" to-layer="826" to-port="1" />
<edge from-layer="826" from-port="2" to-layer="831" to-port="0" />
<edge from-layer="827" from-port="0" to-layer="831" to-port="1" />
<edge from-layer="828" from-port="0" to-layer="831" to-port="2" />
<edge from-layer="829" from-port="0" to-layer="831" to-port="3" />
<edge from-layer="830" from-port="0" to-layer="831" to-port="4" />
<edge from-layer="831" from-port="5" to-layer="836" to-port="0" />
<edge from-layer="832" from-port="0" to-layer="833" to-port="0" />
<edge from-layer="833" from-port="1" to-layer="835" to-port="0" />
<edge from-layer="834" from-port="0" to-layer="835" to-port="1" />
<edge from-layer="835" from-port="2" to-layer="836" to-port="1" />
<edge from-layer="836" from-port="2" to-layer="838" to-port="0" />
<edge from-layer="837" from-port="0" to-layer="838" to-port="1" />
<edge from-layer="838" from-port="2" to-layer="839" to-port="0" />
<edge from-layer="839" from-port="2" to-layer="841" to-port="0" />
<edge from-layer="840" from-port="0" to-layer="841" to-port="1" />
<edge from-layer="841" from-port="2" to-layer="843" to-port="0" />
<edge from-layer="842" from-port="0" to-layer="843" to-port="1" />
<edge from-layer="843" from-port="2" to-layer="845" to-port="0" />
<edge from-layer="844" from-port="0" to-layer="845" to-port="1" />
<edge from-layer="845" from-port="2" to-layer="915" to-port="1" />
<edge from-layer="845" from-port="2" to-layer="847" to-port="0" />
<edge from-layer="846" from-port="0" to-layer="847" to-port="1" />
<edge from-layer="847" from-port="2" to-layer="852" to-port="0" />
<edge from-layer="848" from-port="0" to-layer="852" to-port="1" />
<edge from-layer="849" from-port="0" to-layer="852" to-port="2" />
<edge from-layer="850" from-port="0" to-layer="852" to-port="3" />
<edge from-layer="851" from-port="0" to-layer="852" to-port="4" />
<edge from-layer="852" from-port="5" to-layer="889" to-port="0" />
<edge from-layer="852" from-port="5" to-layer="873" to-port="0" />
<edge from-layer="852" from-port="5" to-layer="857" to-port="0" />
<edge from-layer="853" from-port="0" to-layer="854" to-port="0" />
<edge from-layer="854" from-port="1" to-layer="856" to-port="0" />
<edge from-layer="855" from-port="0" to-layer="856" to-port="1" />
<edge from-layer="856" from-port="2" to-layer="857" to-port="1" />
<edge from-layer="857" from-port="2" to-layer="859" to-port="0" />
<edge from-layer="858" from-port="0" to-layer="859" to-port="1" />
<edge from-layer="859" from-port="2" to-layer="864" to-port="0" />
<edge from-layer="860" from-port="0" to-layer="864" to-port="1" />
<edge from-layer="861" from-port="0" to-layer="864" to-port="2" />
<edge from-layer="862" from-port="0" to-layer="864" to-port="3" />
<edge from-layer="863" from-port="0" to-layer="864" to-port="4" />
<edge from-layer="864" from-port="5" to-layer="866" to-port="0" />
<edge from-layer="865" from-port="0" to-layer="866" to-port="1" />
<edge from-layer="866" from-port="2" to-layer="868" to-port="0" />
<edge from-layer="867" from-port="0" to-layer="868" to-port="1" />
<edge from-layer="868" from-port="2" to-layer="896" to-port="0" />
<edge from-layer="869" from-port="0" to-layer="870" to-port="0" />
<edge from-layer="870" from-port="1" to-layer="872" to-port="0" />
<edge from-layer="871" from-port="0" to-layer="872" to-port="1" />
<edge from-layer="872" from-port="2" to-layer="873" to-port="1" />
<edge from-layer="873" from-port="2" to-layer="875" to-port="0" />
<edge from-layer="874" from-port="0" to-layer="875" to-port="1" />
<edge from-layer="875" from-port="2" to-layer="880" to-port="0" />
<edge from-layer="876" from-port="0" to-layer="880" to-port="1" />
<edge from-layer="877" from-port="0" to-layer="880" to-port="2" />
<edge from-layer="878" from-port="0" to-layer="880" to-port="3" />
<edge from-layer="879" from-port="0" to-layer="880" to-port="4" />
<edge from-layer="880" from-port="5" to-layer="882" to-port="0" />
<edge from-layer="881" from-port="0" to-layer="882" to-port="1" />
<edge from-layer="882" from-port="2" to-layer="884" to-port="0" />
<edge from-layer="883" from-port="0" to-layer="884" to-port="1" />
<edge from-layer="884" from-port="2" to-layer="896" to-port="1" />
<edge from-layer="885" from-port="0" to-layer="886" to-port="0" />
<edge from-layer="886" from-port="1" to-layer="888" to-port="0" />
<edge from-layer="887" from-port="0" to-layer="888" to-port="1" />
<edge from-layer="888" from-port="2" to-layer="889" to-port="1" />
<edge from-layer="889" from-port="2" to-layer="891" to-port="0" />
<edge from-layer="890" from-port="0" to-layer="891" to-port="1" />
<edge from-layer="891" from-port="2" to-layer="893" to-port="0" />
<edge from-layer="892" from-port="0" to-layer="893" to-port="1" />
<edge from-layer="893" from-port="2" to-layer="895" to-port="0" />
<edge from-layer="894" from-port="0" to-layer="895" to-port="1" />
<edge from-layer="895" from-port="2" to-layer="896" to-port="2" />
<edge from-layer="896" from-port="4" to-layer="898" to-port="0" />
<edge from-layer="897" from-port="0" to-layer="898" to-port="1" />
<edge from-layer="898" from-port="2" to-layer="900" to-port="0" />
<edge from-layer="899" from-port="0" to-layer="900" to-port="1" />
<edge from-layer="900" from-port="2" to-layer="902" to-port="0" />
<edge from-layer="901" from-port="0" to-layer="902" to-port="1" />
<edge from-layer="902" from-port="2" to-layer="907" to-port="0" />
<edge from-layer="903" from-port="0" to-layer="907" to-port="1" />
<edge from-layer="904" from-port="0" to-layer="907" to-port="2" />
<edge from-layer="905" from-port="0" to-layer="907" to-port="3" />
<edge from-layer="906" from-port="0" to-layer="907" to-port="4" />
<edge from-layer="907" from-port="5" to-layer="912" to-port="0" />
<edge from-layer="908" from-port="0" to-layer="909" to-port="0" />
<edge from-layer="909" from-port="1" to-layer="911" to-port="0" />
<edge from-layer="910" from-port="0" to-layer="911" to-port="1" />
<edge from-layer="911" from-port="2" to-layer="912" to-port="1" />
<edge from-layer="912" from-port="2" to-layer="914" to-port="0" />
<edge from-layer="913" from-port="0" to-layer="914" to-port="1" />
<edge from-layer="914" from-port="2" to-layer="915" to-port="0" />
<edge from-layer="915" from-port="2" to-layer="917" to-port="0" />
<edge from-layer="916" from-port="0" to-layer="917" to-port="1" />
<edge from-layer="917" from-port="2" to-layer="919" to-port="0" />
<edge from-layer="918" from-port="0" to-layer="919" to-port="1" />
<edge from-layer="919" from-port="2" to-layer="921" to-port="0" />
<edge from-layer="920" from-port="0" to-layer="921" to-port="1" />
<edge from-layer="921" from-port="2" to-layer="951" to-port="1" />
<edge from-layer="921" from-port="2" to-layer="923" to-port="0" />
<edge from-layer="922" from-port="0" to-layer="923" to-port="1" />
<edge from-layer="923" from-port="2" to-layer="928" to-port="0" />
<edge from-layer="924" from-port="0" to-layer="928" to-port="1" />
<edge from-layer="925" from-port="0" to-layer="928" to-port="2" />
<edge from-layer="926" from-port="0" to-layer="928" to-port="3" />
<edge from-layer="927" from-port="0" to-layer="928" to-port="4" />
<edge from-layer="928" from-port="5" to-layer="933" to-port="0" />
<edge from-layer="929" from-port="0" to-layer="930" to-port="0" />
<edge from-layer="930" from-port="1" to-layer="932" to-port="0" />
<edge from-layer="931" from-port="0" to-layer="932" to-port="1" />
<edge from-layer="932" from-port="2" to-layer="933" to-port="1" />
<edge from-layer="933" from-port="2" to-layer="935" to-port="0" />
<edge from-layer="934" from-port="0" to-layer="935" to-port="1" />
<edge from-layer="935" from-port="2" to-layer="936" to-port="0" />
<edge from-layer="936" from-port="1" to-layer="938" to-port="0" />
<edge from-layer="937" from-port="0" to-layer="938" to-port="1" />
<edge from-layer="938" from-port="2" to-layer="943" to-port="0" />
<edge from-layer="939" from-port="0" to-layer="943" to-port="1" />
<edge from-layer="940" from-port="0" to-layer="943" to-port="2" />
<edge from-layer="941" from-port="0" to-layer="943" to-port="3" />
<edge from-layer="942" from-port="0" to-layer="943" to-port="4" />
<edge from-layer="943" from-port="5" to-layer="948" to-port="0" />
<edge from-layer="944" from-port="0" to-layer="945" to-port="0" />
<edge from-layer="945" from-port="1" to-layer="947" to-port="0" />
<edge from-layer="946" from-port="0" to-layer="947" to-port="1" />
<edge from-layer="947" from-port="2" to-layer="948" to-port="1" />
<edge from-layer="948" from-port="2" to-layer="950" to-port="0" />
<edge from-layer="949" from-port="0" to-layer="950" to-port="1" />
<edge from-layer="950" from-port="2" to-layer="951" to-port="0" />
<edge from-layer="951" from-port="2" to-layer="953" to-port="0" />
<edge from-layer="952" from-port="0" to-layer="953" to-port="1" />
<edge from-layer="953" from-port="2" to-layer="955" to-port="0" />
<edge from-layer="954" from-port="0" to-layer="955" to-port="1" />
<edge from-layer="955" from-port="2" to-layer="957" to-port="0" />
<edge from-layer="956" from-port="0" to-layer="957" to-port="1" />
<edge from-layer="957" from-port="2" to-layer="1027" to-port="1" />
<edge from-layer="957" from-port="2" to-layer="959" to-port="0" />
<edge from-layer="958" from-port="0" to-layer="959" to-port="1" />
<edge from-layer="959" from-port="2" to-layer="964" to-port="0" />
<edge from-layer="960" from-port="0" to-layer="964" to-port="1" />
<edge from-layer="961" from-port="0" to-layer="964" to-port="2" />
<edge from-layer="962" from-port="0" to-layer="964" to-port="3" />
<edge from-layer="963" from-port="0" to-layer="964" to-port="4" />
<edge from-layer="964" from-port="5" to-layer="985" to-port="0" />
<edge from-layer="964" from-port="5" to-layer="1001" to-port="0" />
<edge from-layer="964" from-port="5" to-layer="969" to-port="0" />
<edge from-layer="965" from-port="0" to-layer="966" to-port="0" />
<edge from-layer="966" from-port="1" to-layer="968" to-port="0" />
<edge from-layer="967" from-port="0" to-layer="968" to-port="1" />
<edge from-layer="968" from-port="2" to-layer="969" to-port="1" />
<edge from-layer="969" from-port="2" to-layer="971" to-port="0" />
<edge from-layer="970" from-port="0" to-layer="971" to-port="1" />
<edge from-layer="971" from-port="2" to-layer="976" to-port="0" />
<edge from-layer="972" from-port="0" to-layer="976" to-port="1" />
<edge from-layer="973" from-port="0" to-layer="976" to-port="2" />
<edge from-layer="974" from-port="0" to-layer="976" to-port="3" />
<edge from-layer="975" from-port="0" to-layer="976" to-port="4" />
<edge from-layer="976" from-port="5" to-layer="978" to-port="0" />
<edge from-layer="977" from-port="0" to-layer="978" to-port="1" />
<edge from-layer="978" from-port="2" to-layer="980" to-port="0" />
<edge from-layer="979" from-port="0" to-layer="980" to-port="1" />
<edge from-layer="980" from-port="2" to-layer="1008" to-port="0" />
<edge from-layer="981" from-port="0" to-layer="982" to-port="0" />
<edge from-layer="982" from-port="1" to-layer="984" to-port="0" />
<edge from-layer="983" from-port="0" to-layer="984" to-port="1" />
<edge from-layer="984" from-port="2" to-layer="985" to-port="1" />
<edge from-layer="985" from-port="2" to-layer="987" to-port="0" />
<edge from-layer="986" from-port="0" to-layer="987" to-port="1" />
<edge from-layer="987" from-port="2" to-layer="992" to-port="0" />
<edge from-layer="988" from-port="0" to-layer="992" to-port="1" />
<edge from-layer="989" from-port="0" to-layer="992" to-port="2" />
<edge from-layer="990" from-port="0" to-layer="992" to-port="3" />
<edge from-layer="991" from-port="0" to-layer="992" to-port="4" />
<edge from-layer="992" from-port="5" to-layer="994" to-port="0" />
<edge from-layer="993" from-port="0" to-layer="994" to-port="1" />
<edge from-layer="994" from-port="2" to-layer="996" to-port="0" />
<edge from-layer="995" from-port="0" to-layer="996" to-port="1" />
<edge from-layer="996" from-port="2" to-layer="1008" to-port="1" />
<edge from-layer="997" from-port="0" to-layer="998" to-port="0" />
<edge from-layer="998" from-port="1" to-layer="1000" to-port="0" />
<edge from-layer="999" from-port="0" to-layer="1000" to-port="1" />
<edge from-layer="1000" from-port="2" to-layer="1001" to-port="1" />
<edge from-layer="1001" from-port="2" to-layer="1003" to-port="0" />
<edge from-layer="1002" from-port="0" to-layer="1003" to-port="1" />
<edge from-layer="1003" from-port="2" to-layer="1005" to-port="0" />
<edge from-layer="1004" from-port="0" to-layer="1005" to-port="1" />
<edge from-layer="1005" from-port="2" to-layer="1007" to-port="0" />
<edge from-layer="1006" from-port="0" to-layer="1007" to-port="1" />
<edge from-layer="1007" from-port="2" to-layer="1008" to-port="2" />
<edge from-layer="1008" from-port="4" to-layer="1010" to-port="0" />
<edge from-layer="1009" from-port="0" to-layer="1010" to-port="1" />
<edge from-layer="1010" from-port="2" to-layer="1012" to-port="0" />
<edge from-layer="1011" from-port="0" to-layer="1012" to-port="1" />
<edge from-layer="1012" from-port="2" to-layer="1014" to-port="0" />
<edge from-layer="1013" from-port="0" to-layer="1014" to-port="1" />
<edge from-layer="1014" from-port="2" to-layer="1019" to-port="0" />
<edge from-layer="1015" from-port="0" to-layer="1019" to-port="1" />
<edge from-layer="1016" from-port="0" to-layer="1019" to-port="2" />
<edge from-layer="1017" from-port="0" to-layer="1019" to-port="3" />
<edge from-layer="1018" from-port="0" to-layer="1019" to-port="4" />
<edge from-layer="1019" from-port="5" to-layer="1024" to-port="0" />
<edge from-layer="1020" from-port="0" to-layer="1021" to-port="0" />
<edge from-layer="1021" from-port="1" to-layer="1023" to-port="0" />
<edge from-layer="1022" from-port="0" to-layer="1023" to-port="1" />
<edge from-layer="1023" from-port="2" to-layer="1024" to-port="1" />
<edge from-layer="1024" from-port="2" to-layer="1026" to-port="0" />
<edge from-layer="1025" from-port="0" to-layer="1026" to-port="1" />
<edge from-layer="1026" from-port="2" to-layer="1027" to-port="0" />
<edge from-layer="1027" from-port="2" to-layer="1029" to-port="0" />
<edge from-layer="1028" from-port="0" to-layer="1029" to-port="1" />
<edge from-layer="1029" from-port="2" to-layer="1031" to-port="0" />
<edge from-layer="1030" from-port="0" to-layer="1031" to-port="1" />
<edge from-layer="1031" from-port="2" to-layer="1033" to-port="0" />
<edge from-layer="1032" from-port="0" to-layer="1033" to-port="1" />
<edge from-layer="1033" from-port="2" to-layer="1063" to-port="1" />
<edge from-layer="1033" from-port="2" to-layer="1035" to-port="0" />
<edge from-layer="1034" from-port="0" to-layer="1035" to-port="1" />
<edge from-layer="1035" from-port="2" to-layer="1040" to-port="0" />
<edge from-layer="1036" from-port="0" to-layer="1040" to-port="1" />
<edge from-layer="1037" from-port="0" to-layer="1040" to-port="2" />
<edge from-layer="1038" from-port="0" to-layer="1040" to-port="3" />
<edge from-layer="1039" from-port="0" to-layer="1040" to-port="4" />
<edge from-layer="1040" from-port="5" to-layer="1045" to-port="0" />
<edge from-layer="1041" from-port="0" to-layer="1042" to-port="0" />
<edge from-layer="1042" from-port="1" to-layer="1044" to-port="0" />
<edge from-layer="1043" from-port="0" to-layer="1044" to-port="1" />
<edge from-layer="1044" from-port="2" to-layer="1045" to-port="1" />
<edge from-layer="1045" from-port="2" to-layer="1047" to-port="0" />
<edge from-layer="1046" from-port="0" to-layer="1047" to-port="1" />
<edge from-layer="1047" from-port="2" to-layer="1048" to-port="0" />
<edge from-layer="1048" from-port="1" to-layer="1050" to-port="0" />
<edge from-layer="1049" from-port="0" to-layer="1050" to-port="1" />
<edge from-layer="1050" from-port="2" to-layer="1055" to-port="0" />
<edge from-layer="1051" from-port="0" to-layer="1055" to-port="1" />
<edge from-layer="1052" from-port="0" to-layer="1055" to-port="2" />
<edge from-layer="1053" from-port="0" to-layer="1055" to-port="3" />
<edge from-layer="1054" from-port="0" to-layer="1055" to-port="4" />
<edge from-layer="1055" from-port="5" to-layer="1060" to-port="0" />
<edge from-layer="1056" from-port="0" to-layer="1057" to-port="0" />
<edge from-layer="1057" from-port="1" to-layer="1059" to-port="0" />
<edge from-layer="1058" from-port="0" to-layer="1059" to-port="1" />
<edge from-layer="1059" from-port="2" to-layer="1060" to-port="1" />
<edge from-layer="1060" from-port="2" to-layer="1062" to-port="0" />
<edge from-layer="1061" from-port="0" to-layer="1062" to-port="1" />
<edge from-layer="1062" from-port="2" to-layer="1063" to-port="0" />
<edge from-layer="1063" from-port="2" to-layer="1065" to-port="0" />
<edge from-layer="1064" from-port="0" to-layer="1065" to-port="1" />
<edge from-layer="1065" from-port="2" to-layer="1067" to-port="0" />
<edge from-layer="1066" from-port="0" to-layer="1067" to-port="1" />
<edge from-layer="1067" from-port="2" to-layer="1069" to-port="0" />
<edge from-layer="1068" from-port="0" to-layer="1069" to-port="1" />
<edge from-layer="1069" from-port="2" to-layer="1071" to-port="0" />
<edge from-layer="1069" from-port="2" to-layer="1139" to-port="1" />
<edge from-layer="1070" from-port="0" to-layer="1071" to-port="1" />
<edge from-layer="1071" from-port="2" to-layer="1076" to-port="0" />
<edge from-layer="1072" from-port="0" to-layer="1076" to-port="1" />
<edge from-layer="1073" from-port="0" to-layer="1076" to-port="2" />
<edge from-layer="1074" from-port="0" to-layer="1076" to-port="3" />
<edge from-layer="1075" from-port="0" to-layer="1076" to-port="4" />
<edge from-layer="1076" from-port="5" to-layer="1097" to-port="0" />
<edge from-layer="1076" from-port="5" to-layer="1113" to-port="0" />
<edge from-layer="1076" from-port="5" to-layer="1081" to-port="0" />
<edge from-layer="1077" from-port="0" to-layer="1078" to-port="0" />
<edge from-layer="1078" from-port="1" to-layer="1080" to-port="0" />
<edge from-layer="1079" from-port="0" to-layer="1080" to-port="1" />
<edge from-layer="1080" from-port="2" to-layer="1081" to-port="1" />
<edge from-layer="1081" from-port="2" to-layer="1083" to-port="0" />
<edge from-layer="1082" from-port="0" to-layer="1083" to-port="1" />
<edge from-layer="1083" from-port="2" to-layer="1088" to-port="0" />
<edge from-layer="1084" from-port="0" to-layer="1088" to-port="1" />
<edge from-layer="1085" from-port="0" to-layer="1088" to-port="2" />
<edge from-layer="1086" from-port="0" to-layer="1088" to-port="3" />
<edge from-layer="1087" from-port="0" to-layer="1088" to-port="4" />
<edge from-layer="1088" from-port="5" to-layer="1090" to-port="0" />
<edge from-layer="1089" from-port="0" to-layer="1090" to-port="1" />
<edge from-layer="1090" from-port="2" to-layer="1092" to-port="0" />
<edge from-layer="1091" from-port="0" to-layer="1092" to-port="1" />
<edge from-layer="1092" from-port="2" to-layer="1120" to-port="0" />
<edge from-layer="1093" from-port="0" to-layer="1094" to-port="0" />
<edge from-layer="1094" from-port="1" to-layer="1096" to-port="0" />
<edge from-layer="1095" from-port="0" to-layer="1096" to-port="1" />
<edge from-layer="1096" from-port="2" to-layer="1097" to-port="1" />
<edge from-layer="1097" from-port="2" to-layer="1099" to-port="0" />
<edge from-layer="1098" from-port="0" to-layer="1099" to-port="1" />
<edge from-layer="1099" from-port="2" to-layer="1104" to-port="0" />
<edge from-layer="1100" from-port="0" to-layer="1104" to-port="1" />
<edge from-layer="1101" from-port="0" to-layer="1104" to-port="2" />
<edge from-layer="1102" from-port="0" to-layer="1104" to-port="3" />
<edge from-layer="1103" from-port="0" to-layer="1104" to-port="4" />
<edge from-layer="1104" from-port="5" to-layer="1106" to-port="0" />
<edge from-layer="1105" from-port="0" to-layer="1106" to-port="1" />
<edge from-layer="1106" from-port="2" to-layer="1108" to-port="0" />
<edge from-layer="1107" from-port="0" to-layer="1108" to-port="1" />
<edge from-layer="1108" from-port="2" to-layer="1120" to-port="1" />
<edge from-layer="1109" from-port="0" to-layer="1110" to-port="0" />
<edge from-layer="1110" from-port="1" to-layer="1112" to-port="0" />
<edge from-layer="1111" from-port="0" to-layer="1112" to-port="1" />
<edge from-layer="1112" from-port="2" to-layer="1113" to-port="1" />
<edge from-layer="1113" from-port="2" to-layer="1115" to-port="0" />
<edge from-layer="1114" from-port="0" to-layer="1115" to-port="1" />
<edge from-layer="1115" from-port="2" to-layer="1117" to-port="0" />
<edge from-layer="1116" from-port="0" to-layer="1117" to-port="1" />
<edge from-layer="1117" from-port="2" to-layer="1119" to-port="0" />
<edge from-layer="1118" from-port="0" to-layer="1119" to-port="1" />
<edge from-layer="1119" from-port="2" to-layer="1120" to-port="2" />
<edge from-layer="1120" from-port="4" to-layer="1122" to-port="0" />
<edge from-layer="1121" from-port="0" to-layer="1122" to-port="1" />
<edge from-layer="1122" from-port="2" to-layer="1124" to-port="0" />
<edge from-layer="1123" from-port="0" to-layer="1124" to-port="1" />
<edge from-layer="1124" from-port="2" to-layer="1126" to-port="0" />
<edge from-layer="1125" from-port="0" to-layer="1126" to-port="1" />
<edge from-layer="1126" from-port="2" to-layer="1131" to-port="0" />
<edge from-layer="1127" from-port="0" to-layer="1131" to-port="1" />
<edge from-layer="1128" from-port="0" to-layer="1131" to-port="2" />
<edge from-layer="1129" from-port="0" to-layer="1131" to-port="3" />
<edge from-layer="1130" from-port="0" to-layer="1131" to-port="4" />
<edge from-layer="1131" from-port="5" to-layer="1136" to-port="0" />
<edge from-layer="1132" from-port="0" to-layer="1133" to-port="0" />
<edge from-layer="1133" from-port="1" to-layer="1135" to-port="0" />
<edge from-layer="1134" from-port="0" to-layer="1135" to-port="1" />
<edge from-layer="1135" from-port="2" to-layer="1136" to-port="1" />
<edge from-layer="1136" from-port="2" to-layer="1138" to-port="0" />
<edge from-layer="1137" from-port="0" to-layer="1138" to-port="1" />
<edge from-layer="1138" from-port="2" to-layer="1139" to-port="0" />
<edge from-layer="1139" from-port="2" to-layer="1141" to-port="0" />
<edge from-layer="1140" from-port="0" to-layer="1141" to-port="1" />
<edge from-layer="1141" from-port="2" to-layer="1143" to-port="0" />
<edge from-layer="1142" from-port="0" to-layer="1143" to-port="1" />
<edge from-layer="1143" from-port="2" to-layer="1145" to-port="0" />
<edge from-layer="1144" from-port="0" to-layer="1145" to-port="1" />
<edge from-layer="1145" from-port="2" to-layer="1147" to-port="0" />
<edge from-layer="1145" from-port="2" to-layer="1175" to-port="1" />
<edge from-layer="1146" from-port="0" to-layer="1147" to-port="1" />
<edge from-layer="1147" from-port="2" to-layer="1152" to-port="0" />
<edge from-layer="1148" from-port="0" to-layer="1152" to-port="1" />
<edge from-layer="1149" from-port="0" to-layer="1152" to-port="2" />
<edge from-layer="1150" from-port="0" to-layer="1152" to-port="3" />
<edge from-layer="1151" from-port="0" to-layer="1152" to-port="4" />
<edge from-layer="1152" from-port="5" to-layer="1157" to-port="0" />
<edge from-layer="1153" from-port="0" to-layer="1154" to-port="0" />
<edge from-layer="1154" from-port="1" to-layer="1156" to-port="0" />
<edge from-layer="1155" from-port="0" to-layer="1156" to-port="1" />
<edge from-layer="1156" from-port="2" to-layer="1157" to-port="1" />
<edge from-layer="1157" from-port="2" to-layer="1159" to-port="0" />
<edge from-layer="1158" from-port="0" to-layer="1159" to-port="1" />
<edge from-layer="1159" from-port="2" to-layer="1160" to-port="0" />
<edge from-layer="1160" from-port="1" to-layer="1162" to-port="0" />
<edge from-layer="1161" from-port="0" to-layer="1162" to-port="1" />
<edge from-layer="1162" from-port="2" to-layer="1167" to-port="0" />
<edge from-layer="1163" from-port="0" to-layer="1167" to-port="1" />
<edge from-layer="1164" from-port="0" to-layer="1167" to-port="2" />
<edge from-layer="1165" from-port="0" to-layer="1167" to-port="3" />
<edge from-layer="1166" from-port="0" to-layer="1167" to-port="4" />
<edge from-layer="1167" from-port="5" to-layer="1172" to-port="0" />
<edge from-layer="1168" from-port="0" to-layer="1169" to-port="0" />
<edge from-layer="1169" from-port="1" to-layer="1171" to-port="0" />
<edge from-layer="1170" from-port="0" to-layer="1171" to-port="1" />
<edge from-layer="1171" from-port="2" to-layer="1172" to-port="1" />
<edge from-layer="1172" from-port="2" to-layer="1174" to-port="0" />
<edge from-layer="1173" from-port="0" to-layer="1174" to-port="1" />
<edge from-layer="1174" from-port="2" to-layer="1175" to-port="0" />
<edge from-layer="1175" from-port="2" to-layer="1177" to-port="0" />
<edge from-layer="1176" from-port="0" to-layer="1177" to-port="1" />
<edge from-layer="1177" from-port="2" to-layer="1179" to-port="0" />
<edge from-layer="1178" from-port="0" to-layer="1179" to-port="1" />
<edge from-layer="1179" from-port="2" to-layer="1181" to-port="0" />
<edge from-layer="1180" from-port="0" to-layer="1181" to-port="1" />
<edge from-layer="1181" from-port="2" to-layer="1251" to-port="1" />
<edge from-layer="1181" from-port="2" to-layer="1183" to-port="0" />
<edge from-layer="1182" from-port="0" to-layer="1183" to-port="1" />
<edge from-layer="1183" from-port="2" to-layer="1188" to-port="0" />
<edge from-layer="1184" from-port="0" to-layer="1188" to-port="1" />
<edge from-layer="1185" from-port="0" to-layer="1188" to-port="2" />
<edge from-layer="1186" from-port="0" to-layer="1188" to-port="3" />
<edge from-layer="1187" from-port="0" to-layer="1188" to-port="4" />
<edge from-layer="1188" from-port="5" to-layer="1225" to-port="0" />
<edge from-layer="1188" from-port="5" to-layer="1193" to-port="0" />
<edge from-layer="1188" from-port="5" to-layer="1209" to-port="0" />
<edge from-layer="1189" from-port="0" to-layer="1190" to-port="0" />
<edge from-layer="1190" from-port="1" to-layer="1192" to-port="0" />
<edge from-layer="1191" from-port="0" to-layer="1192" to-port="1" />
<edge from-layer="1192" from-port="2" to-layer="1193" to-port="1" />
<edge from-layer="1193" from-port="2" to-layer="1195" to-port="0" />
<edge from-layer="1194" from-port="0" to-layer="1195" to-port="1" />
<edge from-layer="1195" from-port="2" to-layer="1200" to-port="0" />
<edge from-layer="1196" from-port="0" to-layer="1200" to-port="1" />
<edge from-layer="1197" from-port="0" to-layer="1200" to-port="2" />
<edge from-layer="1198" from-port="0" to-layer="1200" to-port="3" />
<edge from-layer="1199" from-port="0" to-layer="1200" to-port="4" />
<edge from-layer="1200" from-port="5" to-layer="1202" to-port="0" />
<edge from-layer="1201" from-port="0" to-layer="1202" to-port="1" />
<edge from-layer="1202" from-port="2" to-layer="1204" to-port="0" />
<edge from-layer="1203" from-port="0" to-layer="1204" to-port="1" />
<edge from-layer="1204" from-port="2" to-layer="1232" to-port="0" />
<edge from-layer="1205" from-port="0" to-layer="1206" to-port="0" />
<edge from-layer="1206" from-port="1" to-layer="1208" to-port="0" />
<edge from-layer="1207" from-port="0" to-layer="1208" to-port="1" />
<edge from-layer="1208" from-port="2" to-layer="1209" to-port="1" />
<edge from-layer="1209" from-port="2" to-layer="1211" to-port="0" />
<edge from-layer="1210" from-port="0" to-layer="1211" to-port="1" />
<edge from-layer="1211" from-port="2" to-layer="1216" to-port="0" />
<edge from-layer="1212" from-port="0" to-layer="1216" to-port="1" />
<edge from-layer="1213" from-port="0" to-layer="1216" to-port="2" />
<edge from-layer="1214" from-port="0" to-layer="1216" to-port="3" />
<edge from-layer="1215" from-port="0" to-layer="1216" to-port="4" />
<edge from-layer="1216" from-port="5" to-layer="1218" to-port="0" />
<edge from-layer="1217" from-port="0" to-layer="1218" to-port="1" />
<edge from-layer="1218" from-port="2" to-layer="1220" to-port="0" />
<edge from-layer="1219" from-port="0" to-layer="1220" to-port="1" />
<edge from-layer="1220" from-port="2" to-layer="1232" to-port="1" />
<edge from-layer="1221" from-port="0" to-layer="1222" to-port="0" />
<edge from-layer="1222" from-port="1" to-layer="1224" to-port="0" />
<edge from-layer="1223" from-port="0" to-layer="1224" to-port="1" />
<edge from-layer="1224" from-port="2" to-layer="1225" to-port="1" />
<edge from-layer="1225" from-port="2" to-layer="1227" to-port="0" />
<edge from-layer="1226" from-port="0" to-layer="1227" to-port="1" />
<edge from-layer="1227" from-port="2" to-layer="1229" to-port="0" />
<edge from-layer="1228" from-port="0" to-layer="1229" to-port="1" />
<edge from-layer="1229" from-port="2" to-layer="1231" to-port="0" />
<edge from-layer="1230" from-port="0" to-layer="1231" to-port="1" />
<edge from-layer="1231" from-port="2" to-layer="1232" to-port="2" />
<edge from-layer="1232" from-port="4" to-layer="1234" to-port="0" />
<edge from-layer="1233" from-port="0" to-layer="1234" to-port="1" />
<edge from-layer="1234" from-port="2" to-layer="1236" to-port="0" />
<edge from-layer="1235" from-port="0" to-layer="1236" to-port="1" />
<edge from-layer="1236" from-port="2" to-layer="1238" to-port="0" />
<edge from-layer="1237" from-port="0" to-layer="1238" to-port="1" />
<edge from-layer="1238" from-port="2" to-layer="1243" to-port="0" />
<edge from-layer="1239" from-port="0" to-layer="1243" to-port="1" />
<edge from-layer="1240" from-port="0" to-layer="1243" to-port="2" />
<edge from-layer="1241" from-port="0" to-layer="1243" to-port="3" />
<edge from-layer="1242" from-port="0" to-layer="1243" to-port="4" />
<edge from-layer="1243" from-port="5" to-layer="1248" to-port="0" />
<edge from-layer="1244" from-port="0" to-layer="1245" to-port="0" />
<edge from-layer="1245" from-port="1" to-layer="1247" to-port="0" />
<edge from-layer="1246" from-port="0" to-layer="1247" to-port="1" />
<edge from-layer="1247" from-port="2" to-layer="1248" to-port="1" />
<edge from-layer="1248" from-port="2" to-layer="1250" to-port="0" />
<edge from-layer="1249" from-port="0" to-layer="1250" to-port="1" />
<edge from-layer="1250" from-port="2" to-layer="1251" to-port="0" />
<edge from-layer="1251" from-port="2" to-layer="1253" to-port="0" />
<edge from-layer="1252" from-port="0" to-layer="1253" to-port="1" />
<edge from-layer="1253" from-port="2" to-layer="1255" to-port="0" />
<edge from-layer="1254" from-port="0" to-layer="1255" to-port="1" />
<edge from-layer="1255" from-port="2" to-layer="1257" to-port="0" />
<edge from-layer="1256" from-port="0" to-layer="1257" to-port="1" />
<edge from-layer="1257" from-port="2" to-layer="1259" to-port="0" />
<edge from-layer="1257" from-port="2" to-layer="1287" to-port="1" />
<edge from-layer="1258" from-port="0" to-layer="1259" to-port="1" />
<edge from-layer="1259" from-port="2" to-layer="1264" to-port="0" />
<edge from-layer="1260" from-port="0" to-layer="1264" to-port="1" />
<edge from-layer="1261" from-port="0" to-layer="1264" to-port="2" />
<edge from-layer="1262" from-port="0" to-layer="1264" to-port="3" />
<edge from-layer="1263" from-port="0" to-layer="1264" to-port="4" />
<edge from-layer="1264" from-port="5" to-layer="1269" to-port="0" />
<edge from-layer="1265" from-port="0" to-layer="1266" to-port="0" />
<edge from-layer="1266" from-port="1" to-layer="1268" to-port="0" />
<edge from-layer="1267" from-port="0" to-layer="1268" to-port="1" />
<edge from-layer="1268" from-port="2" to-layer="1269" to-port="1" />
<edge from-layer="1269" from-port="2" to-layer="1271" to-port="0" />
<edge from-layer="1270" from-port="0" to-layer="1271" to-port="1" />
<edge from-layer="1271" from-port="2" to-layer="1272" to-port="0" />
<edge from-layer="1272" from-port="1" to-layer="1274" to-port="0" />
<edge from-layer="1273" from-port="0" to-layer="1274" to-port="1" />
<edge from-layer="1274" from-port="2" to-layer="1279" to-port="0" />
<edge from-layer="1275" from-port="0" to-layer="1279" to-port="1" />
<edge from-layer="1276" from-port="0" to-layer="1279" to-port="2" />
<edge from-layer="1277" from-port="0" to-layer="1279" to-port="3" />
<edge from-layer="1278" from-port="0" to-layer="1279" to-port="4" />
<edge from-layer="1279" from-port="5" to-layer="1284" to-port="0" />
<edge from-layer="1280" from-port="0" to-layer="1281" to-port="0" />
<edge from-layer="1281" from-port="1" to-layer="1283" to-port="0" />
<edge from-layer="1282" from-port="0" to-layer="1283" to-port="1" />
<edge from-layer="1283" from-port="2" to-layer="1284" to-port="1" />
<edge from-layer="1284" from-port="2" to-layer="1286" to-port="0" />
<edge from-layer="1285" from-port="0" to-layer="1286" to-port="1" />
<edge from-layer="1286" from-port="2" to-layer="1287" to-port="0" />
<edge from-layer="1287" from-port="2" to-layer="1289" to-port="0" />
<edge from-layer="1288" from-port="0" to-layer="1289" to-port="1" />
<edge from-layer="1289" from-port="2" to-layer="1291" to-port="0" />
<edge from-layer="1290" from-port="0" to-layer="1291" to-port="1" />
<edge from-layer="1291" from-port="2" to-layer="1293" to-port="0" />
<edge from-layer="1292" from-port="0" to-layer="1293" to-port="1" />
<edge from-layer="1293" from-port="2" to-layer="1363" to-port="1" />
<edge from-layer="1293" from-port="2" to-layer="1295" to-port="0" />
<edge from-layer="1294" from-port="0" to-layer="1295" to-port="1" />
<edge from-layer="1295" from-port="2" to-layer="1300" to-port="0" />
<edge from-layer="1296" from-port="0" to-layer="1300" to-port="1" />
<edge from-layer="1297" from-port="0" to-layer="1300" to-port="2" />
<edge from-layer="1298" from-port="0" to-layer="1300" to-port="3" />
<edge from-layer="1299" from-port="0" to-layer="1300" to-port="4" />
<edge from-layer="1300" from-port="5" to-layer="1305" to-port="0" />
<edge from-layer="1300" from-port="5" to-layer="1337" to-port="0" />
<edge from-layer="1300" from-port="5" to-layer="1321" to-port="0" />
<edge from-layer="1301" from-port="0" to-layer="1302" to-port="0" />
<edge from-layer="1302" from-port="1" to-layer="1304" to-port="0" />
<edge from-layer="1303" from-port="0" to-layer="1304" to-port="1" />
<edge from-layer="1304" from-port="2" to-layer="1305" to-port="1" />
<edge from-layer="1305" from-port="2" to-layer="1307" to-port="0" />
<edge from-layer="1306" from-port="0" to-layer="1307" to-port="1" />
<edge from-layer="1307" from-port="2" to-layer="1312" to-port="0" />
<edge from-layer="1308" from-port="0" to-layer="1312" to-port="1" />
<edge from-layer="1309" from-port="0" to-layer="1312" to-port="2" />
<edge from-layer="1310" from-port="0" to-layer="1312" to-port="3" />
<edge from-layer="1311" from-port="0" to-layer="1312" to-port="4" />
<edge from-layer="1312" from-port="5" to-layer="1314" to-port="0" />
<edge from-layer="1313" from-port="0" to-layer="1314" to-port="1" />
<edge from-layer="1314" from-port="2" to-layer="1316" to-port="0" />
<edge from-layer="1315" from-port="0" to-layer="1316" to-port="1" />
<edge from-layer="1316" from-port="2" to-layer="1344" to-port="0" />
<edge from-layer="1317" from-port="0" to-layer="1318" to-port="0" />
<edge from-layer="1318" from-port="1" to-layer="1320" to-port="0" />
<edge from-layer="1319" from-port="0" to-layer="1320" to-port="1" />
<edge from-layer="1320" from-port="2" to-layer="1321" to-port="1" />
<edge from-layer="1321" from-port="2" to-layer="1323" to-port="0" />
<edge from-layer="1322" from-port="0" to-layer="1323" to-port="1" />
<edge from-layer="1323" from-port="2" to-layer="1328" to-port="0" />
<edge from-layer="1324" from-port="0" to-layer="1328" to-port="1" />
<edge from-layer="1325" from-port="0" to-layer="1328" to-port="2" />
<edge from-layer="1326" from-port="0" to-layer="1328" to-port="3" />
<edge from-layer="1327" from-port="0" to-layer="1328" to-port="4" />
<edge from-layer="1328" from-port="5" to-layer="1330" to-port="0" />
<edge from-layer="1329" from-port="0" to-layer="1330" to-port="1" />
<edge from-layer="1330" from-port="2" to-layer="1332" to-port="0" />
<edge from-layer="1331" from-port="0" to-layer="1332" to-port="1" />
<edge from-layer="1332" from-port="2" to-layer="1344" to-port="1" />
<edge from-layer="1333" from-port="0" to-layer="1334" to-port="0" />
<edge from-layer="1334" from-port="1" to-layer="1336" to-port="0" />
<edge from-layer="1335" from-port="0" to-layer="1336" to-port="1" />
<edge from-layer="1336" from-port="2" to-layer="1337" to-port="1" />
<edge from-layer="1337" from-port="2" to-layer="1339" to-port="0" />
<edge from-layer="1338" from-port="0" to-layer="1339" to-port="1" />
<edge from-layer="1339" from-port="2" to-layer="1341" to-port="0" />
<edge from-layer="1340" from-port="0" to-layer="1341" to-port="1" />
<edge from-layer="1341" from-port="2" to-layer="1343" to-port="0" />
<edge from-layer="1342" from-port="0" to-layer="1343" to-port="1" />
<edge from-layer="1343" from-port="2" to-layer="1344" to-port="2" />
<edge from-layer="1344" from-port="4" to-layer="1346" to-port="0" />
<edge from-layer="1345" from-port="0" to-layer="1346" to-port="1" />
<edge from-layer="1346" from-port="2" to-layer="1348" to-port="0" />
<edge from-layer="1347" from-port="0" to-layer="1348" to-port="1" />
<edge from-layer="1348" from-port="2" to-layer="1350" to-port="0" />
<edge from-layer="1349" from-port="0" to-layer="1350" to-port="1" />
<edge from-layer="1350" from-port="2" to-layer="1355" to-port="0" />
<edge from-layer="1351" from-port="0" to-layer="1355" to-port="1" />
<edge from-layer="1352" from-port="0" to-layer="1355" to-port="2" />
<edge from-layer="1353" from-port="0" to-layer="1355" to-port="3" />
<edge from-layer="1354" from-port="0" to-layer="1355" to-port="4" />
<edge from-layer="1355" from-port="5" to-layer="1360" to-port="0" />
<edge from-layer="1356" from-port="0" to-layer="1357" to-port="0" />
<edge from-layer="1357" from-port="1" to-layer="1359" to-port="0" />
<edge from-layer="1358" from-port="0" to-layer="1359" to-port="1" />
<edge from-layer="1359" from-port="2" to-layer="1360" to-port="1" />
<edge from-layer="1360" from-port="2" to-layer="1362" to-port="0" />
<edge from-layer="1361" from-port="0" to-layer="1362" to-port="1" />
<edge from-layer="1362" from-port="2" to-layer="1363" to-port="0" />
<edge from-layer="1363" from-port="2" to-layer="1365" to-port="0" />
<edge from-layer="1364" from-port="0" to-layer="1365" to-port="1" />
<edge from-layer="1365" from-port="2" to-layer="1367" to-port="0" />
<edge from-layer="1366" from-port="0" to-layer="1367" to-port="1" />
<edge from-layer="1367" from-port="2" to-layer="1369" to-port="0" />
<edge from-layer="1368" from-port="0" to-layer="1369" to-port="1" />
<edge from-layer="1369" from-port="2" to-layer="1371" to-port="0" />
<edge from-layer="1369" from-port="2" to-layer="1399" to-port="1" />
<edge from-layer="1370" from-port="0" to-layer="1371" to-port="1" />
<edge from-layer="1371" from-port="2" to-layer="1376" to-port="0" />
<edge from-layer="1372" from-port="0" to-layer="1376" to-port="1" />
<edge from-layer="1373" from-port="0" to-layer="1376" to-port="2" />
<edge from-layer="1374" from-port="0" to-layer="1376" to-port="3" />
<edge from-layer="1375" from-port="0" to-layer="1376" to-port="4" />
<edge from-layer="1376" from-port="5" to-layer="1381" to-port="0" />
<edge from-layer="1377" from-port="0" to-layer="1378" to-port="0" />
<edge from-layer="1378" from-port="1" to-layer="1380" to-port="0" />
<edge from-layer="1379" from-port="0" to-layer="1380" to-port="1" />
<edge from-layer="1380" from-port="2" to-layer="1381" to-port="1" />
<edge from-layer="1381" from-port="2" to-layer="1383" to-port="0" />
<edge from-layer="1382" from-port="0" to-layer="1383" to-port="1" />
<edge from-layer="1383" from-port="2" to-layer="1384" to-port="0" />
<edge from-layer="1384" from-port="1" to-layer="1386" to-port="0" />
<edge from-layer="1385" from-port="0" to-layer="1386" to-port="1" />
<edge from-layer="1386" from-port="2" to-layer="1391" to-port="0" />
<edge from-layer="1387" from-port="0" to-layer="1391" to-port="1" />
<edge from-layer="1388" from-port="0" to-layer="1391" to-port="2" />
<edge from-layer="1389" from-port="0" to-layer="1391" to-port="3" />
<edge from-layer="1390" from-port="0" to-layer="1391" to-port="4" />
<edge from-layer="1391" from-port="5" to-layer="1396" to-port="0" />
<edge from-layer="1392" from-port="0" to-layer="1393" to-port="0" />
<edge from-layer="1393" from-port="1" to-layer="1395" to-port="0" />
<edge from-layer="1394" from-port="0" to-layer="1395" to-port="1" />
<edge from-layer="1395" from-port="2" to-layer="1396" to-port="1" />
<edge from-layer="1396" from-port="2" to-layer="1398" to-port="0" />
<edge from-layer="1397" from-port="0" to-layer="1398" to-port="1" />
<edge from-layer="1398" from-port="2" to-layer="1399" to-port="0" />
<edge from-layer="1399" from-port="2" to-layer="1401" to-port="0" />
<edge from-layer="1400" from-port="0" to-layer="1401" to-port="1" />
<edge from-layer="1401" from-port="2" to-layer="1403" to-port="0" />
<edge from-layer="1402" from-port="0" to-layer="1403" to-port="1" />
<edge from-layer="1403" from-port="2" to-layer="1405" to-port="0" />
<edge from-layer="1404" from-port="0" to-layer="1405" to-port="1" />
<edge from-layer="1405" from-port="2" to-layer="1407" to-port="0" />
<edge from-layer="1405" from-port="2" to-layer="1475" to-port="1" />
<edge from-layer="1406" from-port="0" to-layer="1407" to-port="1" />
<edge from-layer="1407" from-port="2" to-layer="1412" to-port="0" />
<edge from-layer="1408" from-port="0" to-layer="1412" to-port="1" />
<edge from-layer="1409" from-port="0" to-layer="1412" to-port="2" />
<edge from-layer="1410" from-port="0" to-layer="1412" to-port="3" />
<edge from-layer="1411" from-port="0" to-layer="1412" to-port="4" />
<edge from-layer="1412" from-port="5" to-layer="1449" to-port="0" />
<edge from-layer="1412" from-port="5" to-layer="1433" to-port="0" />
<edge from-layer="1412" from-port="5" to-layer="1417" to-port="0" />
<edge from-layer="1413" from-port="0" to-layer="1414" to-port="0" />
<edge from-layer="1414" from-port="1" to-layer="1416" to-port="0" />
<edge from-layer="1415" from-port="0" to-layer="1416" to-port="1" />
<edge from-layer="1416" from-port="2" to-layer="1417" to-port="1" />
<edge from-layer="1417" from-port="2" to-layer="1419" to-port="0" />
<edge from-layer="1418" from-port="0" to-layer="1419" to-port="1" />
<edge from-layer="1419" from-port="2" to-layer="1424" to-port="0" />
<edge from-layer="1420" from-port="0" to-layer="1424" to-port="1" />
<edge from-layer="1421" from-port="0" to-layer="1424" to-port="2" />
<edge from-layer="1422" from-port="0" to-layer="1424" to-port="3" />
<edge from-layer="1423" from-port="0" to-layer="1424" to-port="4" />
<edge from-layer="1424" from-port="5" to-layer="1426" to-port="0" />
<edge from-layer="1425" from-port="0" to-layer="1426" to-port="1" />
<edge from-layer="1426" from-port="2" to-layer="1428" to-port="0" />
<edge from-layer="1427" from-port="0" to-layer="1428" to-port="1" />
<edge from-layer="1428" from-port="2" to-layer="1456" to-port="0" />
<edge from-layer="1429" from-port="0" to-layer="1430" to-port="0" />
<edge from-layer="1430" from-port="1" to-layer="1432" to-port="0" />
<edge from-layer="1431" from-port="0" to-layer="1432" to-port="1" />
<edge from-layer="1432" from-port="2" to-layer="1433" to-port="1" />
<edge from-layer="1433" from-port="2" to-layer="1435" to-port="0" />
<edge from-layer="1434" from-port="0" to-layer="1435" to-port="1" />
<edge from-layer="1435" from-port="2" to-layer="1440" to-port="0" />
<edge from-layer="1436" from-port="0" to-layer="1440" to-port="1" />
<edge from-layer="1437" from-port="0" to-layer="1440" to-port="2" />
<edge from-layer="1438" from-port="0" to-layer="1440" to-port="3" />
<edge from-layer="1439" from-port="0" to-layer="1440" to-port="4" />
<edge from-layer="1440" from-port="5" to-layer="1442" to-port="0" />
<edge from-layer="1441" from-port="0" to-layer="1442" to-port="1" />
<edge from-layer="1442" from-port="2" to-layer="1444" to-port="0" />
<edge from-layer="1443" from-port="0" to-layer="1444" to-port="1" />
<edge from-layer="1444" from-port="2" to-layer="1456" to-port="1" />
<edge from-layer="1445" from-port="0" to-layer="1446" to-port="0" />
<edge from-layer="1446" from-port="1" to-layer="1448" to-port="0" />
<edge from-layer="1447" from-port="0" to-layer="1448" to-port="1" />
<edge from-layer="1448" from-port="2" to-layer="1449" to-port="1" />
<edge from-layer="1449" from-port="2" to-layer="1451" to-port="0" />
<edge from-layer="1450" from-port="0" to-layer="1451" to-port="1" />
<edge from-layer="1451" from-port="2" to-layer="1453" to-port="0" />
<edge from-layer="1452" from-port="0" to-layer="1453" to-port="1" />
<edge from-layer="1453" from-port="2" to-layer="1455" to-port="0" />
<edge from-layer="1454" from-port="0" to-layer="1455" to-port="1" />
<edge from-layer="1455" from-port="2" to-layer="1456" to-port="2" />
<edge from-layer="1456" from-port="4" to-layer="1458" to-port="0" />
<edge from-layer="1457" from-port="0" to-layer="1458" to-port="1" />
<edge from-layer="1458" from-port="2" to-layer="1460" to-port="0" />
<edge from-layer="1459" from-port="0" to-layer="1460" to-port="1" />
<edge from-layer="1460" from-port="2" to-layer="1462" to-port="0" />
<edge from-layer="1461" from-port="0" to-layer="1462" to-port="1" />
<edge from-layer="1462" from-port="2" to-layer="1467" to-port="0" />
<edge from-layer="1463" from-port="0" to-layer="1467" to-port="1" />
<edge from-layer="1464" from-port="0" to-layer="1467" to-port="2" />
<edge from-layer="1465" from-port="0" to-layer="1467" to-port="3" />
<edge from-layer="1466" from-port="0" to-layer="1467" to-port="4" />
<edge from-layer="1467" from-port="5" to-layer="1472" to-port="0" />
<edge from-layer="1468" from-port="0" to-layer="1469" to-port="0" />
<edge from-layer="1469" from-port="1" to-layer="1471" to-port="0" />
<edge from-layer="1470" from-port="0" to-layer="1471" to-port="1" />
<edge from-layer="1471" from-port="2" to-layer="1472" to-port="1" />
<edge from-layer="1472" from-port="2" to-layer="1474" to-port="0" />
<edge from-layer="1473" from-port="0" to-layer="1474" to-port="1" />
<edge from-layer="1474" from-port="2" to-layer="1475" to-port="0" />
<edge from-layer="1475" from-port="2" to-layer="1477" to-port="0" />
<edge from-layer="1476" from-port="0" to-layer="1477" to-port="1" />
<edge from-layer="1477" from-port="2" to-layer="1479" to-port="0" />
<edge from-layer="1478" from-port="0" to-layer="1479" to-port="1" />
<edge from-layer="1479" from-port="2" to-layer="1481" to-port="0" />
<edge from-layer="1480" from-port="0" to-layer="1481" to-port="1" />
<edge from-layer="1481" from-port="2" to-layer="1483" to-port="0" />
<edge from-layer="1481" from-port="2" to-layer="1511" to-port="1" />
<edge from-layer="1482" from-port="0" to-layer="1483" to-port="1" />
<edge from-layer="1483" from-port="2" to-layer="1488" to-port="0" />
<edge from-layer="1484" from-port="0" to-layer="1488" to-port="1" />
<edge from-layer="1485" from-port="0" to-layer="1488" to-port="2" />
<edge from-layer="1486" from-port="0" to-layer="1488" to-port="3" />
<edge from-layer="1487" from-port="0" to-layer="1488" to-port="4" />
<edge from-layer="1488" from-port="5" to-layer="1493" to-port="0" />
<edge from-layer="1489" from-port="0" to-layer="1490" to-port="0" />
<edge from-layer="1490" from-port="1" to-layer="1492" to-port="0" />
<edge from-layer="1491" from-port="0" to-layer="1492" to-port="1" />
<edge from-layer="1492" from-port="2" to-layer="1493" to-port="1" />
<edge from-layer="1493" from-port="2" to-layer="1495" to-port="0" />
<edge from-layer="1494" from-port="0" to-layer="1495" to-port="1" />
<edge from-layer="1495" from-port="2" to-layer="1496" to-port="0" />
<edge from-layer="1496" from-port="1" to-layer="1498" to-port="0" />
<edge from-layer="1497" from-port="0" to-layer="1498" to-port="1" />
<edge from-layer="1498" from-port="2" to-layer="1503" to-port="0" />
<edge from-layer="1499" from-port="0" to-layer="1503" to-port="1" />
<edge from-layer="1500" from-port="0" to-layer="1503" to-port="2" />
<edge from-layer="1501" from-port="0" to-layer="1503" to-port="3" />
<edge from-layer="1502" from-port="0" to-layer="1503" to-port="4" />
<edge from-layer="1503" from-port="5" to-layer="1508" to-port="0" />
<edge from-layer="1504" from-port="0" to-layer="1505" to-port="0" />
<edge from-layer="1505" from-port="1" to-layer="1507" to-port="0" />
<edge from-layer="1506" from-port="0" to-layer="1507" to-port="1" />
<edge from-layer="1507" from-port="2" to-layer="1508" to-port="1" />
<edge from-layer="1508" from-port="2" to-layer="1510" to-port="0" />
<edge from-layer="1509" from-port="0" to-layer="1510" to-port="1" />
<edge from-layer="1510" from-port="2" to-layer="1511" to-port="0" />
<edge from-layer="1511" from-port="2" to-layer="1513" to-port="0" />
<edge from-layer="1512" from-port="0" to-layer="1513" to-port="1" />
<edge from-layer="1513" from-port="2" to-layer="1515" to-port="0" />
<edge from-layer="1514" from-port="0" to-layer="1515" to-port="1" />
<edge from-layer="1515" from-port="2" to-layer="1517" to-port="0" />
<edge from-layer="1516" from-port="0" to-layer="1517" to-port="1" />
<edge from-layer="1517" from-port="2" to-layer="1587" to-port="1" />
<edge from-layer="1517" from-port="2" to-layer="1519" to-port="0" />
<edge from-layer="1518" from-port="0" to-layer="1519" to-port="1" />
<edge from-layer="1519" from-port="2" to-layer="1524" to-port="0" />
<edge from-layer="1520" from-port="0" to-layer="1524" to-port="1" />
<edge from-layer="1521" from-port="0" to-layer="1524" to-port="2" />
<edge from-layer="1522" from-port="0" to-layer="1524" to-port="3" />
<edge from-layer="1523" from-port="0" to-layer="1524" to-port="4" />
<edge from-layer="1524" from-port="5" to-layer="1529" to-port="0" />
<edge from-layer="1524" from-port="5" to-layer="1561" to-port="0" />
<edge from-layer="1524" from-port="5" to-layer="1545" to-port="0" />
<edge from-layer="1525" from-port="0" to-layer="1526" to-port="0" />
<edge from-layer="1526" from-port="1" to-layer="1528" to-port="0" />
<edge from-layer="1527" from-port="0" to-layer="1528" to-port="1" />
<edge from-layer="1528" from-port="2" to-layer="1529" to-port="1" />
<edge from-layer="1529" from-port="2" to-layer="1531" to-port="0" />
<edge from-layer="1530" from-port="0" to-layer="1531" to-port="1" />
<edge from-layer="1531" from-port="2" to-layer="1536" to-port="0" />
<edge from-layer="1532" from-port="0" to-layer="1536" to-port="1" />
<edge from-layer="1533" from-port="0" to-layer="1536" to-port="2" />
<edge from-layer="1534" from-port="0" to-layer="1536" to-port="3" />
<edge from-layer="1535" from-port="0" to-layer="1536" to-port="4" />
<edge from-layer="1536" from-port="5" to-layer="1538" to-port="0" />
<edge from-layer="1537" from-port="0" to-layer="1538" to-port="1" />
<edge from-layer="1538" from-port="2" to-layer="1540" to-port="0" />
<edge from-layer="1539" from-port="0" to-layer="1540" to-port="1" />
<edge from-layer="1540" from-port="2" to-layer="1568" to-port="0" />
<edge from-layer="1541" from-port="0" to-layer="1542" to-port="0" />
<edge from-layer="1542" from-port="1" to-layer="1544" to-port="0" />
<edge from-layer="1543" from-port="0" to-layer="1544" to-port="1" />
<edge from-layer="1544" from-port="2" to-layer="1545" to-port="1" />
<edge from-layer="1545" from-port="2" to-layer="1547" to-port="0" />
<edge from-layer="1546" from-port="0" to-layer="1547" to-port="1" />
<edge from-layer="1547" from-port="2" to-layer="1552" to-port="0" />
<edge from-layer="1548" from-port="0" to-layer="1552" to-port="1" />
<edge from-layer="1549" from-port="0" to-layer="1552" to-port="2" />
<edge from-layer="1550" from-port="0" to-layer="1552" to-port="3" />
<edge from-layer="1551" from-port="0" to-layer="1552" to-port="4" />
<edge from-layer="1552" from-port="5" to-layer="1554" to-port="0" />
<edge from-layer="1553" from-port="0" to-layer="1554" to-port="1" />
<edge from-layer="1554" from-port="2" to-layer="1556" to-port="0" />
<edge from-layer="1555" from-port="0" to-layer="1556" to-port="1" />
<edge from-layer="1556" from-port="2" to-layer="1568" to-port="1" />
<edge from-layer="1557" from-port="0" to-layer="1558" to-port="0" />
<edge from-layer="1558" from-port="1" to-layer="1560" to-port="0" />
<edge from-layer="1559" from-port="0" to-layer="1560" to-port="1" />
<edge from-layer="1560" from-port="2" to-layer="1561" to-port="1" />
<edge from-layer="1561" from-port="2" to-layer="1563" to-port="0" />
<edge from-layer="1562" from-port="0" to-layer="1563" to-port="1" />
<edge from-layer="1563" from-port="2" to-layer="1565" to-port="0" />
<edge from-layer="1564" from-port="0" to-layer="1565" to-port="1" />
<edge from-layer="1565" from-port="2" to-layer="1567" to-port="0" />
<edge from-layer="1566" from-port="0" to-layer="1567" to-port="1" />
<edge from-layer="1567" from-port="2" to-layer="1568" to-port="2" />
<edge from-layer="1568" from-port="4" to-layer="1570" to-port="0" />
<edge from-layer="1569" from-port="0" to-layer="1570" to-port="1" />
<edge from-layer="1570" from-port="2" to-layer="1572" to-port="0" />
<edge from-layer="1571" from-port="0" to-layer="1572" to-port="1" />
<edge from-layer="1572" from-port="2" to-layer="1574" to-port="0" />
<edge from-layer="1573" from-port="0" to-layer="1574" to-port="1" />
<edge from-layer="1574" from-port="2" to-layer="1579" to-port="0" />
<edge from-layer="1575" from-port="0" to-layer="1579" to-port="1" />
<edge from-layer="1576" from-port="0" to-layer="1579" to-port="2" />
<edge from-layer="1577" from-port="0" to-layer="1579" to-port="3" />
<edge from-layer="1578" from-port="0" to-layer="1579" to-port="4" />
<edge from-layer="1579" from-port="5" to-layer="1584" to-port="0" />
<edge from-layer="1580" from-port="0" to-layer="1581" to-port="0" />
<edge from-layer="1581" from-port="1" to-layer="1583" to-port="0" />
<edge from-layer="1582" from-port="0" to-layer="1583" to-port="1" />
<edge from-layer="1583" from-port="2" to-layer="1584" to-port="1" />
<edge from-layer="1584" from-port="2" to-layer="1586" to-port="0" />
<edge from-layer="1585" from-port="0" to-layer="1586" to-port="1" />
<edge from-layer="1586" from-port="2" to-layer="1587" to-port="0" />
<edge from-layer="1587" from-port="2" to-layer="1589" to-port="0" />
<edge from-layer="1588" from-port="0" to-layer="1589" to-port="1" />
<edge from-layer="1589" from-port="2" to-layer="1591" to-port="0" />
<edge from-layer="1590" from-port="0" to-layer="1591" to-port="1" />
<edge from-layer="1591" from-port="2" to-layer="1593" to-port="0" />
<edge from-layer="1592" from-port="0" to-layer="1593" to-port="1" />
<edge from-layer="1593" from-port="2" to-layer="1595" to-port="0" />
<edge from-layer="1593" from-port="2" to-layer="1623" to-port="1" />
<edge from-layer="1594" from-port="0" to-layer="1595" to-port="1" />
<edge from-layer="1595" from-port="2" to-layer="1600" to-port="0" />
<edge from-layer="1596" from-port="0" to-layer="1600" to-port="1" />
<edge from-layer="1597" from-port="0" to-layer="1600" to-port="2" />
<edge from-layer="1598" from-port="0" to-layer="1600" to-port="3" />
<edge from-layer="1599" from-port="0" to-layer="1600" to-port="4" />
<edge from-layer="1600" from-port="5" to-layer="1605" to-port="0" />
<edge from-layer="1601" from-port="0" to-layer="1602" to-port="0" />
<edge from-layer="1602" from-port="1" to-layer="1604" to-port="0" />
<edge from-layer="1603" from-port="0" to-layer="1604" to-port="1" />
<edge from-layer="1604" from-port="2" to-layer="1605" to-port="1" />
<edge from-layer="1605" from-port="2" to-layer="1607" to-port="0" />
<edge from-layer="1606" from-port="0" to-layer="1607" to-port="1" />
<edge from-layer="1607" from-port="2" to-layer="1608" to-port="0" />
<edge from-layer="1608" from-port="1" to-layer="1610" to-port="0" />
<edge from-layer="1609" from-port="0" to-layer="1610" to-port="1" />
<edge from-layer="1610" from-port="2" to-layer="1615" to-port="0" />
<edge from-layer="1611" from-port="0" to-layer="1615" to-port="1" />
<edge from-layer="1612" from-port="0" to-layer="1615" to-port="2" />
<edge from-layer="1613" from-port="0" to-layer="1615" to-port="3" />
<edge from-layer="1614" from-port="0" to-layer="1615" to-port="4" />
<edge from-layer="1615" from-port="5" to-layer="1620" to-port="0" />
<edge from-layer="1616" from-port="0" to-layer="1617" to-port="0" />
<edge from-layer="1617" from-port="1" to-layer="1619" to-port="0" />
<edge from-layer="1618" from-port="0" to-layer="1619" to-port="1" />
<edge from-layer="1619" from-port="2" to-layer="1620" to-port="1" />
<edge from-layer="1620" from-port="2" to-layer="1622" to-port="0" />
<edge from-layer="1621" from-port="0" to-layer="1622" to-port="1" />
<edge from-layer="1622" from-port="2" to-layer="1623" to-port="0" />
<edge from-layer="1623" from-port="2" to-layer="1625" to-port="0" />
<edge from-layer="1624" from-port="0" to-layer="1625" to-port="1" />
<edge from-layer="1625" from-port="2" to-layer="1627" to-port="0" />
<edge from-layer="1626" from-port="0" to-layer="1627" to-port="1" />
<edge from-layer="1627" from-port="2" to-layer="1629" to-port="0" />
<edge from-layer="1628" from-port="0" to-layer="1629" to-port="1" />
<edge from-layer="1629" from-port="2" to-layer="1699" to-port="1" />
<edge from-layer="1629" from-port="2" to-layer="1631" to-port="0" />
<edge from-layer="1630" from-port="0" to-layer="1631" to-port="1" />
<edge from-layer="1631" from-port="2" to-layer="1636" to-port="0" />
<edge from-layer="1632" from-port="0" to-layer="1636" to-port="1" />
<edge from-layer="1633" from-port="0" to-layer="1636" to-port="2" />
<edge from-layer="1634" from-port="0" to-layer="1636" to-port="3" />
<edge from-layer="1635" from-port="0" to-layer="1636" to-port="4" />
<edge from-layer="1636" from-port="5" to-layer="1673" to-port="0" />
<edge from-layer="1636" from-port="5" to-layer="1657" to-port="0" />
<edge from-layer="1636" from-port="5" to-layer="1641" to-port="0" />
<edge from-layer="1637" from-port="0" to-layer="1638" to-port="0" />
<edge from-layer="1638" from-port="1" to-layer="1640" to-port="0" />
<edge from-layer="1639" from-port="0" to-layer="1640" to-port="1" />
<edge from-layer="1640" from-port="2" to-layer="1641" to-port="1" />
<edge from-layer="1641" from-port="2" to-layer="1643" to-port="0" />
<edge from-layer="1642" from-port="0" to-layer="1643" to-port="1" />
<edge from-layer="1643" from-port="2" to-layer="1648" to-port="0" />
<edge from-layer="1644" from-port="0" to-layer="1648" to-port="1" />
<edge from-layer="1645" from-port="0" to-layer="1648" to-port="2" />
<edge from-layer="1646" from-port="0" to-layer="1648" to-port="3" />
<edge from-layer="1647" from-port="0" to-layer="1648" to-port="4" />
<edge from-layer="1648" from-port="5" to-layer="1650" to-port="0" />
<edge from-layer="1649" from-port="0" to-layer="1650" to-port="1" />
<edge from-layer="1650" from-port="2" to-layer="1652" to-port="0" />
<edge from-layer="1651" from-port="0" to-layer="1652" to-port="1" />
<edge from-layer="1652" from-port="2" to-layer="1680" to-port="0" />
<edge from-layer="1653" from-port="0" to-layer="1654" to-port="0" />
<edge from-layer="1654" from-port="1" to-layer="1656" to-port="0" />
<edge from-layer="1655" from-port="0" to-layer="1656" to-port="1" />
<edge from-layer="1656" from-port="2" to-layer="1657" to-port="1" />
<edge from-layer="1657" from-port="2" to-layer="1659" to-port="0" />
<edge from-layer="1658" from-port="0" to-layer="1659" to-port="1" />
<edge from-layer="1659" from-port="2" to-layer="1664" to-port="0" />
<edge from-layer="1660" from-port="0" to-layer="1664" to-port="1" />
<edge from-layer="1661" from-port="0" to-layer="1664" to-port="2" />
<edge from-layer="1662" from-port="0" to-layer="1664" to-port="3" />
<edge from-layer="1663" from-port="0" to-layer="1664" to-port="4" />
<edge from-layer="1664" from-port="5" to-layer="1666" to-port="0" />
<edge from-layer="1665" from-port="0" to-layer="1666" to-port="1" />
<edge from-layer="1666" from-port="2" to-layer="1668" to-port="0" />
<edge from-layer="1667" from-port="0" to-layer="1668" to-port="1" />
<edge from-layer="1668" from-port="2" to-layer="1680" to-port="1" />
<edge from-layer="1669" from-port="0" to-layer="1670" to-port="0" />
<edge from-layer="1670" from-port="1" to-layer="1672" to-port="0" />
<edge from-layer="1671" from-port="0" to-layer="1672" to-port="1" />
<edge from-layer="1672" from-port="2" to-layer="1673" to-port="1" />
<edge from-layer="1673" from-port="2" to-layer="1675" to-port="0" />
<edge from-layer="1674" from-port="0" to-layer="1675" to-port="1" />
<edge from-layer="1675" from-port="2" to-layer="1677" to-port="0" />
<edge from-layer="1676" from-port="0" to-layer="1677" to-port="1" />
<edge from-layer="1677" from-port="2" to-layer="1679" to-port="0" />
<edge from-layer="1678" from-port="0" to-layer="1679" to-port="1" />
<edge from-layer="1679" from-port="2" to-layer="1680" to-port="2" />
<edge from-layer="1680" from-port="4" to-layer="1682" to-port="0" />
<edge from-layer="1681" from-port="0" to-layer="1682" to-port="1" />
<edge from-layer="1682" from-port="2" to-layer="1684" to-port="0" />
<edge from-layer="1683" from-port="0" to-layer="1684" to-port="1" />
<edge from-layer="1684" from-port="2" to-layer="1686" to-port="0" />
<edge from-layer="1685" from-port="0" to-layer="1686" to-port="1" />
<edge from-layer="1686" from-port="2" to-layer="1691" to-port="0" />
<edge from-layer="1687" from-port="0" to-layer="1691" to-port="1" />
<edge from-layer="1688" from-port="0" to-layer="1691" to-port="2" />
<edge from-layer="1689" from-port="0" to-layer="1691" to-port="3" />
<edge from-layer="1690" from-port="0" to-layer="1691" to-port="4" />
<edge from-layer="1691" from-port="5" to-layer="1696" to-port="0" />
<edge from-layer="1692" from-port="0" to-layer="1693" to-port="0" />
<edge from-layer="1693" from-port="1" to-layer="1695" to-port="0" />
<edge from-layer="1694" from-port="0" to-layer="1695" to-port="1" />
<edge from-layer="1695" from-port="2" to-layer="1696" to-port="1" />
<edge from-layer="1696" from-port="2" to-layer="1698" to-port="0" />
<edge from-layer="1697" from-port="0" to-layer="1698" to-port="1" />
<edge from-layer="1698" from-port="2" to-layer="1699" to-port="0" />
<edge from-layer="1699" from-port="2" to-layer="1701" to-port="0" />
<edge from-layer="1700" from-port="0" to-layer="1701" to-port="1" />
<edge from-layer="1701" from-port="2" to-layer="1703" to-port="0" />
<edge from-layer="1702" from-port="0" to-layer="1703" to-port="1" />
<edge from-layer="1703" from-port="2" to-layer="1705" to-port="0" />
<edge from-layer="1704" from-port="0" to-layer="1705" to-port="1" />
<edge from-layer="1705" from-port="2" to-layer="1707" to-port="0" />
<edge from-layer="1705" from-port="2" to-layer="1735" to-port="1" />
<edge from-layer="1706" from-port="0" to-layer="1707" to-port="1" />
<edge from-layer="1707" from-port="2" to-layer="1712" to-port="0" />
<edge from-layer="1708" from-port="0" to-layer="1712" to-port="1" />
<edge from-layer="1709" from-port="0" to-layer="1712" to-port="2" />
<edge from-layer="1710" from-port="0" to-layer="1712" to-port="3" />
<edge from-layer="1711" from-port="0" to-layer="1712" to-port="4" />
<edge from-layer="1712" from-port="5" to-layer="1717" to-port="0" />
<edge from-layer="1713" from-port="0" to-layer="1714" to-port="0" />
<edge from-layer="1714" from-port="1" to-layer="1716" to-port="0" />
<edge from-layer="1715" from-port="0" to-layer="1716" to-port="1" />
<edge from-layer="1716" from-port="2" to-layer="1717" to-port="1" />
<edge from-layer="1717" from-port="2" to-layer="1719" to-port="0" />
<edge from-layer="1718" from-port="0" to-layer="1719" to-port="1" />
<edge from-layer="1719" from-port="2" to-layer="1720" to-port="0" />
<edge from-layer="1720" from-port="1" to-layer="1722" to-port="0" />
<edge from-layer="1721" from-port="0" to-layer="1722" to-port="1" />
<edge from-layer="1722" from-port="2" to-layer="1727" to-port="0" />
<edge from-layer="1723" from-port="0" to-layer="1727" to-port="1" />
<edge from-layer="1724" from-port="0" to-layer="1727" to-port="2" />
<edge from-layer="1725" from-port="0" to-layer="1727" to-port="3" />
<edge from-layer="1726" from-port="0" to-layer="1727" to-port="4" />
<edge from-layer="1727" from-port="5" to-layer="1732" to-port="0" />
<edge from-layer="1728" from-port="0" to-layer="1729" to-port="0" />
<edge from-layer="1729" from-port="1" to-layer="1731" to-port="0" />
<edge from-layer="1730" from-port="0" to-layer="1731" to-port="1" />
<edge from-layer="1731" from-port="2" to-layer="1732" to-port="1" />
<edge from-layer="1732" from-port="2" to-layer="1734" to-port="0" />
<edge from-layer="1733" from-port="0" to-layer="1734" to-port="1" />
<edge from-layer="1734" from-port="2" to-layer="1735" to-port="0" />
<edge from-layer="1735" from-port="2" to-layer="1737" to-port="0" />
<edge from-layer="1736" from-port="0" to-layer="1737" to-port="1" />
<edge from-layer="1737" from-port="2" to-layer="1739" to-port="0" />
<edge from-layer="1738" from-port="0" to-layer="1739" to-port="1" />
<edge from-layer="1739" from-port="2" to-layer="1741" to-port="0" />
<edge from-layer="1740" from-port="0" to-layer="1741" to-port="1" />
<edge from-layer="1741" from-port="2" to-layer="1811" to-port="1" />
<edge from-layer="1741" from-port="2" to-layer="1743" to-port="0" />
<edge from-layer="1742" from-port="0" to-layer="1743" to-port="1" />
<edge from-layer="1743" from-port="2" to-layer="1748" to-port="0" />
<edge from-layer="1744" from-port="0" to-layer="1748" to-port="1" />
<edge from-layer="1745" from-port="0" to-layer="1748" to-port="2" />
<edge from-layer="1746" from-port="0" to-layer="1748" to-port="3" />
<edge from-layer="1747" from-port="0" to-layer="1748" to-port="4" />
<edge from-layer="1748" from-port="5" to-layer="1753" to-port="0" />
<edge from-layer="1748" from-port="5" to-layer="1785" to-port="0" />
<edge from-layer="1748" from-port="5" to-layer="1769" to-port="0" />
<edge from-layer="1749" from-port="0" to-layer="1750" to-port="0" />
<edge from-layer="1750" from-port="1" to-layer="1752" to-port="0" />
<edge from-layer="1751" from-port="0" to-layer="1752" to-port="1" />
<edge from-layer="1752" from-port="2" to-layer="1753" to-port="1" />
<edge from-layer="1753" from-port="2" to-layer="1755" to-port="0" />
<edge from-layer="1754" from-port="0" to-layer="1755" to-port="1" />
<edge from-layer="1755" from-port="2" to-layer="1760" to-port="0" />
<edge from-layer="1756" from-port="0" to-layer="1760" to-port="1" />
<edge from-layer="1757" from-port="0" to-layer="1760" to-port="2" />
<edge from-layer="1758" from-port="0" to-layer="1760" to-port="3" />
<edge from-layer="1759" from-port="0" to-layer="1760" to-port="4" />
<edge from-layer="1760" from-port="5" to-layer="1762" to-port="0" />
<edge from-layer="1761" from-port="0" to-layer="1762" to-port="1" />
<edge from-layer="1762" from-port="2" to-layer="1764" to-port="0" />
<edge from-layer="1763" from-port="0" to-layer="1764" to-port="1" />
<edge from-layer="1764" from-port="2" to-layer="1792" to-port="0" />
<edge from-layer="1765" from-port="0" to-layer="1766" to-port="0" />
<edge from-layer="1766" from-port="1" to-layer="1768" to-port="0" />
<edge from-layer="1767" from-port="0" to-layer="1768" to-port="1" />
<edge from-layer="1768" from-port="2" to-layer="1769" to-port="1" />
<edge from-layer="1769" from-port="2" to-layer="1771" to-port="0" />
<edge from-layer="1770" from-port="0" to-layer="1771" to-port="1" />
<edge from-layer="1771" from-port="2" to-layer="1776" to-port="0" />
<edge from-layer="1772" from-port="0" to-layer="1776" to-port="1" />
<edge from-layer="1773" from-port="0" to-layer="1776" to-port="2" />
<edge from-layer="1774" from-port="0" to-layer="1776" to-port="3" />
<edge from-layer="1775" from-port="0" to-layer="1776" to-port="4" />
<edge from-layer="1776" from-port="5" to-layer="1778" to-port="0" />
<edge from-layer="1777" from-port="0" to-layer="1778" to-port="1" />
<edge from-layer="1778" from-port="2" to-layer="1780" to-port="0" />
<edge from-layer="1779" from-port="0" to-layer="1780" to-port="1" />
<edge from-layer="1780" from-port="2" to-layer="1792" to-port="1" />
<edge from-layer="1781" from-port="0" to-layer="1782" to-port="0" />
<edge from-layer="1782" from-port="1" to-layer="1784" to-port="0" />
<edge from-layer="1783" from-port="0" to-layer="1784" to-port="1" />
<edge from-layer="1784" from-port="2" to-layer="1785" to-port="1" />
<edge from-layer="1785" from-port="2" to-layer="1787" to-port="0" />
<edge from-layer="1786" from-port="0" to-layer="1787" to-port="1" />
<edge from-layer="1787" from-port="2" to-layer="1789" to-port="0" />
<edge from-layer="1788" from-port="0" to-layer="1789" to-port="1" />
<edge from-layer="1789" from-port="2" to-layer="1791" to-port="0" />
<edge from-layer="1790" from-port="0" to-layer="1791" to-port="1" />
<edge from-layer="1791" from-port="2" to-layer="1792" to-port="2" />
<edge from-layer="1792" from-port="4" to-layer="1794" to-port="0" />
<edge from-layer="1793" from-port="0" to-layer="1794" to-port="1" />
<edge from-layer="1794" from-port="2" to-layer="1796" to-port="0" />
<edge from-layer="1795" from-port="0" to-layer="1796" to-port="1" />
<edge from-layer="1796" from-port="2" to-layer="1798" to-port="0" />
<edge from-layer="1797" from-port="0" to-layer="1798" to-port="1" />
<edge from-layer="1798" from-port="2" to-layer="1803" to-port="0" />
<edge from-layer="1799" from-port="0" to-layer="1803" to-port="1" />
<edge from-layer="1800" from-port="0" to-layer="1803" to-port="2" />
<edge from-layer="1801" from-port="0" to-layer="1803" to-port="3" />
<edge from-layer="1802" from-port="0" to-layer="1803" to-port="4" />
<edge from-layer="1803" from-port="5" to-layer="1808" to-port="0" />
<edge from-layer="1804" from-port="0" to-layer="1805" to-port="0" />
<edge from-layer="1805" from-port="1" to-layer="1807" to-port="0" />
<edge from-layer="1806" from-port="0" to-layer="1807" to-port="1" />
<edge from-layer="1807" from-port="2" to-layer="1808" to-port="1" />
<edge from-layer="1808" from-port="2" to-layer="1810" to-port="0" />
<edge from-layer="1809" from-port="0" to-layer="1810" to-port="1" />
<edge from-layer="1810" from-port="2" to-layer="1811" to-port="0" />
<edge from-layer="1811" from-port="2" to-layer="1813" to-port="0" />
<edge from-layer="1812" from-port="0" to-layer="1813" to-port="1" />
<edge from-layer="1813" from-port="2" to-layer="1815" to-port="0" />
<edge from-layer="1814" from-port="0" to-layer="1815" to-port="1" />
<edge from-layer="1815" from-port="2" to-layer="1817" to-port="0" />
<edge from-layer="1816" from-port="0" to-layer="1817" to-port="1" />
<edge from-layer="1817" from-port="2" to-layer="1847" to-port="1" />
<edge from-layer="1817" from-port="2" to-layer="1819" to-port="0" />
<edge from-layer="1818" from-port="0" to-layer="1819" to-port="1" />
<edge from-layer="1819" from-port="2" to-layer="1824" to-port="0" />
<edge from-layer="1820" from-port="0" to-layer="1824" to-port="1" />
<edge from-layer="1821" from-port="0" to-layer="1824" to-port="2" />
<edge from-layer="1822" from-port="0" to-layer="1824" to-port="3" />
<edge from-layer="1823" from-port="0" to-layer="1824" to-port="4" />
<edge from-layer="1824" from-port="5" to-layer="1829" to-port="0" />
<edge from-layer="1825" from-port="0" to-layer="1826" to-port="0" />
<edge from-layer="1826" from-port="1" to-layer="1828" to-port="0" />
<edge from-layer="1827" from-port="0" to-layer="1828" to-port="1" />
<edge from-layer="1828" from-port="2" to-layer="1829" to-port="1" />
<edge from-layer="1829" from-port="2" to-layer="1831" to-port="0" />
<edge from-layer="1830" from-port="0" to-layer="1831" to-port="1" />
<edge from-layer="1831" from-port="2" to-layer="1832" to-port="0" />
<edge from-layer="1832" from-port="1" to-layer="1834" to-port="0" />
<edge from-layer="1833" from-port="0" to-layer="1834" to-port="1" />
<edge from-layer="1834" from-port="2" to-layer="1839" to-port="0" />
<edge from-layer="1835" from-port="0" to-layer="1839" to-port="1" />
<edge from-layer="1836" from-port="0" to-layer="1839" to-port="2" />
<edge from-layer="1837" from-port="0" to-layer="1839" to-port="3" />
<edge from-layer="1838" from-port="0" to-layer="1839" to-port="4" />
<edge from-layer="1839" from-port="5" to-layer="1844" to-port="0" />
<edge from-layer="1840" from-port="0" to-layer="1841" to-port="0" />
<edge from-layer="1841" from-port="1" to-layer="1843" to-port="0" />
<edge from-layer="1842" from-port="0" to-layer="1843" to-port="1" />
<edge from-layer="1843" from-port="2" to-layer="1844" to-port="1" />
<edge from-layer="1844" from-port="2" to-layer="1846" to-port="0" />
<edge from-layer="1845" from-port="0" to-layer="1846" to-port="1" />
<edge from-layer="1846" from-port="2" to-layer="1847" to-port="0" />
<edge from-layer="1847" from-port="2" to-layer="1849" to-port="0" />
<edge from-layer="1848" from-port="0" to-layer="1849" to-port="1" />
<edge from-layer="1849" from-port="2" to-layer="1851" to-port="0" />
<edge from-layer="1850" from-port="0" to-layer="1851" to-port="1" />
<edge from-layer="1851" from-port="2" to-layer="1853" to-port="0" />
<edge from-layer="1852" from-port="0" to-layer="1853" to-port="1" />
<edge from-layer="1853" from-port="2" to-layer="1923" to-port="1" />
<edge from-layer="1853" from-port="2" to-layer="1855" to-port="0" />
<edge from-layer="1854" from-port="0" to-layer="1855" to-port="1" />
<edge from-layer="1855" from-port="2" to-layer="1860" to-port="0" />
<edge from-layer="1856" from-port="0" to-layer="1860" to-port="1" />
<edge from-layer="1857" from-port="0" to-layer="1860" to-port="2" />
<edge from-layer="1858" from-port="0" to-layer="1860" to-port="3" />
<edge from-layer="1859" from-port="0" to-layer="1860" to-port="4" />
<edge from-layer="1860" from-port="5" to-layer="1865" to-port="0" />
<edge from-layer="1860" from-port="5" to-layer="1897" to-port="0" />
<edge from-layer="1860" from-port="5" to-layer="1881" to-port="0" />
<edge from-layer="1861" from-port="0" to-layer="1862" to-port="0" />
<edge from-layer="1862" from-port="1" to-layer="1864" to-port="0" />
<edge from-layer="1863" from-port="0" to-layer="1864" to-port="1" />
<edge from-layer="1864" from-port="2" to-layer="1865" to-port="1" />
<edge from-layer="1865" from-port="2" to-layer="1867" to-port="0" />
<edge from-layer="1866" from-port="0" to-layer="1867" to-port="1" />
<edge from-layer="1867" from-port="2" to-layer="1872" to-port="0" />
<edge from-layer="1868" from-port="0" to-layer="1872" to-port="1" />
<edge from-layer="1869" from-port="0" to-layer="1872" to-port="2" />
<edge from-layer="1870" from-port="0" to-layer="1872" to-port="3" />
<edge from-layer="1871" from-port="0" to-layer="1872" to-port="4" />
<edge from-layer="1872" from-port="5" to-layer="1874" to-port="0" />
<edge from-layer="1873" from-port="0" to-layer="1874" to-port="1" />
<edge from-layer="1874" from-port="2" to-layer="1876" to-port="0" />
<edge from-layer="1875" from-port="0" to-layer="1876" to-port="1" />
<edge from-layer="1876" from-port="2" to-layer="1904" to-port="0" />
<edge from-layer="1877" from-port="0" to-layer="1878" to-port="0" />
<edge from-layer="1878" from-port="1" to-layer="1880" to-port="0" />
<edge from-layer="1879" from-port="0" to-layer="1880" to-port="1" />
<edge from-layer="1880" from-port="2" to-layer="1881" to-port="1" />
<edge from-layer="1881" from-port="2" to-layer="1883" to-port="0" />
<edge from-layer="1882" from-port="0" to-layer="1883" to-port="1" />
<edge from-layer="1883" from-port="2" to-layer="1888" to-port="0" />
<edge from-layer="1884" from-port="0" to-layer="1888" to-port="1" />
<edge from-layer="1885" from-port="0" to-layer="1888" to-port="2" />
<edge from-layer="1886" from-port="0" to-layer="1888" to-port="3" />
<edge from-layer="1887" from-port="0" to-layer="1888" to-port="4" />
<edge from-layer="1888" from-port="5" to-layer="1890" to-port="0" />
<edge from-layer="1889" from-port="0" to-layer="1890" to-port="1" />
<edge from-layer="1890" from-port="2" to-layer="1892" to-port="0" />
<edge from-layer="1891" from-port="0" to-layer="1892" to-port="1" />
<edge from-layer="1892" from-port="2" to-layer="1904" to-port="1" />
<edge from-layer="1893" from-port="0" to-layer="1894" to-port="0" />
<edge from-layer="1894" from-port="1" to-layer="1896" to-port="0" />
<edge from-layer="1895" from-port="0" to-layer="1896" to-port="1" />
<edge from-layer="1896" from-port="2" to-layer="1897" to-port="1" />
<edge from-layer="1897" from-port="2" to-layer="1899" to-port="0" />
<edge from-layer="1898" from-port="0" to-layer="1899" to-port="1" />
<edge from-layer="1899" from-port="2" to-layer="1901" to-port="0" />
<edge from-layer="1900" from-port="0" to-layer="1901" to-port="1" />
<edge from-layer="1901" from-port="2" to-layer="1903" to-port="0" />
<edge from-layer="1902" from-port="0" to-layer="1903" to-port="1" />
<edge from-layer="1903" from-port="2" to-layer="1904" to-port="2" />
<edge from-layer="1904" from-port="4" to-layer="1906" to-port="0" />
<edge from-layer="1905" from-port="0" to-layer="1906" to-port="1" />
<edge from-layer="1906" from-port="2" to-layer="1908" to-port="0" />
<edge from-layer="1907" from-port="0" to-layer="1908" to-port="1" />
<edge from-layer="1908" from-port="2" to-layer="1910" to-port="0" />
<edge from-layer="1909" from-port="0" to-layer="1910" to-port="1" />
<edge from-layer="1910" from-port="2" to-layer="1915" to-port="0" />
<edge from-layer="1911" from-port="0" to-layer="1915" to-port="1" />
<edge from-layer="1912" from-port="0" to-layer="1915" to-port="2" />
<edge from-layer="1913" from-port="0" to-layer="1915" to-port="3" />
<edge from-layer="1914" from-port="0" to-layer="1915" to-port="4" />
<edge from-layer="1915" from-port="5" to-layer="1920" to-port="0" />
<edge from-layer="1916" from-port="0" to-layer="1917" to-port="0" />
<edge from-layer="1917" from-port="1" to-layer="1919" to-port="0" />
<edge from-layer="1918" from-port="0" to-layer="1919" to-port="1" />
<edge from-layer="1919" from-port="2" to-layer="1920" to-port="1" />
<edge from-layer="1920" from-port="2" to-layer="1922" to-port="0" />
<edge from-layer="1921" from-port="0" to-layer="1922" to-port="1" />
<edge from-layer="1922" from-port="2" to-layer="1923" to-port="0" />
<edge from-layer="1923" from-port="2" to-layer="1925" to-port="0" />
<edge from-layer="1924" from-port="0" to-layer="1925" to-port="1" />
<edge from-layer="1925" from-port="2" to-layer="1927" to-port="0" />
<edge from-layer="1926" from-port="0" to-layer="1927" to-port="1" />
<edge from-layer="1927" from-port="2" to-layer="1929" to-port="0" />
<edge from-layer="1928" from-port="0" to-layer="1929" to-port="1" />
<edge from-layer="1929" from-port="2" to-layer="1931" to-port="0" />
<edge from-layer="1929" from-port="2" to-layer="1959" to-port="1" />
<edge from-layer="1930" from-port="0" to-layer="1931" to-port="1" />
<edge from-layer="1931" from-port="2" to-layer="1936" to-port="0" />
<edge from-layer="1932" from-port="0" to-layer="1936" to-port="1" />
<edge from-layer="1933" from-port="0" to-layer="1936" to-port="2" />
<edge from-layer="1934" from-port="0" to-layer="1936" to-port="3" />
<edge from-layer="1935" from-port="0" to-layer="1936" to-port="4" />
<edge from-layer="1936" from-port="5" to-layer="1941" to-port="0" />
<edge from-layer="1937" from-port="0" to-layer="1938" to-port="0" />
<edge from-layer="1938" from-port="1" to-layer="1940" to-port="0" />
<edge from-layer="1939" from-port="0" to-layer="1940" to-port="1" />
<edge from-layer="1940" from-port="2" to-layer="1941" to-port="1" />
<edge from-layer="1941" from-port="2" to-layer="1943" to-port="0" />
<edge from-layer="1942" from-port="0" to-layer="1943" to-port="1" />
<edge from-layer="1943" from-port="2" to-layer="1944" to-port="0" />
<edge from-layer="1944" from-port="1" to-layer="1946" to-port="0" />
<edge from-layer="1945" from-port="0" to-layer="1946" to-port="1" />
<edge from-layer="1946" from-port="2" to-layer="1951" to-port="0" />
<edge from-layer="1947" from-port="0" to-layer="1951" to-port="1" />
<edge from-layer="1948" from-port="0" to-layer="1951" to-port="2" />
<edge from-layer="1949" from-port="0" to-layer="1951" to-port="3" />
<edge from-layer="1950" from-port="0" to-layer="1951" to-port="4" />
<edge from-layer="1951" from-port="5" to-layer="1956" to-port="0" />
<edge from-layer="1952" from-port="0" to-layer="1953" to-port="0" />
<edge from-layer="1953" from-port="1" to-layer="1955" to-port="0" />
<edge from-layer="1954" from-port="0" to-layer="1955" to-port="1" />
<edge from-layer="1955" from-port="2" to-layer="1956" to-port="1" />
<edge from-layer="1956" from-port="2" to-layer="1958" to-port="0" />
<edge from-layer="1957" from-port="0" to-layer="1958" to-port="1" />
<edge from-layer="1958" from-port="2" to-layer="1959" to-port="0" />
<edge from-layer="1959" from-port="2" to-layer="1961" to-port="0" />
<edge from-layer="1960" from-port="0" to-layer="1961" to-port="1" />
<edge from-layer="1961" from-port="2" to-layer="1963" to-port="0" />
<edge from-layer="1962" from-port="0" to-layer="1963" to-port="1" />
<edge from-layer="1963" from-port="2" to-layer="1965" to-port="0" />
<edge from-layer="1964" from-port="0" to-layer="1965" to-port="1" />
<edge from-layer="1965" from-port="2" to-layer="2035" to-port="1" />
<edge from-layer="1965" from-port="2" to-layer="1967" to-port="0" />
<edge from-layer="1966" from-port="0" to-layer="1967" to-port="1" />
<edge from-layer="1967" from-port="2" to-layer="1972" to-port="0" />
<edge from-layer="1968" from-port="0" to-layer="1972" to-port="1" />
<edge from-layer="1969" from-port="0" to-layer="1972" to-port="2" />
<edge from-layer="1970" from-port="0" to-layer="1972" to-port="3" />
<edge from-layer="1971" from-port="0" to-layer="1972" to-port="4" />
<edge from-layer="1972" from-port="5" to-layer="1977" to-port="0" />
<edge from-layer="1972" from-port="5" to-layer="1993" to-port="0" />
<edge from-layer="1972" from-port="5" to-layer="2009" to-port="0" />
<edge from-layer="1973" from-port="0" to-layer="1974" to-port="0" />
<edge from-layer="1974" from-port="1" to-layer="1976" to-port="0" />
<edge from-layer="1975" from-port="0" to-layer="1976" to-port="1" />
<edge from-layer="1976" from-port="2" to-layer="1977" to-port="1" />
<edge from-layer="1977" from-port="2" to-layer="1979" to-port="0" />
<edge from-layer="1978" from-port="0" to-layer="1979" to-port="1" />
<edge from-layer="1979" from-port="2" to-layer="1984" to-port="0" />
<edge from-layer="1980" from-port="0" to-layer="1984" to-port="1" />
<edge from-layer="1981" from-port="0" to-layer="1984" to-port="2" />
<edge from-layer="1982" from-port="0" to-layer="1984" to-port="3" />
<edge from-layer="1983" from-port="0" to-layer="1984" to-port="4" />
<edge from-layer="1984" from-port="5" to-layer="1986" to-port="0" />
<edge from-layer="1985" from-port="0" to-layer="1986" to-port="1" />
<edge from-layer="1986" from-port="2" to-layer="1988" to-port="0" />
<edge from-layer="1987" from-port="0" to-layer="1988" to-port="1" />
<edge from-layer="1988" from-port="2" to-layer="2016" to-port="0" />
<edge from-layer="1989" from-port="0" to-layer="1990" to-port="0" />
<edge from-layer="1990" from-port="1" to-layer="1992" to-port="0" />
<edge from-layer="1991" from-port="0" to-layer="1992" to-port="1" />
<edge from-layer="1992" from-port="2" to-layer="1993" to-port="1" />
<edge from-layer="1993" from-port="2" to-layer="1995" to-port="0" />
<edge from-layer="1994" from-port="0" to-layer="1995" to-port="1" />
<edge from-layer="1995" from-port="2" to-layer="2000" to-port="0" />
<edge from-layer="1996" from-port="0" to-layer="2000" to-port="1" />
<edge from-layer="1997" from-port="0" to-layer="2000" to-port="2" />
<edge from-layer="1998" from-port="0" to-layer="2000" to-port="3" />
<edge from-layer="1999" from-port="0" to-layer="2000" to-port="4" />
<edge from-layer="2000" from-port="5" to-layer="2002" to-port="0" />
<edge from-layer="2001" from-port="0" to-layer="2002" to-port="1" />
<edge from-layer="2002" from-port="2" to-layer="2004" to-port="0" />
<edge from-layer="2003" from-port="0" to-layer="2004" to-port="1" />
<edge from-layer="2004" from-port="2" to-layer="2016" to-port="1" />
<edge from-layer="2005" from-port="0" to-layer="2006" to-port="0" />
<edge from-layer="2006" from-port="1" to-layer="2008" to-port="0" />
<edge from-layer="2007" from-port="0" to-layer="2008" to-port="1" />
<edge from-layer="2008" from-port="2" to-layer="2009" to-port="1" />
<edge from-layer="2009" from-port="2" to-layer="2011" to-port="0" />
<edge from-layer="2010" from-port="0" to-layer="2011" to-port="1" />
<edge from-layer="2011" from-port="2" to-layer="2013" to-port="0" />
<edge from-layer="2012" from-port="0" to-layer="2013" to-port="1" />
<edge from-layer="2013" from-port="2" to-layer="2015" to-port="0" />
<edge from-layer="2014" from-port="0" to-layer="2015" to-port="1" />
<edge from-layer="2015" from-port="2" to-layer="2016" to-port="2" />
<edge from-layer="2016" from-port="4" to-layer="2018" to-port="0" />
<edge from-layer="2017" from-port="0" to-layer="2018" to-port="1" />
<edge from-layer="2018" from-port="2" to-layer="2020" to-port="0" />
<edge from-layer="2019" from-port="0" to-layer="2020" to-port="1" />
<edge from-layer="2020" from-port="2" to-layer="2022" to-port="0" />
<edge from-layer="2021" from-port="0" to-layer="2022" to-port="1" />
<edge from-layer="2022" from-port="2" to-layer="2027" to-port="0" />
<edge from-layer="2023" from-port="0" to-layer="2027" to-port="1" />
<edge from-layer="2024" from-port="0" to-layer="2027" to-port="2" />
<edge from-layer="2025" from-port="0" to-layer="2027" to-port="3" />
<edge from-layer="2026" from-port="0" to-layer="2027" to-port="4" />
<edge from-layer="2027" from-port="5" to-layer="2032" to-port="0" />
<edge from-layer="2028" from-port="0" to-layer="2029" to-port="0" />
<edge from-layer="2029" from-port="1" to-layer="2031" to-port="0" />
<edge from-layer="2030" from-port="0" to-layer="2031" to-port="1" />
<edge from-layer="2031" from-port="2" to-layer="2032" to-port="1" />
<edge from-layer="2032" from-port="2" to-layer="2034" to-port="0" />
<edge from-layer="2033" from-port="0" to-layer="2034" to-port="1" />
<edge from-layer="2034" from-port="2" to-layer="2035" to-port="0" />
<edge from-layer="2035" from-port="2" to-layer="2037" to-port="0" />
<edge from-layer="2036" from-port="0" to-layer="2037" to-port="1" />
<edge from-layer="2037" from-port="2" to-layer="2039" to-port="0" />
<edge from-layer="2038" from-port="0" to-layer="2039" to-port="1" />
<edge from-layer="2039" from-port="2" to-layer="2041" to-port="0" />
<edge from-layer="2040" from-port="0" to-layer="2041" to-port="1" />
<edge from-layer="2041" from-port="2" to-layer="2043" to-port="0" />
<edge from-layer="2041" from-port="2" to-layer="2071" to-port="1" />
<edge from-layer="2042" from-port="0" to-layer="2043" to-port="1" />
<edge from-layer="2043" from-port="2" to-layer="2048" to-port="0" />
<edge from-layer="2044" from-port="0" to-layer="2048" to-port="1" />
<edge from-layer="2045" from-port="0" to-layer="2048" to-port="2" />
<edge from-layer="2046" from-port="0" to-layer="2048" to-port="3" />
<edge from-layer="2047" from-port="0" to-layer="2048" to-port="4" />
<edge from-layer="2048" from-port="5" to-layer="2053" to-port="0" />
<edge from-layer="2049" from-port="0" to-layer="2050" to-port="0" />
<edge from-layer="2050" from-port="1" to-layer="2052" to-port="0" />
<edge from-layer="2051" from-port="0" to-layer="2052" to-port="1" />
<edge from-layer="2052" from-port="2" to-layer="2053" to-port="1" />
<edge from-layer="2053" from-port="2" to-layer="2055" to-port="0" />
<edge from-layer="2054" from-port="0" to-layer="2055" to-port="1" />
<edge from-layer="2055" from-port="2" to-layer="2056" to-port="0" />
<edge from-layer="2056" from-port="1" to-layer="2058" to-port="0" />
<edge from-layer="2057" from-port="0" to-layer="2058" to-port="1" />
<edge from-layer="2058" from-port="2" to-layer="2063" to-port="0" />
<edge from-layer="2059" from-port="0" to-layer="2063" to-port="1" />
<edge from-layer="2060" from-port="0" to-layer="2063" to-port="2" />
<edge from-layer="2061" from-port="0" to-layer="2063" to-port="3" />
<edge from-layer="2062" from-port="0" to-layer="2063" to-port="4" />
<edge from-layer="2063" from-port="5" to-layer="2068" to-port="0" />
<edge from-layer="2064" from-port="0" to-layer="2065" to-port="0" />
<edge from-layer="2065" from-port="1" to-layer="2067" to-port="0" />
<edge from-layer="2066" from-port="0" to-layer="2067" to-port="1" />
<edge from-layer="2067" from-port="2" to-layer="2068" to-port="1" />
<edge from-layer="2068" from-port="2" to-layer="2070" to-port="0" />
<edge from-layer="2069" from-port="0" to-layer="2070" to-port="1" />
<edge from-layer="2070" from-port="2" to-layer="2071" to-port="0" />
<edge from-layer="2071" from-port="2" to-layer="2073" to-port="0" />
<edge from-layer="2072" from-port="0" to-layer="2073" to-port="1" />
<edge from-layer="2073" from-port="2" to-layer="2075" to-port="0" />
<edge from-layer="2074" from-port="0" to-layer="2075" to-port="1" />
<edge from-layer="2075" from-port="2" to-layer="2077" to-port="0" />
<edge from-layer="2076" from-port="0" to-layer="2077" to-port="1" />
<edge from-layer="2077" from-port="2" to-layer="2079" to-port="0" />
<edge from-layer="2077" from-port="2" to-layer="2147" to-port="1" />
<edge from-layer="2078" from-port="0" to-layer="2079" to-port="1" />
<edge from-layer="2079" from-port="2" to-layer="2084" to-port="0" />
<edge from-layer="2080" from-port="0" to-layer="2084" to-port="1" />
<edge from-layer="2081" from-port="0" to-layer="2084" to-port="2" />
<edge from-layer="2082" from-port="0" to-layer="2084" to-port="3" />
<edge from-layer="2083" from-port="0" to-layer="2084" to-port="4" />
<edge from-layer="2084" from-port="5" to-layer="2089" to-port="0" />
<edge from-layer="2084" from-port="5" to-layer="2121" to-port="0" />
<edge from-layer="2084" from-port="5" to-layer="2105" to-port="0" />
<edge from-layer="2085" from-port="0" to-layer="2086" to-port="0" />
<edge from-layer="2086" from-port="1" to-layer="2088" to-port="0" />
<edge from-layer="2087" from-port="0" to-layer="2088" to-port="1" />
<edge from-layer="2088" from-port="2" to-layer="2089" to-port="1" />
<edge from-layer="2089" from-port="2" to-layer="2091" to-port="0" />
<edge from-layer="2090" from-port="0" to-layer="2091" to-port="1" />
<edge from-layer="2091" from-port="2" to-layer="2096" to-port="0" />
<edge from-layer="2092" from-port="0" to-layer="2096" to-port="1" />
<edge from-layer="2093" from-port="0" to-layer="2096" to-port="2" />
<edge from-layer="2094" from-port="0" to-layer="2096" to-port="3" />
<edge from-layer="2095" from-port="0" to-layer="2096" to-port="4" />
<edge from-layer="2096" from-port="5" to-layer="2098" to-port="0" />
<edge from-layer="2097" from-port="0" to-layer="2098" to-port="1" />
<edge from-layer="2098" from-port="2" to-layer="2100" to-port="0" />
<edge from-layer="2099" from-port="0" to-layer="2100" to-port="1" />
<edge from-layer="2100" from-port="2" to-layer="2128" to-port="0" />
<edge from-layer="2101" from-port="0" to-layer="2102" to-port="0" />
<edge from-layer="2102" from-port="1" to-layer="2104" to-port="0" />
<edge from-layer="2103" from-port="0" to-layer="2104" to-port="1" />
<edge from-layer="2104" from-port="2" to-layer="2105" to-port="1" />
<edge from-layer="2105" from-port="2" to-layer="2107" to-port="0" />
<edge from-layer="2106" from-port="0" to-layer="2107" to-port="1" />
<edge from-layer="2107" from-port="2" to-layer="2112" to-port="0" />
<edge from-layer="2108" from-port="0" to-layer="2112" to-port="1" />
<edge from-layer="2109" from-port="0" to-layer="2112" to-port="2" />
<edge from-layer="2110" from-port="0" to-layer="2112" to-port="3" />
<edge from-layer="2111" from-port="0" to-layer="2112" to-port="4" />
<edge from-layer="2112" from-port="5" to-layer="2114" to-port="0" />
<edge from-layer="2113" from-port="0" to-layer="2114" to-port="1" />
<edge from-layer="2114" from-port="2" to-layer="2116" to-port="0" />
<edge from-layer="2115" from-port="0" to-layer="2116" to-port="1" />
<edge from-layer="2116" from-port="2" to-layer="2128" to-port="1" />
<edge from-layer="2117" from-port="0" to-layer="2118" to-port="0" />
<edge from-layer="2118" from-port="1" to-layer="2120" to-port="0" />
<edge from-layer="2119" from-port="0" to-layer="2120" to-port="1" />
<edge from-layer="2120" from-port="2" to-layer="2121" to-port="1" />
<edge from-layer="2121" from-port="2" to-layer="2123" to-port="0" />
<edge from-layer="2122" from-port="0" to-layer="2123" to-port="1" />
<edge from-layer="2123" from-port="2" to-layer="2125" to-port="0" />
<edge from-layer="2124" from-port="0" to-layer="2125" to-port="1" />
<edge from-layer="2125" from-port="2" to-layer="2127" to-port="0" />
<edge from-layer="2126" from-port="0" to-layer="2127" to-port="1" />
<edge from-layer="2127" from-port="2" to-layer="2128" to-port="2" />
<edge from-layer="2128" from-port="4" to-layer="2130" to-port="0" />
<edge from-layer="2129" from-port="0" to-layer="2130" to-port="1" />
<edge from-layer="2130" from-port="2" to-layer="2132" to-port="0" />
<edge from-layer="2131" from-port="0" to-layer="2132" to-port="1" />
<edge from-layer="2132" from-port="2" to-layer="2134" to-port="0" />
<edge from-layer="2133" from-port="0" to-layer="2134" to-port="1" />
<edge from-layer="2134" from-port="2" to-layer="2139" to-port="0" />
<edge from-layer="2135" from-port="0" to-layer="2139" to-port="1" />
<edge from-layer="2136" from-port="0" to-layer="2139" to-port="2" />
<edge from-layer="2137" from-port="0" to-layer="2139" to-port="3" />
<edge from-layer="2138" from-port="0" to-layer="2139" to-port="4" />
<edge from-layer="2139" from-port="5" to-layer="2144" to-port="0" />
<edge from-layer="2140" from-port="0" to-layer="2141" to-port="0" />
<edge from-layer="2141" from-port="1" to-layer="2143" to-port="0" />
<edge from-layer="2142" from-port="0" to-layer="2143" to-port="1" />
<edge from-layer="2143" from-port="2" to-layer="2144" to-port="1" />
<edge from-layer="2144" from-port="2" to-layer="2146" to-port="0" />
<edge from-layer="2145" from-port="0" to-layer="2146" to-port="1" />
<edge from-layer="2146" from-port="2" to-layer="2147" to-port="0" />
<edge from-layer="2147" from-port="2" to-layer="2149" to-port="0" />
<edge from-layer="2148" from-port="0" to-layer="2149" to-port="1" />
<edge from-layer="2149" from-port="2" to-layer="2151" to-port="0" />
<edge from-layer="2150" from-port="0" to-layer="2151" to-port="1" />
<edge from-layer="2151" from-port="2" to-layer="2153" to-port="0" />
<edge from-layer="2152" from-port="0" to-layer="2153" to-port="1" />
<edge from-layer="2153" from-port="2" to-layer="2183" to-port="1" />
<edge from-layer="2153" from-port="2" to-layer="2155" to-port="0" />
<edge from-layer="2154" from-port="0" to-layer="2155" to-port="1" />
<edge from-layer="2155" from-port="2" to-layer="2160" to-port="0" />
<edge from-layer="2156" from-port="0" to-layer="2160" to-port="1" />
<edge from-layer="2157" from-port="0" to-layer="2160" to-port="2" />
<edge from-layer="2158" from-port="0" to-layer="2160" to-port="3" />
<edge from-layer="2159" from-port="0" to-layer="2160" to-port="4" />
<edge from-layer="2160" from-port="5" to-layer="2165" to-port="0" />
<edge from-layer="2161" from-port="0" to-layer="2162" to-port="0" />
<edge from-layer="2162" from-port="1" to-layer="2164" to-port="0" />
<edge from-layer="2163" from-port="0" to-layer="2164" to-port="1" />
<edge from-layer="2164" from-port="2" to-layer="2165" to-port="1" />
<edge from-layer="2165" from-port="2" to-layer="2167" to-port="0" />
<edge from-layer="2166" from-port="0" to-layer="2167" to-port="1" />
<edge from-layer="2167" from-port="2" to-layer="2168" to-port="0" />
<edge from-layer="2168" from-port="1" to-layer="2170" to-port="0" />
<edge from-layer="2169" from-port="0" to-layer="2170" to-port="1" />
<edge from-layer="2170" from-port="2" to-layer="2175" to-port="0" />
<edge from-layer="2171" from-port="0" to-layer="2175" to-port="1" />
<edge from-layer="2172" from-port="0" to-layer="2175" to-port="2" />
<edge from-layer="2173" from-port="0" to-layer="2175" to-port="3" />
<edge from-layer="2174" from-port="0" to-layer="2175" to-port="4" />
<edge from-layer="2175" from-port="5" to-layer="2180" to-port="0" />
<edge from-layer="2176" from-port="0" to-layer="2177" to-port="0" />
<edge from-layer="2177" from-port="1" to-layer="2179" to-port="0" />
<edge from-layer="2178" from-port="0" to-layer="2179" to-port="1" />
<edge from-layer="2179" from-port="2" to-layer="2180" to-port="1" />
<edge from-layer="2180" from-port="2" to-layer="2182" to-port="0" />
<edge from-layer="2181" from-port="0" to-layer="2182" to-port="1" />
<edge from-layer="2182" from-port="2" to-layer="2183" to-port="0" />
<edge from-layer="2183" from-port="2" to-layer="2185" to-port="0" />
<edge from-layer="2184" from-port="0" to-layer="2185" to-port="1" />
<edge from-layer="2185" from-port="2" to-layer="2187" to-port="0" />
<edge from-layer="2186" from-port="0" to-layer="2187" to-port="1" />
<edge from-layer="2187" from-port="2" to-layer="2189" to-port="0" />
<edge from-layer="2188" from-port="0" to-layer="2189" to-port="1" />
<edge from-layer="2189" from-port="2" to-layer="2259" to-port="1" />
<edge from-layer="2189" from-port="2" to-layer="2191" to-port="0" />
<edge from-layer="2190" from-port="0" to-layer="2191" to-port="1" />
<edge from-layer="2191" from-port="2" to-layer="2196" to-port="0" />
<edge from-layer="2192" from-port="0" to-layer="2196" to-port="1" />
<edge from-layer="2193" from-port="0" to-layer="2196" to-port="2" />
<edge from-layer="2194" from-port="0" to-layer="2196" to-port="3" />
<edge from-layer="2195" from-port="0" to-layer="2196" to-port="4" />
<edge from-layer="2196" from-port="5" to-layer="2201" to-port="0" />
<edge from-layer="2196" from-port="5" to-layer="2233" to-port="0" />
<edge from-layer="2196" from-port="5" to-layer="2217" to-port="0" />
<edge from-layer="2197" from-port="0" to-layer="2198" to-port="0" />
<edge from-layer="2198" from-port="1" to-layer="2200" to-port="0" />
<edge from-layer="2199" from-port="0" to-layer="2200" to-port="1" />
<edge from-layer="2200" from-port="2" to-layer="2201" to-port="1" />
<edge from-layer="2201" from-port="2" to-layer="2203" to-port="0" />
<edge from-layer="2202" from-port="0" to-layer="2203" to-port="1" />
<edge from-layer="2203" from-port="2" to-layer="2208" to-port="0" />
<edge from-layer="2204" from-port="0" to-layer="2208" to-port="1" />
<edge from-layer="2205" from-port="0" to-layer="2208" to-port="2" />
<edge from-layer="2206" from-port="0" to-layer="2208" to-port="3" />
<edge from-layer="2207" from-port="0" to-layer="2208" to-port="4" />
<edge from-layer="2208" from-port="5" to-layer="2210" to-port="0" />
<edge from-layer="2209" from-port="0" to-layer="2210" to-port="1" />
<edge from-layer="2210" from-port="2" to-layer="2212" to-port="0" />
<edge from-layer="2211" from-port="0" to-layer="2212" to-port="1" />
<edge from-layer="2212" from-port="2" to-layer="2240" to-port="0" />
<edge from-layer="2213" from-port="0" to-layer="2214" to-port="0" />
<edge from-layer="2214" from-port="1" to-layer="2216" to-port="0" />
<edge from-layer="2215" from-port="0" to-layer="2216" to-port="1" />
<edge from-layer="2216" from-port="2" to-layer="2217" to-port="1" />
<edge from-layer="2217" from-port="2" to-layer="2219" to-port="0" />
<edge from-layer="2218" from-port="0" to-layer="2219" to-port="1" />
<edge from-layer="2219" from-port="2" to-layer="2224" to-port="0" />
<edge from-layer="2220" from-port="0" to-layer="2224" to-port="1" />
<edge from-layer="2221" from-port="0" to-layer="2224" to-port="2" />
<edge from-layer="2222" from-port="0" to-layer="2224" to-port="3" />
<edge from-layer="2223" from-port="0" to-layer="2224" to-port="4" />
<edge from-layer="2224" from-port="5" to-layer="2226" to-port="0" />
<edge from-layer="2225" from-port="0" to-layer="2226" to-port="1" />
<edge from-layer="2226" from-port="2" to-layer="2228" to-port="0" />
<edge from-layer="2227" from-port="0" to-layer="2228" to-port="1" />
<edge from-layer="2228" from-port="2" to-layer="2240" to-port="1" />
<edge from-layer="2229" from-port="0" to-layer="2230" to-port="0" />
<edge from-layer="2230" from-port="1" to-layer="2232" to-port="0" />
<edge from-layer="2231" from-port="0" to-layer="2232" to-port="1" />
<edge from-layer="2232" from-port="2" to-layer="2233" to-port="1" />
<edge from-layer="2233" from-port="2" to-layer="2235" to-port="0" />
<edge from-layer="2234" from-port="0" to-layer="2235" to-port="1" />
<edge from-layer="2235" from-port="2" to-layer="2237" to-port="0" />
<edge from-layer="2236" from-port="0" to-layer="2237" to-port="1" />
<edge from-layer="2237" from-port="2" to-layer="2239" to-port="0" />
<edge from-layer="2238" from-port="0" to-layer="2239" to-port="1" />
<edge from-layer="2239" from-port="2" to-layer="2240" to-port="2" />
<edge from-layer="2240" from-port="4" to-layer="2242" to-port="0" />
<edge from-layer="2241" from-port="0" to-layer="2242" to-port="1" />
<edge from-layer="2242" from-port="2" to-layer="2244" to-port="0" />
<edge from-layer="2243" from-port="0" to-layer="2244" to-port="1" />
<edge from-layer="2244" from-port="2" to-layer="2246" to-port="0" />
<edge from-layer="2245" from-port="0" to-layer="2246" to-port="1" />
<edge from-layer="2246" from-port="2" to-layer="2251" to-port="0" />
<edge from-layer="2247" from-port="0" to-layer="2251" to-port="1" />
<edge from-layer="2248" from-port="0" to-layer="2251" to-port="2" />
<edge from-layer="2249" from-port="0" to-layer="2251" to-port="3" />
<edge from-layer="2250" from-port="0" to-layer="2251" to-port="4" />
<edge from-layer="2251" from-port="5" to-layer="2256" to-port="0" />
<edge from-layer="2252" from-port="0" to-layer="2253" to-port="0" />
<edge from-layer="2253" from-port="1" to-layer="2255" to-port="0" />
<edge from-layer="2254" from-port="0" to-layer="2255" to-port="1" />
<edge from-layer="2255" from-port="2" to-layer="2256" to-port="1" />
<edge from-layer="2256" from-port="2" to-layer="2258" to-port="0" />
<edge from-layer="2257" from-port="0" to-layer="2258" to-port="1" />
<edge from-layer="2258" from-port="2" to-layer="2259" to-port="0" />
<edge from-layer="2259" from-port="2" to-layer="2261" to-port="0" />
<edge from-layer="2260" from-port="0" to-layer="2261" to-port="1" />
<edge from-layer="2261" from-port="2" to-layer="2263" to-port="0" />
<edge from-layer="2262" from-port="0" to-layer="2263" to-port="1" />
<edge from-layer="2263" from-port="2" to-layer="2265" to-port="0" />
<edge from-layer="2264" from-port="0" to-layer="2265" to-port="1" />
<edge from-layer="2265" from-port="2" to-layer="2295" to-port="1" />
<edge from-layer="2265" from-port="2" to-layer="2267" to-port="0" />
<edge from-layer="2266" from-port="0" to-layer="2267" to-port="1" />
<edge from-layer="2267" from-port="2" to-layer="2272" to-port="0" />
<edge from-layer="2268" from-port="0" to-layer="2272" to-port="1" />
<edge from-layer="2269" from-port="0" to-layer="2272" to-port="2" />
<edge from-layer="2270" from-port="0" to-layer="2272" to-port="3" />
<edge from-layer="2271" from-port="0" to-layer="2272" to-port="4" />
<edge from-layer="2272" from-port="5" to-layer="2277" to-port="0" />
<edge from-layer="2273" from-port="0" to-layer="2274" to-port="0" />
<edge from-layer="2274" from-port="1" to-layer="2276" to-port="0" />
<edge from-layer="2275" from-port="0" to-layer="2276" to-port="1" />
<edge from-layer="2276" from-port="2" to-layer="2277" to-port="1" />
<edge from-layer="2277" from-port="2" to-layer="2279" to-port="0" />
<edge from-layer="2278" from-port="0" to-layer="2279" to-port="1" />
<edge from-layer="2279" from-port="2" to-layer="2280" to-port="0" />
<edge from-layer="2280" from-port="1" to-layer="2282" to-port="0" />
<edge from-layer="2281" from-port="0" to-layer="2282" to-port="1" />
<edge from-layer="2282" from-port="2" to-layer="2287" to-port="0" />
<edge from-layer="2283" from-port="0" to-layer="2287" to-port="1" />
<edge from-layer="2284" from-port="0" to-layer="2287" to-port="2" />
<edge from-layer="2285" from-port="0" to-layer="2287" to-port="3" />
<edge from-layer="2286" from-port="0" to-layer="2287" to-port="4" />
<edge from-layer="2287" from-port="5" to-layer="2292" to-port="0" />
<edge from-layer="2288" from-port="0" to-layer="2289" to-port="0" />
<edge from-layer="2289" from-port="1" to-layer="2291" to-port="0" />
<edge from-layer="2290" from-port="0" to-layer="2291" to-port="1" />
<edge from-layer="2291" from-port="2" to-layer="2292" to-port="1" />
<edge from-layer="2292" from-port="2" to-layer="2294" to-port="0" />
<edge from-layer="2293" from-port="0" to-layer="2294" to-port="1" />
<edge from-layer="2294" from-port="2" to-layer="2295" to-port="0" />
<edge from-layer="2295" from-port="2" to-layer="2297" to-port="0" />
<edge from-layer="2296" from-port="0" to-layer="2297" to-port="1" />
<edge from-layer="2297" from-port="2" to-layer="2299" to-port="0" />
<edge from-layer="2298" from-port="0" to-layer="2299" to-port="1" />
<edge from-layer="2299" from-port="2" to-layer="2301" to-port="0" />
<edge from-layer="2300" from-port="0" to-layer="2301" to-port="1" />
<edge from-layer="2301" from-port="2" to-layer="2371" to-port="1" />
<edge from-layer="2301" from-port="2" to-layer="2303" to-port="0" />
<edge from-layer="2302" from-port="0" to-layer="2303" to-port="1" />
<edge from-layer="2303" from-port="2" to-layer="2308" to-port="0" />
<edge from-layer="2304" from-port="0" to-layer="2308" to-port="1" />
<edge from-layer="2305" from-port="0" to-layer="2308" to-port="2" />
<edge from-layer="2306" from-port="0" to-layer="2308" to-port="3" />
<edge from-layer="2307" from-port="0" to-layer="2308" to-port="4" />
<edge from-layer="2308" from-port="5" to-layer="2345" to-port="0" />
<edge from-layer="2308" from-port="5" to-layer="2313" to-port="0" />
<edge from-layer="2308" from-port="5" to-layer="2329" to-port="0" />
<edge from-layer="2309" from-port="0" to-layer="2310" to-port="0" />
<edge from-layer="2310" from-port="1" to-layer="2312" to-port="0" />
<edge from-layer="2311" from-port="0" to-layer="2312" to-port="1" />
<edge from-layer="2312" from-port="2" to-layer="2313" to-port="1" />
<edge from-layer="2313" from-port="2" to-layer="2315" to-port="0" />
<edge from-layer="2314" from-port="0" to-layer="2315" to-port="1" />
<edge from-layer="2315" from-port="2" to-layer="2320" to-port="0" />
<edge from-layer="2316" from-port="0" to-layer="2320" to-port="1" />
<edge from-layer="2317" from-port="0" to-layer="2320" to-port="2" />
<edge from-layer="2318" from-port="0" to-layer="2320" to-port="3" />
<edge from-layer="2319" from-port="0" to-layer="2320" to-port="4" />
<edge from-layer="2320" from-port="5" to-layer="2322" to-port="0" />
<edge from-layer="2321" from-port="0" to-layer="2322" to-port="1" />
<edge from-layer="2322" from-port="2" to-layer="2324" to-port="0" />
<edge from-layer="2323" from-port="0" to-layer="2324" to-port="1" />
<edge from-layer="2324" from-port="2" to-layer="2352" to-port="0" />
<edge from-layer="2325" from-port="0" to-layer="2326" to-port="0" />
<edge from-layer="2326" from-port="1" to-layer="2328" to-port="0" />
<edge from-layer="2327" from-port="0" to-layer="2328" to-port="1" />
<edge from-layer="2328" from-port="2" to-layer="2329" to-port="1" />
<edge from-layer="2329" from-port="2" to-layer="2331" to-port="0" />
<edge from-layer="2330" from-port="0" to-layer="2331" to-port="1" />
<edge from-layer="2331" from-port="2" to-layer="2336" to-port="0" />
<edge from-layer="2332" from-port="0" to-layer="2336" to-port="1" />
<edge from-layer="2333" from-port="0" to-layer="2336" to-port="2" />
<edge from-layer="2334" from-port="0" to-layer="2336" to-port="3" />
<edge from-layer="2335" from-port="0" to-layer="2336" to-port="4" />
<edge from-layer="2336" from-port="5" to-layer="2338" to-port="0" />
<edge from-layer="2337" from-port="0" to-layer="2338" to-port="1" />
<edge from-layer="2338" from-port="2" to-layer="2340" to-port="0" />
<edge from-layer="2339" from-port="0" to-layer="2340" to-port="1" />
<edge from-layer="2340" from-port="2" to-layer="2352" to-port="1" />
<edge from-layer="2341" from-port="0" to-layer="2342" to-port="0" />
<edge from-layer="2342" from-port="1" to-layer="2344" to-port="0" />
<edge from-layer="2343" from-port="0" to-layer="2344" to-port="1" />
<edge from-layer="2344" from-port="2" to-layer="2345" to-port="1" />
<edge from-layer="2345" from-port="2" to-layer="2347" to-port="0" />
<edge from-layer="2346" from-port="0" to-layer="2347" to-port="1" />
<edge from-layer="2347" from-port="2" to-layer="2349" to-port="0" />
<edge from-layer="2348" from-port="0" to-layer="2349" to-port="1" />
<edge from-layer="2349" from-port="2" to-layer="2351" to-port="0" />
<edge from-layer="2350" from-port="0" to-layer="2351" to-port="1" />
<edge from-layer="2351" from-port="2" to-layer="2352" to-port="2" />
<edge from-layer="2352" from-port="4" to-layer="2354" to-port="0" />
<edge from-layer="2353" from-port="0" to-layer="2354" to-port="1" />
<edge from-layer="2354" from-port="2" to-layer="2356" to-port="0" />
<edge from-layer="2355" from-port="0" to-layer="2356" to-port="1" />
<edge from-layer="2356" from-port="2" to-layer="2358" to-port="0" />
<edge from-layer="2357" from-port="0" to-layer="2358" to-port="1" />
<edge from-layer="2358" from-port="2" to-layer="2363" to-port="0" />
<edge from-layer="2359" from-port="0" to-layer="2363" to-port="1" />
<edge from-layer="2360" from-port="0" to-layer="2363" to-port="2" />
<edge from-layer="2361" from-port="0" to-layer="2363" to-port="3" />
<edge from-layer="2362" from-port="0" to-layer="2363" to-port="4" />
<edge from-layer="2363" from-port="5" to-layer="2368" to-port="0" />
<edge from-layer="2364" from-port="0" to-layer="2365" to-port="0" />
<edge from-layer="2365" from-port="1" to-layer="2367" to-port="0" />
<edge from-layer="2366" from-port="0" to-layer="2367" to-port="1" />
<edge from-layer="2367" from-port="2" to-layer="2368" to-port="1" />
<edge from-layer="2368" from-port="2" to-layer="2370" to-port="0" />
<edge from-layer="2369" from-port="0" to-layer="2370" to-port="1" />
<edge from-layer="2370" from-port="2" to-layer="2371" to-port="0" />
<edge from-layer="2371" from-port="2" to-layer="2373" to-port="0" />
<edge from-layer="2372" from-port="0" to-layer="2373" to-port="1" />
<edge from-layer="2373" from-port="2" to-layer="2375" to-port="0" />
<edge from-layer="2374" from-port="0" to-layer="2375" to-port="1" />
<edge from-layer="2375" from-port="2" to-layer="2377" to-port="0" />
<edge from-layer="2376" from-port="0" to-layer="2377" to-port="1" />
<edge from-layer="2377" from-port="2" to-layer="2379" to-port="0" />
<edge from-layer="2377" from-port="2" to-layer="2407" to-port="1" />
<edge from-layer="2378" from-port="0" to-layer="2379" to-port="1" />
<edge from-layer="2379" from-port="2" to-layer="2384" to-port="0" />
<edge from-layer="2380" from-port="0" to-layer="2384" to-port="1" />
<edge from-layer="2381" from-port="0" to-layer="2384" to-port="2" />
<edge from-layer="2382" from-port="0" to-layer="2384" to-port="3" />
<edge from-layer="2383" from-port="0" to-layer="2384" to-port="4" />
<edge from-layer="2384" from-port="5" to-layer="2389" to-port="0" />
<edge from-layer="2385" from-port="0" to-layer="2386" to-port="0" />
<edge from-layer="2386" from-port="1" to-layer="2388" to-port="0" />
<edge from-layer="2387" from-port="0" to-layer="2388" to-port="1" />
<edge from-layer="2388" from-port="2" to-layer="2389" to-port="1" />
<edge from-layer="2389" from-port="2" to-layer="2391" to-port="0" />
<edge from-layer="2390" from-port="0" to-layer="2391" to-port="1" />
<edge from-layer="2391" from-port="2" to-layer="2392" to-port="0" />
<edge from-layer="2392" from-port="1" to-layer="2394" to-port="0" />
<edge from-layer="2393" from-port="0" to-layer="2394" to-port="1" />
<edge from-layer="2394" from-port="2" to-layer="2399" to-port="0" />
<edge from-layer="2395" from-port="0" to-layer="2399" to-port="1" />
<edge from-layer="2396" from-port="0" to-layer="2399" to-port="2" />
<edge from-layer="2397" from-port="0" to-layer="2399" to-port="3" />
<edge from-layer="2398" from-port="0" to-layer="2399" to-port="4" />
<edge from-layer="2399" from-port="5" to-layer="2404" to-port="0" />
<edge from-layer="2400" from-port="0" to-layer="2401" to-port="0" />
<edge from-layer="2401" from-port="1" to-layer="2403" to-port="0" />
<edge from-layer="2402" from-port="0" to-layer="2403" to-port="1" />
<edge from-layer="2403" from-port="2" to-layer="2404" to-port="1" />
<edge from-layer="2404" from-port="2" to-layer="2406" to-port="0" />
<edge from-layer="2405" from-port="0" to-layer="2406" to-port="1" />
<edge from-layer="2406" from-port="2" to-layer="2407" to-port="0" />
<edge from-layer="2407" from-port="2" to-layer="2409" to-port="0" />
<edge from-layer="2408" from-port="0" to-layer="2409" to-port="1" />
<edge from-layer="2409" from-port="2" to-layer="2411" to-port="0" />
<edge from-layer="2410" from-port="0" to-layer="2411" to-port="1" />
<edge from-layer="2411" from-port="2" to-layer="2413" to-port="0" />
<edge from-layer="2412" from-port="0" to-layer="2413" to-port="1" />
<edge from-layer="2413" from-port="2" to-layer="2483" to-port="1" />
<edge from-layer="2413" from-port="2" to-layer="2415" to-port="0" />
<edge from-layer="2414" from-port="0" to-layer="2415" to-port="1" />
<edge from-layer="2415" from-port="2" to-layer="2420" to-port="0" />
<edge from-layer="2416" from-port="0" to-layer="2420" to-port="1" />
<edge from-layer="2417" from-port="0" to-layer="2420" to-port="2" />
<edge from-layer="2418" from-port="0" to-layer="2420" to-port="3" />
<edge from-layer="2419" from-port="0" to-layer="2420" to-port="4" />
<edge from-layer="2420" from-port="5" to-layer="2425" to-port="0" />
<edge from-layer="2420" from-port="5" to-layer="2457" to-port="0" />
<edge from-layer="2420" from-port="5" to-layer="2441" to-port="0" />
<edge from-layer="2421" from-port="0" to-layer="2422" to-port="0" />
<edge from-layer="2422" from-port="1" to-layer="2424" to-port="0" />
<edge from-layer="2423" from-port="0" to-layer="2424" to-port="1" />
<edge from-layer="2424" from-port="2" to-layer="2425" to-port="1" />
<edge from-layer="2425" from-port="2" to-layer="2427" to-port="0" />
<edge from-layer="2426" from-port="0" to-layer="2427" to-port="1" />
<edge from-layer="2427" from-port="2" to-layer="2432" to-port="0" />
<edge from-layer="2428" from-port="0" to-layer="2432" to-port="1" />
<edge from-layer="2429" from-port="0" to-layer="2432" to-port="2" />
<edge from-layer="2430" from-port="0" to-layer="2432" to-port="3" />
<edge from-layer="2431" from-port="0" to-layer="2432" to-port="4" />
<edge from-layer="2432" from-port="5" to-layer="2434" to-port="0" />
<edge from-layer="2433" from-port="0" to-layer="2434" to-port="1" />
<edge from-layer="2434" from-port="2" to-layer="2436" to-port="0" />
<edge from-layer="2435" from-port="0" to-layer="2436" to-port="1" />
<edge from-layer="2436" from-port="2" to-layer="2464" to-port="0" />
<edge from-layer="2437" from-port="0" to-layer="2438" to-port="0" />
<edge from-layer="2438" from-port="1" to-layer="2440" to-port="0" />
<edge from-layer="2439" from-port="0" to-layer="2440" to-port="1" />
<edge from-layer="2440" from-port="2" to-layer="2441" to-port="1" />
<edge from-layer="2441" from-port="2" to-layer="2443" to-port="0" />
<edge from-layer="2442" from-port="0" to-layer="2443" to-port="1" />
<edge from-layer="2443" from-port="2" to-layer="2448" to-port="0" />
<edge from-layer="2444" from-port="0" to-layer="2448" to-port="1" />
<edge from-layer="2445" from-port="0" to-layer="2448" to-port="2" />
<edge from-layer="2446" from-port="0" to-layer="2448" to-port="3" />
<edge from-layer="2447" from-port="0" to-layer="2448" to-port="4" />
<edge from-layer="2448" from-port="5" to-layer="2450" to-port="0" />
<edge from-layer="2449" from-port="0" to-layer="2450" to-port="1" />
<edge from-layer="2450" from-port="2" to-layer="2452" to-port="0" />
<edge from-layer="2451" from-port="0" to-layer="2452" to-port="1" />
<edge from-layer="2452" from-port="2" to-layer="2464" to-port="1" />
<edge from-layer="2453" from-port="0" to-layer="2454" to-port="0" />
<edge from-layer="2454" from-port="1" to-layer="2456" to-port="0" />
<edge from-layer="2455" from-port="0" to-layer="2456" to-port="1" />
<edge from-layer="2456" from-port="2" to-layer="2457" to-port="1" />
<edge from-layer="2457" from-port="2" to-layer="2459" to-port="0" />
<edge from-layer="2458" from-port="0" to-layer="2459" to-port="1" />
<edge from-layer="2459" from-port="2" to-layer="2461" to-port="0" />
<edge from-layer="2460" from-port="0" to-layer="2461" to-port="1" />
<edge from-layer="2461" from-port="2" to-layer="2463" to-port="0" />
<edge from-layer="2462" from-port="0" to-layer="2463" to-port="1" />
<edge from-layer="2463" from-port="2" to-layer="2464" to-port="2" />
<edge from-layer="2464" from-port="4" to-layer="2466" to-port="0" />
<edge from-layer="2465" from-port="0" to-layer="2466" to-port="1" />
<edge from-layer="2466" from-port="2" to-layer="2468" to-port="0" />
<edge from-layer="2467" from-port="0" to-layer="2468" to-port="1" />
<edge from-layer="2468" from-port="2" to-layer="2470" to-port="0" />
<edge from-layer="2469" from-port="0" to-layer="2470" to-port="1" />
<edge from-layer="2470" from-port="2" to-layer="2475" to-port="0" />
<edge from-layer="2471" from-port="0" to-layer="2475" to-port="1" />
<edge from-layer="2472" from-port="0" to-layer="2475" to-port="2" />
<edge from-layer="2473" from-port="0" to-layer="2475" to-port="3" />
<edge from-layer="2474" from-port="0" to-layer="2475" to-port="4" />
<edge from-layer="2475" from-port="5" to-layer="2480" to-port="0" />
<edge from-layer="2476" from-port="0" to-layer="2477" to-port="0" />
<edge from-layer="2477" from-port="1" to-layer="2479" to-port="0" />
<edge from-layer="2478" from-port="0" to-layer="2479" to-port="1" />
<edge from-layer="2479" from-port="2" to-layer="2480" to-port="1" />
<edge from-layer="2480" from-port="2" to-layer="2482" to-port="0" />
<edge from-layer="2481" from-port="0" to-layer="2482" to-port="1" />
<edge from-layer="2482" from-port="2" to-layer="2483" to-port="0" />
<edge from-layer="2483" from-port="2" to-layer="2485" to-port="0" />
<edge from-layer="2484" from-port="0" to-layer="2485" to-port="1" />
<edge from-layer="2485" from-port="2" to-layer="2487" to-port="0" />
<edge from-layer="2486" from-port="0" to-layer="2487" to-port="1" />
<edge from-layer="2487" from-port="2" to-layer="2489" to-port="0" />
<edge from-layer="2488" from-port="0" to-layer="2489" to-port="1" />
<edge from-layer="2489" from-port="2" to-layer="2519" to-port="1" />
<edge from-layer="2489" from-port="2" to-layer="2491" to-port="0" />
<edge from-layer="2490" from-port="0" to-layer="2491" to-port="1" />
<edge from-layer="2491" from-port="2" to-layer="2496" to-port="0" />
<edge from-layer="2492" from-port="0" to-layer="2496" to-port="1" />
<edge from-layer="2493" from-port="0" to-layer="2496" to-port="2" />
<edge from-layer="2494" from-port="0" to-layer="2496" to-port="3" />
<edge from-layer="2495" from-port="0" to-layer="2496" to-port="4" />
<edge from-layer="2496" from-port="5" to-layer="2501" to-port="0" />
<edge from-layer="2497" from-port="0" to-layer="2498" to-port="0" />
<edge from-layer="2498" from-port="1" to-layer="2500" to-port="0" />
<edge from-layer="2499" from-port="0" to-layer="2500" to-port="1" />
<edge from-layer="2500" from-port="2" to-layer="2501" to-port="1" />
<edge from-layer="2501" from-port="2" to-layer="2503" to-port="0" />
<edge from-layer="2502" from-port="0" to-layer="2503" to-port="1" />
<edge from-layer="2503" from-port="2" to-layer="2504" to-port="0" />
<edge from-layer="2504" from-port="1" to-layer="2506" to-port="0" />
<edge from-layer="2505" from-port="0" to-layer="2506" to-port="1" />
<edge from-layer="2506" from-port="2" to-layer="2511" to-port="0" />
<edge from-layer="2507" from-port="0" to-layer="2511" to-port="1" />
<edge from-layer="2508" from-port="0" to-layer="2511" to-port="2" />
<edge from-layer="2509" from-port="0" to-layer="2511" to-port="3" />
<edge from-layer="2510" from-port="0" to-layer="2511" to-port="4" />
<edge from-layer="2511" from-port="5" to-layer="2516" to-port="0" />
<edge from-layer="2512" from-port="0" to-layer="2513" to-port="0" />
<edge from-layer="2513" from-port="1" to-layer="2515" to-port="0" />
<edge from-layer="2514" from-port="0" to-layer="2515" to-port="1" />
<edge from-layer="2515" from-port="2" to-layer="2516" to-port="1" />
<edge from-layer="2516" from-port="2" to-layer="2518" to-port="0" />
<edge from-layer="2517" from-port="0" to-layer="2518" to-port="1" />
<edge from-layer="2518" from-port="2" to-layer="2519" to-port="0" />
<edge from-layer="2519" from-port="2" to-layer="2521" to-port="0" />
<edge from-layer="2520" from-port="0" to-layer="2521" to-port="1" />
<edge from-layer="2521" from-port="2" to-layer="2523" to-port="0" />
<edge from-layer="2522" from-port="0" to-layer="2523" to-port="1" />
<edge from-layer="2523" from-port="2" to-layer="2525" to-port="0" />
<edge from-layer="2524" from-port="0" to-layer="2525" to-port="1" />
<edge from-layer="2525" from-port="2" to-layer="2595" to-port="1" />
<edge from-layer="2525" from-port="2" to-layer="2527" to-port="0" />
<edge from-layer="2526" from-port="0" to-layer="2527" to-port="1" />
<edge from-layer="2527" from-port="2" to-layer="2532" to-port="0" />
<edge from-layer="2528" from-port="0" to-layer="2532" to-port="1" />
<edge from-layer="2529" from-port="0" to-layer="2532" to-port="2" />
<edge from-layer="2530" from-port="0" to-layer="2532" to-port="3" />
<edge from-layer="2531" from-port="0" to-layer="2532" to-port="4" />
<edge from-layer="2532" from-port="5" to-layer="2537" to-port="0" />
<edge from-layer="2532" from-port="5" to-layer="2569" to-port="0" />
<edge from-layer="2532" from-port="5" to-layer="2553" to-port="0" />
<edge from-layer="2533" from-port="0" to-layer="2534" to-port="0" />
<edge from-layer="2534" from-port="1" to-layer="2536" to-port="0" />
<edge from-layer="2535" from-port="0" to-layer="2536" to-port="1" />
<edge from-layer="2536" from-port="2" to-layer="2537" to-port="1" />
<edge from-layer="2537" from-port="2" to-layer="2539" to-port="0" />
<edge from-layer="2538" from-port="0" to-layer="2539" to-port="1" />
<edge from-layer="2539" from-port="2" to-layer="2544" to-port="0" />
<edge from-layer="2540" from-port="0" to-layer="2544" to-port="1" />
<edge from-layer="2541" from-port="0" to-layer="2544" to-port="2" />
<edge from-layer="2542" from-port="0" to-layer="2544" to-port="3" />
<edge from-layer="2543" from-port="0" to-layer="2544" to-port="4" />
<edge from-layer="2544" from-port="5" to-layer="2546" to-port="0" />
<edge from-layer="2545" from-port="0" to-layer="2546" to-port="1" />
<edge from-layer="2546" from-port="2" to-layer="2548" to-port="0" />
<edge from-layer="2547" from-port="0" to-layer="2548" to-port="1" />
<edge from-layer="2548" from-port="2" to-layer="2576" to-port="0" />
<edge from-layer="2549" from-port="0" to-layer="2550" to-port="0" />
<edge from-layer="2550" from-port="1" to-layer="2552" to-port="0" />
<edge from-layer="2551" from-port="0" to-layer="2552" to-port="1" />
<edge from-layer="2552" from-port="2" to-layer="2553" to-port="1" />
<edge from-layer="2553" from-port="2" to-layer="2555" to-port="0" />
<edge from-layer="2554" from-port="0" to-layer="2555" to-port="1" />
<edge from-layer="2555" from-port="2" to-layer="2560" to-port="0" />
<edge from-layer="2556" from-port="0" to-layer="2560" to-port="1" />
<edge from-layer="2557" from-port="0" to-layer="2560" to-port="2" />
<edge from-layer="2558" from-port="0" to-layer="2560" to-port="3" />
<edge from-layer="2559" from-port="0" to-layer="2560" to-port="4" />
<edge from-layer="2560" from-port="5" to-layer="2562" to-port="0" />
<edge from-layer="2561" from-port="0" to-layer="2562" to-port="1" />
<edge from-layer="2562" from-port="2" to-layer="2564" to-port="0" />
<edge from-layer="2563" from-port="0" to-layer="2564" to-port="1" />
<edge from-layer="2564" from-port="2" to-layer="2576" to-port="1" />
<edge from-layer="2565" from-port="0" to-layer="2566" to-port="0" />
<edge from-layer="2566" from-port="1" to-layer="2568" to-port="0" />
<edge from-layer="2567" from-port="0" to-layer="2568" to-port="1" />
<edge from-layer="2568" from-port="2" to-layer="2569" to-port="1" />
<edge from-layer="2569" from-port="2" to-layer="2571" to-port="0" />
<edge from-layer="2570" from-port="0" to-layer="2571" to-port="1" />
<edge from-layer="2571" from-port="2" to-layer="2573" to-port="0" />
<edge from-layer="2572" from-port="0" to-layer="2573" to-port="1" />
<edge from-layer="2573" from-port="2" to-layer="2575" to-port="0" />
<edge from-layer="2574" from-port="0" to-layer="2575" to-port="1" />
<edge from-layer="2575" from-port="2" to-layer="2576" to-port="2" />
<edge from-layer="2576" from-port="4" to-layer="2578" to-port="0" />
<edge from-layer="2577" from-port="0" to-layer="2578" to-port="1" />
<edge from-layer="2578" from-port="2" to-layer="2580" to-port="0" />
<edge from-layer="2579" from-port="0" to-layer="2580" to-port="1" />
<edge from-layer="2580" from-port="2" to-layer="2582" to-port="0" />
<edge from-layer="2581" from-port="0" to-layer="2582" to-port="1" />
<edge from-layer="2582" from-port="2" to-layer="2587" to-port="0" />
<edge from-layer="2583" from-port="0" to-layer="2587" to-port="1" />
<edge from-layer="2584" from-port="0" to-layer="2587" to-port="2" />
<edge from-layer="2585" from-port="0" to-layer="2587" to-port="3" />
<edge from-layer="2586" from-port="0" to-layer="2587" to-port="4" />
<edge from-layer="2587" from-port="5" to-layer="2592" to-port="0" />
<edge from-layer="2588" from-port="0" to-layer="2589" to-port="0" />
<edge from-layer="2589" from-port="1" to-layer="2591" to-port="0" />
<edge from-layer="2590" from-port="0" to-layer="2591" to-port="1" />
<edge from-layer="2591" from-port="2" to-layer="2592" to-port="1" />
<edge from-layer="2592" from-port="2" to-layer="2594" to-port="0" />
<edge from-layer="2593" from-port="0" to-layer="2594" to-port="1" />
<edge from-layer="2594" from-port="2" to-layer="2595" to-port="0" />
<edge from-layer="2595" from-port="2" to-layer="2597" to-port="0" />
<edge from-layer="2596" from-port="0" to-layer="2597" to-port="1" />
<edge from-layer="2597" from-port="2" to-layer="2599" to-port="0" />
<edge from-layer="2598" from-port="0" to-layer="2599" to-port="1" />
<edge from-layer="2599" from-port="2" to-layer="2601" to-port="0" />
<edge from-layer="2600" from-port="0" to-layer="2601" to-port="1" />
<edge from-layer="2601" from-port="2" to-layer="2603" to-port="0" />
<edge from-layer="2601" from-port="2" to-layer="2631" to-port="1" />
<edge from-layer="2602" from-port="0" to-layer="2603" to-port="1" />
<edge from-layer="2603" from-port="2" to-layer="2608" to-port="0" />
<edge from-layer="2604" from-port="0" to-layer="2608" to-port="1" />
<edge from-layer="2605" from-port="0" to-layer="2608" to-port="2" />
<edge from-layer="2606" from-port="0" to-layer="2608" to-port="3" />
<edge from-layer="2607" from-port="0" to-layer="2608" to-port="4" />
<edge from-layer="2608" from-port="5" to-layer="2613" to-port="0" />
<edge from-layer="2609" from-port="0" to-layer="2610" to-port="0" />
<edge from-layer="2610" from-port="1" to-layer="2612" to-port="0" />
<edge from-layer="2611" from-port="0" to-layer="2612" to-port="1" />
<edge from-layer="2612" from-port="2" to-layer="2613" to-port="1" />
<edge from-layer="2613" from-port="2" to-layer="2615" to-port="0" />
<edge from-layer="2614" from-port="0" to-layer="2615" to-port="1" />
<edge from-layer="2615" from-port="2" to-layer="2616" to-port="0" />
<edge from-layer="2616" from-port="1" to-layer="2618" to-port="0" />
<edge from-layer="2617" from-port="0" to-layer="2618" to-port="1" />
<edge from-layer="2618" from-port="2" to-layer="2623" to-port="0" />
<edge from-layer="2619" from-port="0" to-layer="2623" to-port="1" />
<edge from-layer="2620" from-port="0" to-layer="2623" to-port="2" />
<edge from-layer="2621" from-port="0" to-layer="2623" to-port="3" />
<edge from-layer="2622" from-port="0" to-layer="2623" to-port="4" />
<edge from-layer="2623" from-port="5" to-layer="2628" to-port="0" />
<edge from-layer="2624" from-port="0" to-layer="2625" to-port="0" />
<edge from-layer="2625" from-port="1" to-layer="2627" to-port="0" />
<edge from-layer="2626" from-port="0" to-layer="2627" to-port="1" />
<edge from-layer="2627" from-port="2" to-layer="2628" to-port="1" />
<edge from-layer="2628" from-port="2" to-layer="2630" to-port="0" />
<edge from-layer="2629" from-port="0" to-layer="2630" to-port="1" />
<edge from-layer="2630" from-port="2" to-layer="2631" to-port="0" />
<edge from-layer="2631" from-port="2" to-layer="2633" to-port="0" />
<edge from-layer="2632" from-port="0" to-layer="2633" to-port="1" />
<edge from-layer="2633" from-port="2" to-layer="2635" to-port="0" />
<edge from-layer="2634" from-port="0" to-layer="2635" to-port="1" />
<edge from-layer="2635" from-port="2" to-layer="2637" to-port="0" />
<edge from-layer="2636" from-port="0" to-layer="2637" to-port="1" />
<edge from-layer="2637" from-port="2" to-layer="2639" to-port="0" />
<edge from-layer="2637" from-port="2" to-layer="2707" to-port="1" />
<edge from-layer="2638" from-port="0" to-layer="2639" to-port="1" />
<edge from-layer="2639" from-port="2" to-layer="2644" to-port="0" />
<edge from-layer="2640" from-port="0" to-layer="2644" to-port="1" />
<edge from-layer="2641" from-port="0" to-layer="2644" to-port="2" />
<edge from-layer="2642" from-port="0" to-layer="2644" to-port="3" />
<edge from-layer="2643" from-port="0" to-layer="2644" to-port="4" />
<edge from-layer="2644" from-port="5" to-layer="2681" to-port="0" />
<edge from-layer="2644" from-port="5" to-layer="2665" to-port="0" />
<edge from-layer="2644" from-port="5" to-layer="2649" to-port="0" />
<edge from-layer="2645" from-port="0" to-layer="2646" to-port="0" />
<edge from-layer="2646" from-port="1" to-layer="2648" to-port="0" />
<edge from-layer="2647" from-port="0" to-layer="2648" to-port="1" />
<edge from-layer="2648" from-port="2" to-layer="2649" to-port="1" />
<edge from-layer="2649" from-port="2" to-layer="2651" to-port="0" />
<edge from-layer="2650" from-port="0" to-layer="2651" to-port="1" />
<edge from-layer="2651" from-port="2" to-layer="2656" to-port="0" />
<edge from-layer="2652" from-port="0" to-layer="2656" to-port="1" />
<edge from-layer="2653" from-port="0" to-layer="2656" to-port="2" />
<edge from-layer="2654" from-port="0" to-layer="2656" to-port="3" />
<edge from-layer="2655" from-port="0" to-layer="2656" to-port="4" />
<edge from-layer="2656" from-port="5" to-layer="2658" to-port="0" />
<edge from-layer="2657" from-port="0" to-layer="2658" to-port="1" />
<edge from-layer="2658" from-port="2" to-layer="2660" to-port="0" />
<edge from-layer="2659" from-port="0" to-layer="2660" to-port="1" />
<edge from-layer="2660" from-port="2" to-layer="2688" to-port="0" />
<edge from-layer="2661" from-port="0" to-layer="2662" to-port="0" />
<edge from-layer="2662" from-port="1" to-layer="2664" to-port="0" />
<edge from-layer="2663" from-port="0" to-layer="2664" to-port="1" />
<edge from-layer="2664" from-port="2" to-layer="2665" to-port="1" />
<edge from-layer="2665" from-port="2" to-layer="2667" to-port="0" />
<edge from-layer="2666" from-port="0" to-layer="2667" to-port="1" />
<edge from-layer="2667" from-port="2" to-layer="2672" to-port="0" />
<edge from-layer="2668" from-port="0" to-layer="2672" to-port="1" />
<edge from-layer="2669" from-port="0" to-layer="2672" to-port="2" />
<edge from-layer="2670" from-port="0" to-layer="2672" to-port="3" />
<edge from-layer="2671" from-port="0" to-layer="2672" to-port="4" />
<edge from-layer="2672" from-port="5" to-layer="2674" to-port="0" />
<edge from-layer="2673" from-port="0" to-layer="2674" to-port="1" />
<edge from-layer="2674" from-port="2" to-layer="2676" to-port="0" />
<edge from-layer="2675" from-port="0" to-layer="2676" to-port="1" />
<edge from-layer="2676" from-port="2" to-layer="2688" to-port="1" />
<edge from-layer="2677" from-port="0" to-layer="2678" to-port="0" />
<edge from-layer="2678" from-port="1" to-layer="2680" to-port="0" />
<edge from-layer="2679" from-port="0" to-layer="2680" to-port="1" />
<edge from-layer="2680" from-port="2" to-layer="2681" to-port="1" />
<edge from-layer="2681" from-port="2" to-layer="2683" to-port="0" />
<edge from-layer="2682" from-port="0" to-layer="2683" to-port="1" />
<edge from-layer="2683" from-port="2" to-layer="2685" to-port="0" />
<edge from-layer="2684" from-port="0" to-layer="2685" to-port="1" />
<edge from-layer="2685" from-port="2" to-layer="2687" to-port="0" />
<edge from-layer="2686" from-port="0" to-layer="2687" to-port="1" />
<edge from-layer="2687" from-port="2" to-layer="2688" to-port="2" />
<edge from-layer="2688" from-port="4" to-layer="2690" to-port="0" />
<edge from-layer="2689" from-port="0" to-layer="2690" to-port="1" />
<edge from-layer="2690" from-port="2" to-layer="2692" to-port="0" />
<edge from-layer="2691" from-port="0" to-layer="2692" to-port="1" />
<edge from-layer="2692" from-port="2" to-layer="2694" to-port="0" />
<edge from-layer="2693" from-port="0" to-layer="2694" to-port="1" />
<edge from-layer="2694" from-port="2" to-layer="2699" to-port="0" />
<edge from-layer="2695" from-port="0" to-layer="2699" to-port="1" />
<edge from-layer="2696" from-port="0" to-layer="2699" to-port="2" />
<edge from-layer="2697" from-port="0" to-layer="2699" to-port="3" />
<edge from-layer="2698" from-port="0" to-layer="2699" to-port="4" />
<edge from-layer="2699" from-port="5" to-layer="2704" to-port="0" />
<edge from-layer="2700" from-port="0" to-layer="2701" to-port="0" />
<edge from-layer="2701" from-port="1" to-layer="2703" to-port="0" />
<edge from-layer="2702" from-port="0" to-layer="2703" to-port="1" />
<edge from-layer="2703" from-port="2" to-layer="2704" to-port="1" />
<edge from-layer="2704" from-port="2" to-layer="2706" to-port="0" />
<edge from-layer="2705" from-port="0" to-layer="2706" to-port="1" />
<edge from-layer="2706" from-port="2" to-layer="2707" to-port="0" />
<edge from-layer="2707" from-port="2" to-layer="2709" to-port="0" />
<edge from-layer="2708" from-port="0" to-layer="2709" to-port="1" />
<edge from-layer="2709" from-port="2" to-layer="2711" to-port="0" />
<edge from-layer="2710" from-port="0" to-layer="2711" to-port="1" />
<edge from-layer="2711" from-port="2" to-layer="2713" to-port="0" />
<edge from-layer="2712" from-port="0" to-layer="2713" to-port="1" />
<edge from-layer="2713" from-port="2" to-layer="2743" to-port="1" />
<edge from-layer="2713" from-port="2" to-layer="2715" to-port="0" />
<edge from-layer="2714" from-port="0" to-layer="2715" to-port="1" />
<edge from-layer="2715" from-port="2" to-layer="2720" to-port="0" />
<edge from-layer="2716" from-port="0" to-layer="2720" to-port="1" />
<edge from-layer="2717" from-port="0" to-layer="2720" to-port="2" />
<edge from-layer="2718" from-port="0" to-layer="2720" to-port="3" />
<edge from-layer="2719" from-port="0" to-layer="2720" to-port="4" />
<edge from-layer="2720" from-port="5" to-layer="2725" to-port="0" />
<edge from-layer="2721" from-port="0" to-layer="2722" to-port="0" />
<edge from-layer="2722" from-port="1" to-layer="2724" to-port="0" />
<edge from-layer="2723" from-port="0" to-layer="2724" to-port="1" />
<edge from-layer="2724" from-port="2" to-layer="2725" to-port="1" />
<edge from-layer="2725" from-port="2" to-layer="2727" to-port="0" />
<edge from-layer="2726" from-port="0" to-layer="2727" to-port="1" />
<edge from-layer="2727" from-port="2" to-layer="2728" to-port="0" />
<edge from-layer="2728" from-port="1" to-layer="2730" to-port="0" />
<edge from-layer="2729" from-port="0" to-layer="2730" to-port="1" />
<edge from-layer="2730" from-port="2" to-layer="2735" to-port="0" />
<edge from-layer="2731" from-port="0" to-layer="2735" to-port="1" />
<edge from-layer="2732" from-port="0" to-layer="2735" to-port="2" />
<edge from-layer="2733" from-port="0" to-layer="2735" to-port="3" />
<edge from-layer="2734" from-port="0" to-layer="2735" to-port="4" />
<edge from-layer="2735" from-port="5" to-layer="2740" to-port="0" />
<edge from-layer="2736" from-port="0" to-layer="2737" to-port="0" />
<edge from-layer="2737" from-port="1" to-layer="2739" to-port="0" />
<edge from-layer="2738" from-port="0" to-layer="2739" to-port="1" />
<edge from-layer="2739" from-port="2" to-layer="2740" to-port="1" />
<edge from-layer="2740" from-port="2" to-layer="2742" to-port="0" />
<edge from-layer="2741" from-port="0" to-layer="2742" to-port="1" />
<edge from-layer="2742" from-port="2" to-layer="2743" to-port="0" />
<edge from-layer="2743" from-port="2" to-layer="2745" to-port="0" />
<edge from-layer="2744" from-port="0" to-layer="2745" to-port="1" />
<edge from-layer="2745" from-port="2" to-layer="2747" to-port="0" />
<edge from-layer="2746" from-port="0" to-layer="2747" to-port="1" />
<edge from-layer="2747" from-port="2" to-layer="2749" to-port="0" />
<edge from-layer="2748" from-port="0" to-layer="2749" to-port="1" />
<edge from-layer="2749" from-port="2" to-layer="2750" to-port="0" />
</edges>
<rt_info>
<Runtime_version value="2024.4.1-16618-643f23d1318-releases/2024/4" />
<conversion_parameters>
<framework value="pytorch" />
<is_python_object value="True" />
</conversion_parameters>
<nncf>
<friendly_names_were_updated value="True" />
<quantization>
<advanced_parameters value="{'overflow_fix': 'disable', 'quantize_outputs': False, 'inplace_statistics': True, 'disable_channel_alignment': True, 'disable_bias_correction': False, 'batchwise_statistics': None, 'activations_quantization_params': None, 'weights_quantization_params': None, 'activations_range_estimator_params': {'min': {'statistics_type': None, 'aggregator_type': None, 'clipping_value': None, 'quantile_outlier_prob': 0.0001}, 'max': {'statistics_type': None, 'aggregator_type': None, 'clipping_value': None, 'quantile_outlier_prob': 0.0001}}, 'weights_range_estimator_params': {'min': {'statistics_type': None, 'aggregator_type': None, 'clipping_value': None, 'quantile_outlier_prob': 0.0001}, 'max': {'statistics_type': None, 'aggregator_type': None, 'clipping_value': None, 'quantile_outlier_prob': 0.0001}}, 'bias_correction_params': {'apply_for_all_nodes': False, 'threshold': None}, 'smooth_quant_alphas': {'convolution': -1, 'matmul': 0.95}, 'smooth_quant_alpha': None, 'backend_params': {}}" />
<fast_bias_correction value="True" />
<ignored_scope>
<types value="['GroupNormalization']" />
</ignored_scope>
<model_type value="transformer" />
<preset value="mixed" />
<subset_size value="300" />
<target_device value="ANY" />
</quantization>
</nncf>
<optimum>
<optimum_intel_version value="1.20.1" />
<optimum_version value="1.23.3" />
<pytorch_version value="2.5.1" />
<transformers_version value="4.46.2" />
</optimum>
</rt_info>
</net>
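<!--
  A minimal usage sketch, not part of the serialized graph above: loading this
  quantized IR with the OpenVINO Python API (a 2024.4.x release, matching the
  Runtime_version recorded in rt_info). The file path and the token id values
  are placeholders; the three int64 inputs (input_ids, attention_mask,
  token_type_ids) follow the Parameter layers declared at the top of this file.

      import numpy as np
      import openvino as ov

      core = ov.Core()
      # read_model picks up the .bin weights file next to the .xml
      # (the path below is a placeholder, not the real file name).
      model = core.read_model("path/to/this_model.xml")
      compiled = core.compile_model(model, "CPU")

      batch, seq_len = 1, 8
      feeds = {
          "input_ids": np.zeros((batch, seq_len), dtype=np.int64),
          "attention_mask": np.ones((batch, seq_len), dtype=np.int64),
          "token_type_ids": np.zeros((batch, seq_len), dtype=np.int64),
      }
      outputs = compiled(feeds)  # dict-like mapping of output ports to arrays

  The nncf block in rt_info records the post training quantization settings
  (preset "mixed", subset_size 300, transformer model type, GroupNormalization
  ignored). Assuming it was produced through nncf.quantize, as the metadata
  suggests, an equivalent call would look roughly like this; fp_model and
  calibration_dataset are assumed inputs, not names taken from this file:

      import nncf
      quantized = nncf.quantize(
          fp_model,                      # assumed: the float OpenVINO model
          calibration_dataset,           # assumed: an nncf.Dataset of samples
          preset=nncf.QuantizationPreset.MIXED,
          subset_size=300,
          model_type=nncf.ModelType.TRANSFORMER,
          ignored_scope=nncf.IgnoredScope(types=["GroupNormalization"]),
      )
-->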