anemll committed (verified) · Commit 391b039 · Parent(s): 50e4992

Upload folder using huggingface_hub
.DS_Store ADDED
Binary file (8.2 kB)
meta.yaml CHANGED
@@ -14,7 +14,7 @@ model_info:
   parameters:
     context_length: 512
     batch_size: 64
-    lut_embeddings: 6
+    lut_embeddings: None
     lut_ffn: 6
     lut_lmhead: 6
     num_chunks: 1
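The only substantive change in meta.yaml flips `lut_embeddings` from `6` to `None`, i.e. the manifest now records the embeddings part as not LUT-quantized. Below is a minimal sketch of reading these fields, assuming PyYAML and the `model_info` → `parameters` nesting implied by the hunk header; note that a bare `None` in YAML parses as the string `"None"`, not a null:

```python
# Hypothetical reader for the manifest above; file layout beyond the keys
# shown in the diff is assumed.
import yaml

with open("meta.yaml") as f:
    meta = yaml.safe_load(f)

params = meta["model_info"]["parameters"]
print(params["context_length"])   # 512
print(params["batch_size"])       # 64
print(params["lut_ffn"])          # 6
print(params["lut_embeddings"])   # "None" (YAML keeps the bare word as a string)
```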
qwen_embeddings.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:310d49eb41b891c1cfc6a9a7dbe1d62e5a33e9136343e46852bb15d61754e43f
+size 243
qwen_embeddings.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:626e4ce3bf37e13a0b48524ccabe18155e9b7fed0c972dd10d4db61a5d0880c6
+size 527
qwen_embeddings.mlmodelc/metadata.json ADDED
@@ -0,0 +1,69 @@
+[
+  {
+    "shortDescription" : "Anemll Model (Embeddings) converted to CoreML",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16)",
+        "shortDescription" : "",
+        "shape" : "[]",
+        "name" : "hidden_states",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "0.3.3",
+    "modelParameters" : [
+
+    ],
+    "author" : "Converted with Anemll v0.3.3",
+    "specificationVersion" : 9,
+    "storagePrecision" : "Mixed (Float16, Palettized (21 bits), UInt6)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.constexprLutToDense" : 1,
+      "Ios18.gather" : 1
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "stateSchema" : [
+
+    ],
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "shortDescription" : "",
+        "dataType" : "Int32",
+        "hasShapeFlexibility" : "1",
+        "isOptional" : "0",
+        "shapeFlexibility" : "1 × 1 | 1 × 64",
+        "formattedType" : "MultiArray (Int32 1 × 1)",
+        "type" : "MultiArray",
+        "shape" : "[1, 1]",
+        "name" : "input_ids",
+        "enumeratedShapes" : "[[1, 1], [1, 64]]"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.anemll.context_length" : "512",
+      "com.github.apple.coremltools.version" : "8.3.0",
+      "com.anemll.lut_bits" : "6",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.anemll.info" : "Converted with Anemll v0.3.3",
+      "com.github.apple.coremltools.source_dialect" : "TorchScript"
+    },
+    "generatedClassName" : "qwen_embeddings",
+    "method" : "predict"
+  }
+]
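The schema above fixes the embeddings interface: a single Int32 input `input_ids` with two enumerated shapes, [1, 1] for single-token decode and [1, 64] for batched prefill, and a Float16 `hidden_states` output. A minimal prediction sketch, assuming coremltools ≥ 7 on macOS 15; since the bundle is an already-compiled `.mlmodelc`, `CompiledMLModel` can load it directly, and the path and shape comments are illustrative:

```python
import numpy as np
import coremltools as ct

# Load the precompiled model bundle (no .mlpackage compilation step needed).
model = ct.models.CompiledMLModel("qwen_embeddings.mlmodelc")

# input_ids must match one of the enumerated shapes: [1, 1] or [1, 64].
input_ids = np.zeros((1, 64), dtype=np.int32)
out = model.predict({"input_ids": input_ids})
hidden_states = out["hidden_states"]  # Float16, [1, 64, 1024] per the MIL below
```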
qwen_embeddings.mlmodelc/model.mil ADDED
@@ -0,0 +1,11 @@
+program(1.3)
+[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3500.11.1"}, {"coremlc-version", "3500.21.1"}})]
+{
+    func main<ios18>(tensor<int32, [1, ?]> input_ids) [FlexibleShapeInformation = tuple<tuple<string, dict<string, tensor<int32, [?]>>>, tuple<string, dict<string, dict<string, tensor<int32, [?]>>>>>((("DefaultShapes", {{"input_ids", [1, 1]}}), ("EnumeratedShapes", {{"79ae981e", {{"input_ids", [1, 1]}}}, {"ed9b58c8", {{"input_ids", [1, 64]}}}})))] {
+        int32 hidden_states_axis_0 = const()[name = string("hidden_states_axis_0"), val = int32(0)];
+        int32 hidden_states_batch_dims_0 = const()[name = string("hidden_states_batch_dims_0"), val = int32(0)];
+        bool hidden_states_validate_indices_0 = const()[name = string("hidden_states_validate_indices_0"), val = bool(false)];
+        tensor<fp16, [151936, 1024]> embed_tokens_weight_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [151936, 1024]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64))), lut = tensor<fp16, [18992, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(116686976))))[name = string("embed_tokens_weight_to_fp16_palettized")];
+        tensor<fp16, [1, ?, 1024]> hidden_states = gather(axis = hidden_states_axis_0, batch_dims = hidden_states_batch_dims_0, indices = input_ids, validate_indices = hidden_states_validate_indices_0, x = embed_tokens_weight_to_fp16_palettized)[name = string("hidden_states_cast_fp16")];
+    } -> (hidden_states);
+}
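The whole program is a constant-expression LUT expansion followed by a `gather`: 6-bit indices of shape [151936, 1024] are dequantized through [18992, 1, 64, 1] fp16 lookup tables, and the token ids then index rows of the resulting table. Since 151936 / 18992 = 8, each group of 8 rows appears to share one 64-entry LUT; the grouping axis is an assumption here, not spelled out in the MIL. A hedged numpy sketch of the dequantize-then-gather:

```python
import numpy as np

rows, cols, n_luts, lut_size = 151936, 1024, 18992, 64

# Stand-ins for the two BLOBFILE tensors fed to constexpr_lut_to_dense.
indices = np.random.randint(0, lut_size, size=(rows, cols), dtype=np.uint8)
luts = np.random.randn(n_luts, lut_size).astype(np.float16)

group = np.arange(rows) // (rows // n_luts)   # assumed: 8 consecutive rows per LUT
dense = luts[group[:, None], indices]         # fp16 [151936, 1024] embedding table

# The gather op is then an ordinary row lookup on the token ids.
input_ids = np.array([[1, 2, 3]], dtype=np.int32)
hidden_states = dense[input_ids]              # [1, 3, 1024]
```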
qwen_embeddings.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3aa93c5a92574caf627576b16b8bb7b7f5bacfdf5a5eb1951916bd64b3456ff3
+size 119118016
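The 119 MB palettized weight blob itself lives in Git LFS; the repo stores only this three-line pointer. A small sketch, assuming the object has been fetched (e.g. via `git lfs pull`), that recomputes the digest the pointer's `oid` field should match:

```python
import hashlib

def file_sha256(path: str) -> str:
    """Streamed sha256, so the ~119 MB blob never sits in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Should print 3aa93c5a... if the LFS object was fetched intact.
print(file_sha256("qwen_embeddings.mlmodelc/weights/weight.bin"))
```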
qwen_lm_head.mlmodelc/analytics/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05343a44f7fc2c4b9afbeeb300d09a6c1186c7be10912da68dfa83b14735c85
+size 243
qwen_lm_head.mlmodelc/coremldata.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:097eaf8ad6e5423e442dae8722977a3f5aab61660be17053b7a32c53c95152fd
+size 897
qwen_lm_head.mlmodelc/metadata.json ADDED
@@ -0,0 +1,220 @@
+[
+  {
+    "shortDescription" : "Anemll Model (LM Head) converted to CoreML",
+    "metadataOutputVersion" : "3.0",
+    "outputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits1",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits2",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits3",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits4",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits5",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits6",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits7",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits8",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits9",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits10",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits11",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits12",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits13",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits14",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits15",
+        "type" : "MultiArray"
+      },
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 9496)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 9496]",
+        "name" : "logits16",
+        "type" : "MultiArray"
+      }
+    ],
+    "version" : "0.3.3",
+    "modelParameters" : [
+
+    ],
+    "author" : "Converted with Anemll v0.3.3",
+    "specificationVersion" : 9,
+    "storagePrecision" : "Mixed (Float16, Palettized (17 bits), UInt6)",
+    "mlProgramOperationTypeHistogram" : {
+      "Ios18.transpose" : 17,
+      "Ios18.constexprLutToDense" : 16,
+      "Ios18.expandDims" : 1,
+      "Ios18.conv" : 16,
+      "Ios18.squeeze" : 16
+    },
+    "computePrecision" : "Mixed (Float16, Int32)",
+    "stateSchema" : [
+
+    ],
+    "isUpdatable" : "0",
+    "availability" : {
+      "macOS" : "15.0",
+      "tvOS" : "18.0",
+      "visionOS" : "2.0",
+      "watchOS" : "11.0",
+      "iOS" : "18.0",
+      "macCatalyst" : "18.0"
+    },
+    "modelType" : {
+      "name" : "MLModelType_mlProgram"
+    },
+    "inputSchema" : [
+      {
+        "hasShapeFlexibility" : "0",
+        "isOptional" : "0",
+        "dataType" : "Float16",
+        "formattedType" : "MultiArray (Float16 1 × 1 × 1024)",
+        "shortDescription" : "",
+        "shape" : "[1, 1, 1024]",
+        "name" : "hidden_states",
+        "type" : "MultiArray"
+      }
+    ],
+    "userDefinedMetadata" : {
+      "com.github.apple.coremltools.source_dialect" : "TorchScript",
+      "com.github.apple.coremltools.version" : "8.3.0",
+      "com.anemll.lut_bits" : "6",
+      "com.github.apple.coremltools.source" : "torch==2.5.0",
+      "com.anemll.info" : "Converted with Anemll v0.3.3",
+      "com.anemll.context_length" : "512"
+    },
+    "generatedClassName" : "qwen_lm_head_lut6",
+    "method" : "predict"
+  }
+]
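The LM head returns its logits in 16 Float16 slices, `logits1` … `logits16`, each [1, 1, 9496]; 16 × 9496 = 151,936, the embedding table's row count, so the slices tile the full vocabulary (splitting keeps individual ANE output tensors small). A hedged sketch of running the head and stitching the slices back together, loading via the same assumed `CompiledMLModel` pattern as above:

```python
import numpy as np
import coremltools as ct

lm_head = ct.models.CompiledMLModel("qwen_lm_head.mlmodelc")

hidden = np.zeros((1, 1, 1024), dtype=np.float16)  # e.g. last decode position
out = lm_head.predict({"hidden_states": hidden})

# Reassemble the vocab dimension from the 16 named outputs.
logits = np.concatenate([out[f"logits{i}"] for i in range(1, 17)], axis=-1)
assert logits.shape == (1, 1, 151936)
next_token = int(np.argmax(logits, axis=-1)[0, 0])
```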
qwen_lm_head.mlmodelc/model.mil ADDED
@@ -0,0 +1,186 @@
+program(1.3)
+[buildInfo = dict<string, string>({{"coremlc-component-MIL", "3500.11.1"}, {"coremlc-version", "3500.21.1"}})]
+{
+    func main<ios18>(tensor<fp16, [1, 1, 1024]> hidden_states) {
+        tensor<int32, [3]> var_5 = const()[name = string("op_5"), val = tensor<int32, [3]>([0, 2, 1])];
+        tensor<int32, [1]> input_axes_0 = const()[name = string("input_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 1024, 1]> var_6_cast_fp16 = transpose(perm = var_5, x = hidden_states)[name = string("transpose_16")];
+        tensor<fp16, [1, 1024, 1, 1]> input_cast_fp16 = expand_dims(axes = input_axes_0, x = var_6_cast_fp16)[name = string("input_cast_fp16")];
+        string var_29_pad_type_0 = const()[name = string("op_29_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_29_strides_0 = const()[name = string("op_29_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_29_pad_0 = const()[name = string("op_29_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_29_dilations_0 = const()[name = string("op_29_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_29_groups_0 = const()[name = string("op_29_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_9_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(64))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(7293056))))[name = string("op_9_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_29_cast_fp16 = conv(dilations = var_29_dilations_0, groups = var_29_groups_0, pad = var_29_pad_0, pad_type = var_29_pad_type_0, strides = var_29_strides_0, weight = op_9_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_29_cast_fp16")];
+        tensor<int32, [1]> var_31_axes_0 = const()[name = string("op_31_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_31_cast_fp16 = squeeze(axes = var_31_axes_0, x = var_29_cast_fp16)[name = string("op_31_cast_fp16")];
+        tensor<int32, [3]> var_34_perm_0 = const()[name = string("op_34_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_55_pad_type_0 = const()[name = string("op_55_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_55_strides_0 = const()[name = string("op_55_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_55_pad_0 = const()[name = string("op_55_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_55_dilations_0 = const()[name = string("op_55_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_55_groups_0 = const()[name = string("op_55_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_35_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(7445056))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(14738048))))[name = string("op_35_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_55_cast_fp16 = conv(dilations = var_55_dilations_0, groups = var_55_groups_0, pad = var_55_pad_0, pad_type = var_55_pad_type_0, strides = var_55_strides_0, weight = op_35_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_55_cast_fp16")];
+        tensor<int32, [1]> var_57_axes_0 = const()[name = string("op_57_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_57_cast_fp16 = squeeze(axes = var_57_axes_0, x = var_55_cast_fp16)[name = string("op_57_cast_fp16")];
+        tensor<int32, [3]> var_60_perm_0 = const()[name = string("op_60_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_81_pad_type_0 = const()[name = string("op_81_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_81_strides_0 = const()[name = string("op_81_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_81_pad_0 = const()[name = string("op_81_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_81_dilations_0 = const()[name = string("op_81_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_81_groups_0 = const()[name = string("op_81_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_61_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(14890048))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22183040))))[name = string("op_61_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_81_cast_fp16 = conv(dilations = var_81_dilations_0, groups = var_81_groups_0, pad = var_81_pad_0, pad_type = var_81_pad_type_0, strides = var_81_strides_0, weight = op_61_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_81_cast_fp16")];
+        tensor<int32, [1]> var_83_axes_0 = const()[name = string("op_83_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_83_cast_fp16 = squeeze(axes = var_83_axes_0, x = var_81_cast_fp16)[name = string("op_83_cast_fp16")];
+        tensor<int32, [3]> var_86_perm_0 = const()[name = string("op_86_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_107_pad_type_0 = const()[name = string("op_107_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_107_strides_0 = const()[name = string("op_107_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_107_pad_0 = const()[name = string("op_107_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_107_dilations_0 = const()[name = string("op_107_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_107_groups_0 = const()[name = string("op_107_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_87_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(22335040))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(29628032))))[name = string("op_87_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_107_cast_fp16 = conv(dilations = var_107_dilations_0, groups = var_107_groups_0, pad = var_107_pad_0, pad_type = var_107_pad_type_0, strides = var_107_strides_0, weight = op_87_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_107_cast_fp16")];
+        tensor<int32, [1]> var_109_axes_0 = const()[name = string("op_109_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_109_cast_fp16 = squeeze(axes = var_109_axes_0, x = var_107_cast_fp16)[name = string("op_109_cast_fp16")];
+        tensor<int32, [3]> var_112_perm_0 = const()[name = string("op_112_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_133_pad_type_0 = const()[name = string("op_133_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_133_strides_0 = const()[name = string("op_133_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_133_pad_0 = const()[name = string("op_133_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_133_dilations_0 = const()[name = string("op_133_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_133_groups_0 = const()[name = string("op_133_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_113_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(29780032))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(37073024))))[name = string("op_113_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_133_cast_fp16 = conv(dilations = var_133_dilations_0, groups = var_133_groups_0, pad = var_133_pad_0, pad_type = var_133_pad_type_0, strides = var_133_strides_0, weight = op_113_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_133_cast_fp16")];
+        tensor<int32, [1]> var_135_axes_0 = const()[name = string("op_135_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_135_cast_fp16 = squeeze(axes = var_135_axes_0, x = var_133_cast_fp16)[name = string("op_135_cast_fp16")];
+        tensor<int32, [3]> var_138_perm_0 = const()[name = string("op_138_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_159_pad_type_0 = const()[name = string("op_159_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_159_strides_0 = const()[name = string("op_159_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_159_pad_0 = const()[name = string("op_159_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_159_dilations_0 = const()[name = string("op_159_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_159_groups_0 = const()[name = string("op_159_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_139_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(37225024))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44518016))))[name = string("op_139_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_159_cast_fp16 = conv(dilations = var_159_dilations_0, groups = var_159_groups_0, pad = var_159_pad_0, pad_type = var_159_pad_type_0, strides = var_159_strides_0, weight = op_139_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_159_cast_fp16")];
+        tensor<int32, [1]> var_161_axes_0 = const()[name = string("op_161_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_161_cast_fp16 = squeeze(axes = var_161_axes_0, x = var_159_cast_fp16)[name = string("op_161_cast_fp16")];
+        tensor<int32, [3]> var_164_perm_0 = const()[name = string("op_164_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_185_pad_type_0 = const()[name = string("op_185_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_185_strides_0 = const()[name = string("op_185_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_185_pad_0 = const()[name = string("op_185_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_185_dilations_0 = const()[name = string("op_185_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_185_groups_0 = const()[name = string("op_185_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_165_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(44670016))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(51963008))))[name = string("op_165_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_185_cast_fp16 = conv(dilations = var_185_dilations_0, groups = var_185_groups_0, pad = var_185_pad_0, pad_type = var_185_pad_type_0, strides = var_185_strides_0, weight = op_165_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_185_cast_fp16")];
+        tensor<int32, [1]> var_187_axes_0 = const()[name = string("op_187_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_187_cast_fp16 = squeeze(axes = var_187_axes_0, x = var_185_cast_fp16)[name = string("op_187_cast_fp16")];
+        tensor<int32, [3]> var_190_perm_0 = const()[name = string("op_190_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_211_pad_type_0 = const()[name = string("op_211_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_211_strides_0 = const()[name = string("op_211_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_211_pad_0 = const()[name = string("op_211_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_211_dilations_0 = const()[name = string("op_211_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_211_groups_0 = const()[name = string("op_211_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_191_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(52115008))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(59408000))))[name = string("op_191_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_211_cast_fp16 = conv(dilations = var_211_dilations_0, groups = var_211_groups_0, pad = var_211_pad_0, pad_type = var_211_pad_type_0, strides = var_211_strides_0, weight = op_191_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_211_cast_fp16")];
+        tensor<int32, [1]> var_213_axes_0 = const()[name = string("op_213_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_213_cast_fp16 = squeeze(axes = var_213_axes_0, x = var_211_cast_fp16)[name = string("op_213_cast_fp16")];
+        tensor<int32, [3]> var_216_perm_0 = const()[name = string("op_216_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_237_pad_type_0 = const()[name = string("op_237_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_237_strides_0 = const()[name = string("op_237_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_237_pad_0 = const()[name = string("op_237_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_237_dilations_0 = const()[name = string("op_237_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_237_groups_0 = const()[name = string("op_237_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_217_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(59560000))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(66852992))))[name = string("op_217_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_237_cast_fp16 = conv(dilations = var_237_dilations_0, groups = var_237_groups_0, pad = var_237_pad_0, pad_type = var_237_pad_type_0, strides = var_237_strides_0, weight = op_217_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_237_cast_fp16")];
+        tensor<int32, [1]> var_239_axes_0 = const()[name = string("op_239_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_239_cast_fp16 = squeeze(axes = var_239_axes_0, x = var_237_cast_fp16)[name = string("op_239_cast_fp16")];
+        tensor<int32, [3]> var_242_perm_0 = const()[name = string("op_242_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_263_pad_type_0 = const()[name = string("op_263_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_263_strides_0 = const()[name = string("op_263_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_263_pad_0 = const()[name = string("op_263_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_263_dilations_0 = const()[name = string("op_263_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_263_groups_0 = const()[name = string("op_263_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_243_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(67004992))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(74297984))))[name = string("op_243_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_263_cast_fp16 = conv(dilations = var_263_dilations_0, groups = var_263_groups_0, pad = var_263_pad_0, pad_type = var_263_pad_type_0, strides = var_263_strides_0, weight = op_243_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_263_cast_fp16")];
+        tensor<int32, [1]> var_265_axes_0 = const()[name = string("op_265_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_265_cast_fp16 = squeeze(axes = var_265_axes_0, x = var_263_cast_fp16)[name = string("op_265_cast_fp16")];
+        tensor<int32, [3]> var_268_perm_0 = const()[name = string("op_268_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_289_pad_type_0 = const()[name = string("op_289_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_289_strides_0 = const()[name = string("op_289_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_289_pad_0 = const()[name = string("op_289_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_289_dilations_0 = const()[name = string("op_289_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_289_groups_0 = const()[name = string("op_289_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_269_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(74449984))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(81742976))))[name = string("op_269_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_289_cast_fp16 = conv(dilations = var_289_dilations_0, groups = var_289_groups_0, pad = var_289_pad_0, pad_type = var_289_pad_type_0, strides = var_289_strides_0, weight = op_269_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_289_cast_fp16")];
+        tensor<int32, [1]> var_291_axes_0 = const()[name = string("op_291_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_291_cast_fp16 = squeeze(axes = var_291_axes_0, x = var_289_cast_fp16)[name = string("op_291_cast_fp16")];
+        tensor<int32, [3]> var_294_perm_0 = const()[name = string("op_294_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_315_pad_type_0 = const()[name = string("op_315_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_315_strides_0 = const()[name = string("op_315_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_315_pad_0 = const()[name = string("op_315_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_315_dilations_0 = const()[name = string("op_315_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_315_groups_0 = const()[name = string("op_315_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_295_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(81894976))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(89187968))))[name = string("op_295_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_315_cast_fp16 = conv(dilations = var_315_dilations_0, groups = var_315_groups_0, pad = var_315_pad_0, pad_type = var_315_pad_type_0, strides = var_315_strides_0, weight = op_295_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_315_cast_fp16")];
+        tensor<int32, [1]> var_317_axes_0 = const()[name = string("op_317_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_317_cast_fp16 = squeeze(axes = var_317_axes_0, x = var_315_cast_fp16)[name = string("op_317_cast_fp16")];
+        tensor<int32, [3]> var_320_perm_0 = const()[name = string("op_320_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_341_pad_type_0 = const()[name = string("op_341_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_341_strides_0 = const()[name = string("op_341_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_341_pad_0 = const()[name = string("op_341_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_341_dilations_0 = const()[name = string("op_341_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_341_groups_0 = const()[name = string("op_341_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_321_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(89339968))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(96632960))))[name = string("op_321_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_341_cast_fp16 = conv(dilations = var_341_dilations_0, groups = var_341_groups_0, pad = var_341_pad_0, pad_type = var_341_pad_type_0, strides = var_341_strides_0, weight = op_321_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_341_cast_fp16")];
+        tensor<int32, [1]> var_343_axes_0 = const()[name = string("op_343_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_343_cast_fp16 = squeeze(axes = var_343_axes_0, x = var_341_cast_fp16)[name = string("op_343_cast_fp16")];
+        tensor<int32, [3]> var_346_perm_0 = const()[name = string("op_346_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_367_pad_type_0 = const()[name = string("op_367_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_367_strides_0 = const()[name = string("op_367_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_367_pad_0 = const()[name = string("op_367_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_367_dilations_0 = const()[name = string("op_367_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_367_groups_0 = const()[name = string("op_367_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_347_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(96784960))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(104077952))))[name = string("op_347_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_367_cast_fp16 = conv(dilations = var_367_dilations_0, groups = var_367_groups_0, pad = var_367_pad_0, pad_type = var_367_pad_type_0, strides = var_367_strides_0, weight = op_347_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_367_cast_fp16")];
+        tensor<int32, [1]> var_369_axes_0 = const()[name = string("op_369_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_369_cast_fp16 = squeeze(axes = var_369_axes_0, x = var_367_cast_fp16)[name = string("op_369_cast_fp16")];
+        tensor<int32, [3]> var_372_perm_0 = const()[name = string("op_372_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_393_pad_type_0 = const()[name = string("op_393_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_393_strides_0 = const()[name = string("op_393_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_393_pad_0 = const()[name = string("op_393_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_393_dilations_0 = const()[name = string("op_393_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_393_groups_0 = const()[name = string("op_393_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_373_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(104229952))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(111522944))))[name = string("op_373_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_393_cast_fp16 = conv(dilations = var_393_dilations_0, groups = var_393_groups_0, pad = var_393_pad_0, pad_type = var_393_pad_type_0, strides = var_393_strides_0, weight = op_373_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_393_cast_fp16")];
+        tensor<int32, [1]> var_395_axes_0 = const()[name = string("op_395_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_395_cast_fp16 = squeeze(axes = var_395_axes_0, x = var_393_cast_fp16)[name = string("op_395_cast_fp16")];
+        tensor<int32, [3]> var_398_perm_0 = const()[name = string("op_398_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        string var_419_pad_type_0 = const()[name = string("op_419_pad_type_0"), val = string("valid")];
+        tensor<int32, [2]> var_419_strides_0 = const()[name = string("op_419_strides_0"), val = tensor<int32, [2]>([1, 1])];
+        tensor<int32, [4]> var_419_pad_0 = const()[name = string("op_419_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
+        tensor<int32, [2]> var_419_dilations_0 = const()[name = string("op_419_dilations_0"), val = tensor<int32, [2]>([1, 1])];
+        int32 var_419_groups_0 = const()[name = string("op_419_groups_0"), val = int32(1)];
+        tensor<fp16, [9496, 1024, 1, 1]> op_399_promoted_to_fp16_palettized = constexpr_lut_to_dense(indices = tensor<uint6, [9496, 1024, 1, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(111674944))), lut = tensor<fp16, [1187, 1, 1, 1, 64, 1]>(BLOBFILE(path = string("@model_path/weights/weight.bin"), offset = uint64(118967936))))[name = string("op_399_promoted_to_fp16_palettized")];
+        tensor<fp16, [1, 9496, 1, 1]> var_419_cast_fp16 = conv(dilations = var_419_dilations_0, groups = var_419_groups_0, pad = var_419_pad_0, pad_type = var_419_pad_type_0, strides = var_419_strides_0, weight = op_399_promoted_to_fp16_palettized, x = input_cast_fp16)[name = string("op_419_cast_fp16")];
+        tensor<int32, [1]> var_421_axes_0 = const()[name = string("op_421_axes_0"), val = tensor<int32, [1]>([2])];
+        tensor<fp16, [1, 9496, 1]> var_421_cast_fp16 = squeeze(axes = var_421_axes_0, x = var_419_cast_fp16)[name = string("op_421_cast_fp16")];
+        tensor<int32, [3]> var_424_perm_0 = const()[name = string("op_424_perm_0"), val = tensor<int32, [3]>([0, 2, 1])];
+        tensor<fp16, [1, 1, 9496]> logits1 = transpose(perm = var_34_perm_0, x = var_31_cast_fp16)[name = string("transpose_0")];
+        tensor<fp16, [1, 1, 9496]> logits2 = transpose(perm = var_60_perm_0, x = var_57_cast_fp16)[name = string("transpose_1")];
+        tensor<fp16, [1, 1, 9496]> logits3 = transpose(perm = var_86_perm_0, x = var_83_cast_fp16)[name = string("transpose_2")];
+        tensor<fp16, [1, 1, 9496]> logits4 = transpose(perm = var_112_perm_0, x = var_109_cast_fp16)[name = string("transpose_3")];
+        tensor<fp16, [1, 1, 9496]> logits5 = transpose(perm = var_138_perm_0, x = var_135_cast_fp16)[name = string("transpose_4")];
+        tensor<fp16, [1, 1, 9496]> logits6 = transpose(perm = var_164_perm_0, x = var_161_cast_fp16)[name = string("transpose_5")];
+        tensor<fp16, [1, 1, 9496]> logits7 = transpose(perm = var_190_perm_0, x = var_187_cast_fp16)[name = string("transpose_6")];
+        tensor<fp16, [1, 1, 9496]> logits8 = transpose(perm = var_216_perm_0, x = var_213_cast_fp16)[name = string("transpose_7")];
+        tensor<fp16, [1, 1, 9496]> logits9 = transpose(perm = var_242_perm_0, x = var_239_cast_fp16)[name = string("transpose_8")];
+        tensor<fp16, [1, 1, 9496]> logits10 = transpose(perm = var_268_perm_0, x = var_265_cast_fp16)[name = string("transpose_9")];
+        tensor<fp16, [1, 1, 9496]> logits11 = transpose(perm = var_294_perm_0, x = var_291_cast_fp16)[name = string("transpose_10")];
+        tensor<fp16, [1, 1, 9496]> logits12 = transpose(perm = var_320_perm_0, x = var_317_cast_fp16)[name = string("transpose_11")];
+        tensor<fp16, [1, 1, 9496]> logits13 = transpose(perm = var_346_perm_0, x = var_343_cast_fp16)[name = string("transpose_12")];
+        tensor<fp16, [1, 1, 9496]> logits14 = transpose(perm = var_372_perm_0, x = var_369_cast_fp16)[name = string("transpose_13")];
+        tensor<fp16, [1, 1, 9496]> logits15 = transpose(perm = var_398_perm_0, x = var_395_cast_fp16)[name = string("transpose_14")];
+        tensor<fp16, [1, 1, 9496]> logits16 = transpose(perm = var_424_perm_0, x = var_421_cast_fp16)[name = string("transpose_15")];
+    } -> (logits1, logits2, logits3, logits4, logits5, logits6, logits7, logits8, logits9, logits10, logits11, logits12, logits13, logits14, logits15, logits16);
+}
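Each slice is computed the same way: the [1, 1, 1024] hidden state is transposed and expanded to a [1, 1024, 1, 1] NCHW tensor, pushed through a 1×1 `conv` whose palettized [9496, 1024, 1, 1] kernel holds one sixteenth of the LM head weight, then squeezed and transposed back. With these shapes a 1×1 convolution is exactly a matmul against a [9496, 1024] weight slice, a layout that maps well onto the ANE's convolution engine. A hedged numpy sketch of one slice, with a random stand-in for the dequantized kernel:

```python
import numpy as np

hidden_states = np.zeros((1, 1, 1024), dtype=np.float16)   # model input
w_slice = np.random.randn(9496, 1024).astype(np.float16)   # one of 16 kernels

x = hidden_states.transpose(0, 2, 1)[..., None]   # [1, 1024, 1, 1] (NCHW)
y = np.einsum("oc,bchw->bohw", w_slice, x)        # 1x1 conv == matmul here
logits1 = y.squeeze(-1).transpose(0, 2, 1)        # [1, 1, 9496]
assert logits1.shape == (1, 1, 9496)
```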
qwen_lm_head.mlmodelc/weights/weight.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8707b04445d6c46e5b15b263bb51e7329841877d4517808b26760117d42b22ac
+size 119119936