@@ -79,7 +79,7 @@ static Kernel prim_ops[] = {
     // aten::sym_size.int(Tensor self, int dim) -> SymInt
     Kernel(
         "aten::sym_size.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& dim = *stack[1];
@@ -93,7 +93,7 @@ static Kernel prim_ops[] = {
     // aten::_local_scalar_dense(Tensor self) -> Scalar
     Kernel(
         "aten::_local_scalar_dense",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& out = *stack[1];
@@ -112,7 +112,7 @@ static Kernel prim_ops[] = {
     // aten::sym_numel(Tensor self) -> SymInt
     Kernel(
         "aten::sym_numel",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& out = *stack[1];
@@ -124,7 +124,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_max.Scalar(SymInt a, SymInt b) -> SymInt
     Kernel(
         "executorch_prim::sym_max.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -145,7 +145,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_min.Scalar(SymInt a, SymInt b) -> SymInt
     Kernel(
         "executorch_prim::sym_min.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -166,22 +166,22 @@ static Kernel prim_ops[] = {
     // executorch_prim::add.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::add.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           ALGEBRA_ET_PRIM_OP(+, stack, context);
         }),
 
     // executorch_prim::sub.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::sub.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           ALGEBRA_ET_PRIM_OP(-, stack, context);
         }),
 
     // executorch_prim::mul.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::mul.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           ALGEBRA_ET_PRIM_OP(*, stack, context);
         }),
 
@@ -196,7 +196,7 @@ static Kernel prim_ops[] = {
      */
     Kernel(
         "executorch_prim::floordiv.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -231,7 +231,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::floordiv.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::truediv.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           // can't use macro because of custom casting behavior
           (void)context;
           EValue& a = *stack[0];
@@ -262,7 +262,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_float.Scalar(Scalar) -> Scalar
     Kernel(
         "executorch_prim::sym_float.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           // can't use macro because of custom casting behavior
           // TODO: Now that we are reliably generating conversion operators,
           // we can remove the mixed type handling for other operators
@@ -283,41 +283,41 @@ static Kernel prim_ops[] = {
     // executorch_prim::eq.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::eq.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(==, stack, context);
         }),
 
     // executorch_prim::gt.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::gt.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(>, stack, context);
         }),
 
     // executorch_prim::lt.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::lt.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(<, stack, context);
         }),
 
     // executorch_prim::ge.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::ge.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(>=, stack, context);
         }),
 
     // executorch_prim::le.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::le.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(<=, stack, context);
         }),
     // executorch_prim::neg.Scalar(Scalar) -> Scalar
     Kernel(
         "executorch_prim::neg.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -334,7 +334,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::floordiv.int(int, int) -> int
     Kernel(
         "executorch_prim::floordiv.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -345,7 +345,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::mod.int(int, int) -> int
     Kernel(
         "executorch_prim::mod.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -356,7 +356,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::mod.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::mod.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -378,7 +378,7 @@ static Kernel prim_ops[] = {
     // ceil.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::ceil.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -398,7 +398,7 @@ static Kernel prim_ops[] = {
     // round.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::round.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -435,7 +435,7 @@ static Kernel prim_ops[] = {
     // trunc.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::trunc.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -450,13 +450,13 @@ static Kernel prim_ops[] = {
     // executorch_prim::et_copy_index.tensor(tensor, tensor) -> tensor
     Kernel(
         "executorch_prim::et_copy_index.tensor",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           et_copy_index(context, stack);
         }),
     // executorch_prim::et_view.default(Tensor, int[]) -> Tensor
     Kernel(
         "executorch_prim::et_view.default",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           et_view(context, stack);
         }),
 
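Every hunk above makes the same mechanical change: each prim-op kernel lambda now receives its argument stack as Span<EValue*> instead of a raw EValue**, so the number of stack slots travels with the pointer (e.g. via stack.size()) while the stack[i] indexing in the kernel bodies stays unchanged. As a sketch of what a table entry looks like after the change (the op name "executorch_prim::example_add.Scalar" is hypothetical, the body assumes integer operands, and the snippet relies on the headers and using-declarations already present in register_prim_ops.cpp for Kernel, KernelRuntimeContext, EValue, and Span; real arithmetic entries use ALGEBRA_ET_PRIM_OP to cover int/double combinations):

    // Sketch only: an illustrative entry for the prim_ops[] table.
    Kernel(
        "executorch_prim::example_add.Scalar",
        [](KernelRuntimeContext& context, Span<EValue*> stack) {
          (void)context;            // context unused in this sketch
          EValue& a = *stack[0];    // first input
          EValue& b = *stack[1];    // second input
          EValue& out = *stack[2];  // output slot
          out = EValue(a.toInt() + b.toInt());  // integer-only for brevity
        }),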