
Commit 72bb4f4

EValue** -> Span
Differential Revision: D78904269
Pull Request resolved: #12908
1 parent 6177a78 commit 72bb4f4

18 files changed (+81, -60 lines)
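
The change is mechanical but consistent: every boxed-kernel entry point that used to take a raw `EValue**` now takes `Span<EValue*>`, a pointer-plus-length view over the argument stack. The sketch below illustrates the idea with a minimal stand-in `Span`; ExecuTorch's real implementation lives in `executorch/runtime/core/span.h`, and this is not its exact API.

```cpp
#include <cstddef>

// Minimal stand-in for executorch::runtime::Span, only to illustrate the
// signature change; the real class lives in executorch/runtime/core/span.h.
template <typename T>
class Span {
 public:
  Span(T* data, size_t length) : data_(data), length_(length) {}
  T& operator[](size_t i) const { return data_[i]; }  // indexes like a raw pointer
  T* data() const { return data_; }
  size_t size() const { return length_; }  // a bare EValue** carried no size at all
 private:
  T* data_;
  size_t length_;
};

struct EValue {};  // stand-in for executorch::runtime::EValue

// Old shape: void kernel(Context&, EValue** stack); the callee cannot know the count.
// New shape: the Span travels with its length, so bounds can be checked.
void example_kernel(Span<EValue*> stack) {
  for (size_t i = 0; i < stack.size(); ++i) {
    EValue& arg = *stack[i];  // stack[i] still yields an EValue*, exactly as before
    (void)arg;
  }
}
```

Because `operator[]` on the span behaves like pointer indexing, kernel bodies written against `EValue**` compile unchanged; only the signatures in the diffs below move.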

codegen/gen.py (1 addition, 1 deletion)

```diff
@@ -297,7 +297,7 @@ def __call__(
     f"""
     Kernel(
         "{f.namespace}::{f.func.name}",{newline + '"' + (k + '",') if k != "default" else ""}
-        []({contextArg.defn()}, EValue** stack) {{
+        []({contextArg.defn()}, Span<EValue*> stack) {{
     {code_connector.join(code_list)}

     {exception_boundary_begin}
```

codegen/test/test_executorch_gen.py (4 additions, 4 deletions)

```diff
@@ -507,7 +507,7 @@ def test_codegen_unboxed_specialized(self) -> None:
 Kernel(
     "custom_1::op_1",
     "v1/7;0,1,2,3|7;0,1,2,3|7;0,1,2,3",
-    [](torch::executor::KernelRuntimeContext & context, EValue** stack) {
+    [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
 """
 + """
@@ -605,7 +605,7 @@ def test_codegen_unboxed_default(self) -> None:
 """
 Kernel(
     "custom_1::op_1",
-    [](torch::executor::KernelRuntimeContext & context, EValue** stack) {
+    [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
 """
 + """
@@ -632,7 +632,7 @@ def test_codegen_unboxed_default(self) -> None:
 """
 Kernel(
     "custom_1::op_1",
-    [](torch::executor::KernelRuntimeContext & context, EValue** stack) {
+    [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
 """
 + """
@@ -675,7 +675,7 @@ def test_codegen_unboxed_default_kernel_key_selected(self) -> None:
 """
 Kernel(
     "custom_1::op_1",
-    [](torch::executor::KernelRuntimeContext & context, EValue** stack) {
+    [](torch::executor::KernelRuntimeContext & context, Span<EValue*> stack) {
 """
 + """
```

extension/kernel_util/make_boxed_from_unboxed_functor.h (3 additions, 3 deletions)

```diff
@@ -112,8 +112,8 @@ struct evalue_to_arg<executorch::aten::ArrayRef<std::optional<T>>> final {

 template <class Functor, size_t... evalue_arg_indices, typename... ArgTypes>
 void call_functor_with_args_from_stack(
-    ::executorch::runtime::KernelRuntimeContext& ctx,
-    executorch::runtime::EValue** stack,
+    ::executorch::runtime::KernelRuntimeContext& ctx,
+    executorch::runtime::Span<executorch::runtime::EValue*> stack,
     std::index_sequence<evalue_arg_indices...>,
     typelist<ArgTypes...>*) {
   (*Functor::func_ptr())(
@@ -151,7 +151,7 @@ struct WrapUnboxedIntoFunctor {

   static void call(
       ::executorch::runtime::KernelRuntimeContext& ctx,
-      executorch::runtime::EValue** stack) {
+      executorch::runtime::Span<executorch::runtime::EValue*> stack) {
     constexpr size_t num_inputs =
         kernel_util_internal::size<ContextRemovedArgsType>::value;
     return kernel_util_internal::call_functor_with_args_from_stack<FuncType>(
```
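
Why the bodies here don't change: `call_functor_with_args_from_stack` expands an index sequence and reads `stack[evalue_arg_indices]`, an expression that is valid for both `EValue**` and `Span<EValue*>`. A simplified, self-contained sketch of that unpacking pattern (the `toInt` converter and `add` functor are hypothetical stand-ins, not the real ExecuTorch types):

```cpp
#include <cstddef>
#include <utility>

template <typename T>
struct Span {
  T* data_;
  size_t length_;
  T& operator[](size_t i) const { return data_[i]; }
};

struct EValue {
  int payload;
  int toInt() const { return payload; }  // stand-in for EValue's real converters
};

// Simplified shape of call_functor_with_args_from_stack: expand the index
// sequence, pull each argument off the boxed stack, convert it, and call.
template <typename Func, size_t... Is>
int call_with_args_from_stack(Func f, Span<EValue*> stack, std::index_sequence<Is...>) {
  return f((*stack[Is]).toInt()...);  // same expression works for EValue** and Span
}

int add(int a, int b) {
  return a + b;
}

int main() {
  EValue a{2}, b{3};
  EValue* storage[] = {&a, &b};
  Span<EValue*> stack{storage, 2};
  return call_with_args_from_stack(add, stack, std::make_index_sequence<2>{}) == 5 ? 0 : 1;
}
```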

kernels/prim_ops/et_copy_index.cpp (1 addition, 1 deletion)

```diff
@@ -64,7 +64,7 @@ constexpr size_t kTensorDimensionLimit = 16;
 //
 // The output of each iteration (copy_from) is copied into the copy_to tensor at
 // the specified index. This operator is supported in both ATen and lean modes.
-void et_copy_index(KernelRuntimeContext& context, EValue** stack) {
+void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
   (void)context;
   SizesType expected_output_size[kTensorDimensionLimit];

```

kernels/prim_ops/et_copy_index.h (2 additions, 1 deletion)

```diff
@@ -9,13 +9,14 @@
 #pragma once

 #include <executorch/runtime/core/evalue.h>
+#include <executorch/runtime/core/span.h>
 #include <executorch/runtime/kernel/kernel_runtime_context.h>

 namespace torch {
 namespace executor {
 namespace function {

-void et_copy_index(KernelRuntimeContext& context, EValue** stack);
+void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack);

 } // namespace function
 } // namespace executor
```

kernels/prim_ops/et_view.cpp (1 addition, 1 deletion)

```diff
@@ -65,7 +65,7 @@ bool get_view_target_size(
 }
 } // namespace

-void et_view(KernelRuntimeContext& context, EValue** stack) {
+void et_view(KernelRuntimeContext& context, Span<EValue*> stack) {
   (void)context;

   auto self = (*stack[0]).toTensor();
```

kernels/prim_ops/et_view.h (2 additions, 1 deletion)

```diff
@@ -9,13 +9,14 @@
 #pragma once

 #include <executorch/runtime/core/evalue.h>
+#include <executorch/runtime/core/span.h>
 #include <executorch/runtime/kernel/kernel_runtime_context.h>

 namespace torch {
 namespace executor {
 namespace function {

-void et_view(KernelRuntimeContext& context, EValue** stack);
+void et_view(KernelRuntimeContext& context, Span<EValue*> stack);

 } // namespace function
 } // namespace executor
```

kernels/prim_ops/register_prim_ops.cpp (25 additions, 25 deletions)

```diff
@@ -79,7 +79,7 @@ static Kernel prim_ops[] = {
     // aten::sym_size.int(Tensor self, int dim) -> SymInt
     Kernel(
         "aten::sym_size.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& dim = *stack[1];
@@ -93,7 +93,7 @@ static Kernel prim_ops[] = {
     // aten::_local_scalar_dense(Tensor self) -> Scalar
     Kernel(
         "aten::_local_scalar_dense",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& out = *stack[1];
@@ -112,7 +112,7 @@ static Kernel prim_ops[] = {
     // aten::sym_numel(Tensor self) -> SymInt
     Kernel(
         "aten::sym_numel",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& self = *stack[0];
           EValue& out = *stack[1];
@@ -124,7 +124,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_max.Scalar(SymInt a, SymInt b) -> SymInt
     Kernel(
         "executorch_prim::sym_max.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -145,7 +145,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_min.Scalar(SymInt a, SymInt b) -> SymInt
     Kernel(
         "executorch_prim::sym_min.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -166,22 +166,22 @@ static Kernel prim_ops[] = {
     // executorch_prim::add.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::add.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           ALGEBRA_ET_PRIM_OP(+, stack, context);
         }),

     // executorch_prim::sub.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::sub.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           ALGEBRA_ET_PRIM_OP(-, stack, context);
         }),

     // executorch_prim::mul.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::mul.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           ALGEBRA_ET_PRIM_OP(*, stack, context);
         }),

@@ -196,7 +196,7 @@ static Kernel prim_ops[] = {
      */
     Kernel(
         "executorch_prim::floordiv.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -231,7 +231,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::floordiv.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::truediv.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           // can't use macro because of custom casting behavior
           (void)context;
           EValue& a = *stack[0];
@@ -262,7 +262,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::sym_float.Scalar(Scalar) -> Scalar
     Kernel(
         "executorch_prim::sym_float.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           // can't use macro because of custom casting behavior
           // TODO: Now that we are reliably generating conversion operators,
           // we can remove the mixed type handling for other operators
@@ -283,41 +283,41 @@ static Kernel prim_ops[] = {
     // executorch_prim::eq.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::eq.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(==, stack, context);
         }),

     // executorch_prim::gt.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::gt.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(>, stack, context);
         }),

     // executorch_prim::lt.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::lt.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(<, stack, context);
         }),

     // executorch_prim::ge.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::ge.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(>=, stack, context);
         }),

     // executorch_prim::le.Scalar(Scalar, Scalar) -> bool
     Kernel(
         "executorch_prim::le.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           BOOLEAN_ET_PRIM_OP(<=, stack, context);
         }),
     // executorch_prim::neg.Scalar(Scalar) -> Scalar
     Kernel(
         "executorch_prim::neg.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -334,7 +334,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::floordiv.int(int, int) -> int
     Kernel(
         "executorch_prim::floordiv.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -345,7 +345,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::mod.int(int, int) -> int
     Kernel(
         "executorch_prim::mod.int",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -356,7 +356,7 @@ static Kernel prim_ops[] = {
     // executorch_prim::mod.Scalar(Scalar, Scalar) -> Scalar
     Kernel(
         "executorch_prim::mod.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& b = *stack[1];
@@ -378,7 +378,7 @@ static Kernel prim_ops[] = {
     // ceil.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::ceil.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -398,7 +398,7 @@ static Kernel prim_ops[] = {
     // round.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::round.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -435,7 +435,7 @@ static Kernel prim_ops[] = {
     // trunc.Scalar(Scalar a) -> Scalar
     Kernel(
         "executorch_prim::trunc.Scalar",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           (void)context;
           EValue& a = *stack[0];
           EValue& out = *stack[1];
@@ -450,13 +450,13 @@ static Kernel prim_ops[] = {
     // executorch_prim::et_copy_index.tensor(tensor, tensor) -> tensor
     Kernel(
         "executorch_prim::et_copy_index.tensor",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           et_copy_index(context, stack);
         }),
     // executorch_prim::et_view.default(Tensor, int[]) -> Tensor
     Kernel(
         "executorch_prim::et_view.default",
-        [](KernelRuntimeContext& context, EValue** stack) {
+        [](KernelRuntimeContext& context, Span<EValue*> stack) {
           et_view(context, stack);
         }),

```
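
Every registration above follows the same template: only the lambda's second parameter changes, and the body keeps reading arguments through `*stack[n]`. As a sketch of the post-commit shape, here is a hypothetical integer-add prim op written against simplified stand-in types (not the real ExecuTorch registration machinery):

```cpp
#include <cstddef>
#include <functional>
#include <string>

// Simplified stand-ins; the real types come from the ExecuTorch runtime.
struct EValue {
  long v;
  long toInt() const { return v; }
};

template <typename T>
struct Span {
  T* data_;
  size_t length_;
  T& operator[](size_t i) const { return data_[i]; }
};

struct KernelRuntimeContext {};

struct Kernel {
  std::string name;
  std::function<void(KernelRuntimeContext&, Span<EValue*>)> fn;
};

// Hypothetical prim op in the new style: the parameter is Span<EValue*>,
// while the body dereferences *stack[n] exactly as it did before the change.
static Kernel example_op{
    "example::add.int",
    [](KernelRuntimeContext& context, Span<EValue*> stack) {
      (void)context;
      EValue& a = *stack[0];
      EValue& b = *stack[1];
      EValue& out = *stack[2];
      out.v = a.toInt() + b.toInt();
    }};
```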

kernels/prim_ops/targets.bzl (2 additions, 0 deletions)

```diff
@@ -17,6 +17,7 @@ def define_common_targets():
         exported_headers = ["et_copy_index.h"],
         deps = [
             "//executorch/runtime/kernel:kernel_includes" + aten_suffix,
+            "//executorch/runtime/core:core",
         ],
         exported_deps = [
             "//executorch/runtime/core:evalue" + aten_suffix,
@@ -31,6 +32,7 @@ def define_common_targets():
         exported_headers = ["et_view.h"],
         deps = [
             "//executorch/runtime/kernel:kernel_includes" + aten_suffix,
+            "//executorch/runtime/core:core",
         ],
         exported_deps = [
             "//executorch/runtime/core:evalue" + aten_suffix,
```

runtime/executor/method.cpp (1 addition, 1 deletion)

```diff
@@ -1319,7 +1319,7 @@ Error Method::execute_instruction() {
       // TODO(T147221312): Also expose tensor resizer via the context.
       KernelRuntimeContext context(event_tracer_, temp_allocator_);
       auto args = chain.argument_lists_[step_state_.instr_idx];
-      chain.kernels_[step_state_.instr_idx](context, args.data());
+      chain.kernels_[step_state_.instr_idx](context, args);
       // We reset the temp_allocator after the switch statement
       err = context.failure_state();
       if (err != Error::Ok) {
```
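
The dispatch site is the mirror image of the kernel-side change: `args` was already a sized span, and the old code threw the length away by calling `.data()` at the boundary. A minimal sketch of the before/after call shapes, using simplified stand-in types as above:

```cpp
#include <cstddef>

struct EValue {};

template <typename T>
struct Span {
  T* data_;
  size_t length_;
  T* data() const { return data_; }
  size_t size() const { return length_; }
};

using OldKernelFn = void (*)(EValue** stack);       // pre-commit signature
using NewKernelFn = void (*)(Span<EValue*> stack);  // post-commit signature

void dispatch_old(OldKernelFn kernel, Span<EValue*> args) {
  kernel(args.data());  // length discarded at the boundary (the old call site)
}

void dispatch_new(NewKernelFn kernel, Span<EValue*> args) {
  kernel(args);  // length travels with the arguments (the new call site)
}
```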
