Commit 3c85c6e

⬆️ tensorflow@2.11.0 (#762)
* ⬆️ tensorflow@2.11.0
* Update TF on CI
* Update converter to match TF 2.11
1 parent 98862c4 commit 3c85c6e

21 files changed: +55 -50 lines

.bazelversion

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-5.1.1
+5.3.0

.github/workflows/unittests.yml

Lines changed: 2 additions & 2 deletions
@@ -86,7 +86,7 @@ jobs:
       if: github.ref != 'refs/heads/main'
       shell: bash
     - name: Install pip dependencies
-      run: pip install tensorflow-cpu~=2.10.0 larq~=0.12 larq_zoo~=2.0 pytest tensorflow_datasets~=4.4 flatbuffers==2.0 tqdm --no-cache-dir
+      run: pip install tensorflow-cpu~=2.11.0 larq~=0.12 larq_zoo~=2.0 pytest tensorflow_datasets~=4.4 flatbuffers==2.0 tqdm --no-cache-dir
     - name: Run Interpreter test
       run: bazelisk test larq_compute_engine/tflite/tests:interpreter_test --test_output=all
     - name: Run FileCheck tests
@@ -100,7 +100,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        tf-version: [1.14.0, 1.15.5, 2.0.4, 2.1.4, 2.2.3, 2.3.3, 2.4.4, 2.5.3, 2.6.4, 2.7.2, 2.8.1, 2.9.0, 2.10.0]
+        tf-version: [1.14.0, 1.15.5, 2.0.4, 2.1.4, 2.2.3, 2.3.3, 2.4.4, 2.5.3, 2.6.4, 2.7.2, 2.8.1, 2.9.0, 2.10.0, 2.11.0]
     if: "!contains(github.event.head_commit.message, 'ci-skip')"
     steps:
       - uses: actions/checkout@v3

WORKSPACE

Lines changed: 3 additions & 3 deletions
@@ -17,10 +17,10 @@ http_archive(
         "//third_party/tensorflow_patches:disable_forced_mkl.patch",
         "//third_party/tensorflow_patches:embedded_toolchain.patch",
     ],
-    sha256 = "b5a1bb04c84b6fe1538377e5a1f649bb5d5f0b2e3625a3c526ff3a8af88633e8",
-    strip_prefix = "tensorflow-2.10.0",
+    sha256 = "99c732b92b1b37fc243a559e02f9aef5671771e272758aa4aec7f34dc92dac48",
+    strip_prefix = "tensorflow-2.11.0",
     urls = [
-        "https://github.com/tensorflow/tensorflow/archive/v2.10.0.tar.gz",
+        "https://github.com/tensorflow/tensorflow/archive/v2.11.0.tar.gz",
     ],
 )

larq_compute_engine/mlir/ir/lce_ops.cc

Lines changed: 14 additions & 14 deletions
@@ -3,7 +3,7 @@
 #include "flatbuffers/flexbuffers.h"
 #include "larq_compute_engine/core/bitpacking/bitpack.h"
 #include "larq_compute_engine/mlir/transforms/bitpack.h"
-#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
+#include "mlir/Dialect/Arith/IR/Arith.h"
 #include "tensorflow/lite/schema/schema_generated.h"

 // Generated dialect defs.
@@ -36,15 +36,15 @@ std::vector<uint8_t> DequantizeOp::buildCustomOptions() { return {}; }
 std::vector<uint8_t> Bconv2dOp::buildCustomOptions() {
   flexbuffers::Builder fbb;
   fbb.Map([&]() {
-    fbb.Int("channels_in", channels_in());
-    fbb.Int("dilation_height_factor", dilation_height_factor());
-    fbb.Int("dilation_width_factor", dilation_width_factor());
+    fbb.Int("channels_in", getChannelsIn());
+    fbb.Int("dilation_height_factor", getDilationHeightFactor());
+    fbb.Int("dilation_width_factor", getDilationWidthFactor());
     fbb.Int("fused_activation_function",
-            (int)ConvertActivationAttr(fused_activation_function()));
-    fbb.Int("pad_values", pad_values());
-    fbb.Int("padding", (int)ConvertPaddingAttr(padding()));
-    fbb.Int("stride_height", stride_height());
-    fbb.Int("stride_width", stride_width());
+            (int)ConvertActivationAttr(getFusedActivationFunction()));
+    fbb.Int("pad_values", getPadValues());
+    fbb.Int("padding", (int)ConvertPaddingAttr(getPadding()));
+    fbb.Int("stride_height", getStrideHeight());
+    fbb.Int("stride_width", getStrideWidth());
   });
   fbb.Finish();
   return fbb.GetBuffer();
@@ -53,11 +53,11 @@ std::vector<uint8_t> Bconv2dOp::buildCustomOptions() {
 std::vector<uint8_t> BMaxPool2dOp::buildCustomOptions() {
   flexbuffers::Builder fbb;
   fbb.Map([&]() {
-    fbb.Int("padding", (int)ConvertPaddingAttr(padding()));
-    fbb.Int("stride_width", stride_width());
-    fbb.Int("stride_height", stride_height());
-    fbb.Int("filter_width", filter_width());
-    fbb.Int("filter_height", filter_height());
+    fbb.Int("padding", (int)ConvertPaddingAttr(getPadding()));
+    fbb.Int("stride_width", getStrideWidth());
+    fbb.Int("stride_height", getStrideHeight());
+    fbb.Int("filter_width", getFilterWidth());
+    fbb.Int("filter_height", getFilterHeight());
   });
   fbb.Finish();
   return fbb.GetBuffer();
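
The churn in this file is purely mechanical: the LLVM revision bundled with TF 2.11 switches TableGen-generated accessors to the prefixed form (`stride_height()` becomes `getStrideHeight()`) and renames the Arithmetic dialect to Arith. The flexbuffer payload built here is byte-for-byte unchanged. For reference, a minimal sketch (not part of this commit) of reading such a payload back with the same flexbuffers API; the struct and function names are hypothetical:

#include <cstddef>
#include <cstdint>
#include "flatbuffers/flexbuffers.h"

struct Bconv2dParams {
  int channels_in, pad_values, stride_height, stride_width;
};

// Reads back a subset of the keys written by Bconv2dOp::buildCustomOptions().
Bconv2dParams ParseBconv2dOptions(const uint8_t* buffer, size_t length) {
  const auto map = flexbuffers::GetRoot(buffer, length).AsMap();
  Bconv2dParams params;
  params.channels_in = map["channels_in"].AsInt32();
  params.pad_values = map["pad_values"].AsInt32();
  params.stride_height = map["stride_height"].AsInt32();
  params.stride_width = map["stride_width"].AsInt32();
  return params;
}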

larq_compute_engine/mlir/ir/lce_ops.td

Lines changed: 4 additions & 4 deletions
@@ -83,7 +83,7 @@ class LQ_Op<string mnemonic, list<Trait> traits = []> :
 class TensorOfOrNone<list<Type> allowedTypes, string description = ""> :
   AnyTypeOf<[TensorOf<allowedTypes>, NoneType], description>;

-def LQ_QuantizeOp : LQ_Op<"Quantize", [NoSideEffect]> {
+def LQ_QuantizeOp : LQ_Op<"Quantize", [Pure]> {
   let summary = "Binary quantize operator";

   let description = [{
@@ -103,7 +103,7 @@ Converts floating point, integer, or boolean tensors to binarized bitpacked tens
   let hasFolder = 1;
 }

-def LQ_DequantizeOp : LQ_Op<"Dequantize", [NoSideEffect]> {
+def LQ_DequantizeOp : LQ_Op<"Dequantize", [Pure]> {
   let summary = "Binary dequantize operator";

   let description = [{
@@ -121,7 +121,7 @@ Converts binarized bitpacked tensors to floating point, integer, or boolean tens
   let hasFolder = 1;
 }

-def LQ_Bconv2dOp : LQ_Op<"Bconv2d", [NoSideEffect]> {
+def LQ_Bconv2dOp : LQ_Op<"Bconv2d", [Pure]> {
   let summary = [{
 Computes a 2D binary convolution by binarizing and bitpacking the input and filter.
   }];
@@ -152,7 +152,7 @@ TODO
   );
 }

-def LQ_BMaxPool2dOp : LQ_Op<"BMaxPool2d", [NoSideEffect]> {
+def LQ_BMaxPool2dOp : LQ_Op<"BMaxPool2d", [Pure]> {
   let summary = [{
 Binary MaxPool2D op.
   }];
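
`Pure` is simply the new spelling of `NoSideEffect` in this LLVM revision; it still marks these ops as side-effect-free so they can be CSE'd, dead-code-eliminated, and folded. Since the defs keep `let hasFolder = 1`, each op must pair with a C++ fold hook in lce_ops.cc. A hypothetical sketch of such a hook, using the pre-FoldAdaptor signature this MLIR version still expects; the Dequantize-of-Quantize cancellation and the `getX()` operand accessor are illustrative assumptions, not LCE's actual folder:

// Illustrative only: cancel lq.Dequantize(lq.Quantize(x)) when types match.
// Assumes the op's single operand is named `x` in the .td, yielding getX().
OpFoldResult DequantizeOp::fold(ArrayRef<Attribute> operands) {
  auto quantize = getX().getDefiningOp<QuantizeOp>();
  if (quantize && quantize.getX().getType() == getType())
    return quantize.getX();
  return {};  // no fold possible
}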

larq_compute_engine/mlir/lce_mlir_opt.cc

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@
 int main(int argc, char** argv) {
   mlir::registerTransformsPasses();
   mlir::DialectRegistry registry;
-  registry.insert<mlir::arith::ArithmeticDialect, mlir::func::FuncDialect,
+  registry.insert<mlir::arith::ArithDialect, mlir::func::FuncDialect,
                   mlir::quant::QuantizationDialect, mlir::TF::TensorFlowDialect,
                   mlir::TFL::TensorFlowLiteDialect, mlir::lq::LarqDialect>();
   return failed(mlir::MlirOptMain(

larq_compute_engine/mlir/tests/legalize-lce.mlir

Lines changed: 4 additions & 4 deletions
@@ -6,7 +6,7 @@ func.func @legalize_bconv2d(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor<16x3x3
   %0 = "lq.Bconv2d"(%arg0, %arg1, %arg2, %arg3, %arg4) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
   return %0 : tensor<256x30x30x16xf32>

-  // CHECK: %0 = "tfl.custom"(%arg0, %arg1, %arg2, %arg3, %arg4) {custom_code = "LceBconv2d", custom_option = opaque<"lq", "0x6368616E6E656C735F696E0064696C6174696F6E5F6865696768745F666163746F720064696C6174696F6E5F77696474685F666163746F720066757365645F61637469766174696F6E5F66756E6374696F6E007061645F76616C7565730070616464696E67007374726964655F686569676874007374726964655F776964746800088277614C3329221508010803010100000101010404040404040404102401"> : tensor<160xi8>} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
+  // CHECK: %0 = "tfl.custom"(%arg0, %arg1, %arg2, %arg3, %arg4) {custom_code = "LceBconv2d", custom_option = #tfl<const_bytes : "0x6368616E6E656C735F696E0064696C6174696F6E5F6865696768745F666163746F720064696C6174696F6E5F77696474685F666163746F720066757365645F61637469766174696F6E5F66756E6374696F6E007061645F76616C7565730070616464696E67007374726964655F686569676874007374726964655F776964746800088277614C3329221508010803010100000101010404040404040404102401">} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
   // CHECK-NEXT: return %0

   // TRANSLATE: %0 = "lq.Bconv2d"(%arg0, %arg1, %arg2, %arg3, %arg4) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
@@ -18,7 +18,7 @@ func.func @legalize_bmax_pool2d(%arg0: tensor<256x32x32x3xi32>) -> tensor<256x16
   %0 = "lq.BMaxPool2d"(%arg0) {filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
   return %0 : tensor<256x16x16x3xi32>

-  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceBMaxPool2d", custom_option = opaque<"lq", "0x70616464696E67007374726964655F7769647468007374726964655F6865696768740066696C7465725F77696474680066696C7465725F68656967687400050F1D412D3B050105020200020204040404040A2401"> : tensor<84xi8>} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
+  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceBMaxPool2d", custom_option = #tfl<const_bytes : "0x70616464696E67007374726964655F7769647468007374726964655F6865696768740066696C7465725F77696474680066696C7465725F68656967687400050F1D412D3B050105020200020204040404040A2401">} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
   // CHECK-NEXT: return %0

   // TRANSLATE: %0 = "lq.BMaxPool2d"(%arg0) {filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
@@ -30,7 +30,7 @@ func.func @legalize_quantize(%arg0: tensor<256x32x32x64xf32>) -> tensor<256x32x3
   %0 = "lq.Quantize"(%arg0) {} : (tensor<256x32x32x64xf32>) -> tensor<256x32x32x2xi32>
   return %0 : tensor<256x32x32x2xi32>

-  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceQuantize", custom_option = opaque<"lq", "0x"> : tensor<0xi8>} : (tensor<256x32x32x64xf32>) -> tensor<256x32x32x2xi32>
+  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceQuantize", custom_option = #tfl<const_bytes : "0x">} : (tensor<256x32x32x64xf32>) -> tensor<256x32x32x2xi32>
   // CHECK-NEXT: return %0

   // TRANSLATE: %0 = "lq.Quantize"(%arg0) : (tensor<256x32x32x64xf32>) -> tensor<256x32x32x2xi32>
@@ -42,7 +42,7 @@ func.func @legalize_dequantize(%arg0: tensor<256x32x32x2xi32>) -> tensor<256x32x
   %0 = "lq.Dequantize"(%arg0) {} : (tensor<256x32x32x2xi32>) -> tensor<256x32x32x64xf32>
   return %0 : tensor<256x32x32x64xf32>

-  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceDequantize", custom_option = opaque<"lq", "0x"> : tensor<0xi8>} : (tensor<256x32x32x2xi32>) -> tensor<256x32x32x64xf32>
+  // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceDequantize", custom_option = #tfl<const_bytes : "0x">} : (tensor<256x32x32x2xi32>) -> tensor<256x32x32x64xf32>
   // CHECK-NEXT: return %0

   // TRANSLATE: %0 = "lq.Dequantize"(%arg0) : (tensor<256x32x32x2xi32>) -> tensor<256x32x32x64xf32>
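
The test churn here is also mechanical: upstream TensorFlow Lite replaced the generic `opaque<"lq", "0x...">` elements attribute with a dedicated `#tfl<const_bytes : "0x...">` attribute for custom-op payloads, dropping the explicit `tensor<Nxi8>` type. The hex bytes are identical; they begin with the flexbuffer key names (`channels_in`, `padding`, ...) in ASCII. A throwaway sketch, with hypothetical names, for turning such a hex literal into bytes that the flexbuffers reader sketched earlier can consume:

#include <cstdint>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"

// Converts a "0x"-prefixed hex literal (as in the CHECK lines above) to bytes.
std::vector<uint8_t> HexToBytes(const std::string& hex) {
  std::vector<uint8_t> bytes;
  for (size_t i = 2; i + 1 < hex.size(); i += 2)  // skip the "0x" prefix
    bytes.push_back(
        static_cast<uint8_t>(std::stoul(hex.substr(i, 2), nullptr, 16)));
  return bytes;
}

// Usage: auto b = HexToBytes("0x70616464696E67...");
//        int w = flexbuffers::GetRoot(b.data(), b.size())
//                    .AsMap()["stride_width"].AsInt32();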

larq_compute_engine/mlir/tf_tfl_passes.cc

Lines changed: 4 additions & 0 deletions
@@ -51,6 +51,10 @@ void AddQuantizationPasses(const mlir::quant::QuantizationSpecs& quant_specs,
       mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
   pass_manager.addNestedPass<mlir::func::FuncOp>(
       mlir::TFL::CreateOptimizeOpOrderPass());
+  // Add optimization pass after quantization for additional fusing
+  // opportunities.
+  pass_manager.addNestedPass<mlir::func::FuncOp>(
+      mlir::TFL::CreateOptimizePass(true));
 }
 }  // namespace
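
The extra `CreateOptimizePass(true)` run (with canonicalization enabled, matching TF 2.11's own converter pipeline) gives TFLite a second chance at fusions that only become visible after quantization. A minimal sketch of how such a nested pass slots into a standalone pipeline, assuming a context and module are already set up; the helper name is hypothetical and this mirrors only the call added above, not the full LCE pipeline:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"

// Runs only the post-quantization optimize pass on every func in `module`.
mlir::LogicalResult RunPostQuantOptimize(mlir::MLIRContext& context,
                                         mlir::ModuleOp module) {
  mlir::PassManager pass_manager(&context);
  pass_manager.addNestedPass<mlir::func::FuncOp>(
      mlir::TFL::CreateOptimizePass(/*enable_canonicalization=*/true));
  return pass_manager.run(module);
}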

larq_compute_engine/mlir/transforms/bitpack.cc

Lines changed: 3 additions & 2 deletions
@@ -20,7 +20,7 @@ DenseElementsAttr Bitpack(mlir::Builder* builder, Attribute x) {
   if (!x) return nullptr;

   // ShapedType is something like tensor<1x2x3xf32> and element_type is f32
-  auto shaped_type = x.getType().cast<ShapedType>();
+  auto shaped_type = x.cast<TypedAttr>().getType().cast<ShapedType>();
   auto shape = shaped_type.getShape();
   auto element_type = shaped_type.getElementType();

@@ -60,7 +60,8 @@ DenseElementsAttr Unpack(Attribute x, ShapedType result_type) {
   if (!x) return nullptr;
   if (!result_type.hasStaticShape()) return nullptr;

-  auto input_shape = x.getType().cast<ShapedType>().getShape();
+  auto input_shape =
+      x.cast<TypedAttr>().getType().cast<ShapedType>().getShape();
   auto output_shape = result_type.getShape();
   auto output_type = result_type.getElementType();
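
This change tracks an upstream MLIR cleanup: `Attribute::getType()` was removed, and attributes that carry a type now expose it through the `TypedAttr` interface, so call sites must cast first. A small sketch of the pattern with a hypothetical helper name; since `cast<TypedAttr>` asserts, a `dyn_cast` guard is shown for attributes that may not be typed:

#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"

// Returns the shaped type behind a typed attribute, or a null type if the
// attribute does not implement TypedAttr (e.g. a UnitAttr).
mlir::ShapedType GetShapedType(mlir::Attribute attr) {
  if (auto typed = attr.dyn_cast<mlir::TypedAttr>())
    return typed.getType().dyn_cast<mlir::ShapedType>();
  return {};
}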

larq_compute_engine/mlir/transforms/bitpack_activations_patterns.td

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 include "mlir/IR/PatternBase.td"
 include "mlir/Dialect/Func/IR/FuncOps.td"
-include "mlir/Dialect/Arithmetic/IR/ArithmeticOps.td"
+include "mlir/Dialect/Arith/IR/ArithOps.td"
 include "tensorflow/compiler/mlir/lite/ir/tfl_ops.td"
 include "larq_compute_engine/mlir/ir/lce_ops.td"
