From 04cd682ac70adc8439162809baeeec244f816fba Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 6 May 2025 10:25:48 +0530 Subject: [PATCH 001/115] Update nn.py --- keras/src/backend/jax/nn.py | 154 +++++++++++++++++++++++++++--------- 1 file changed, 116 insertions(+), 38 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index ba3dbd103acb..9cb4a103ba65 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1126,6 +1126,9 @@ def wrap_flash_attention( decoder_segment_ids, custom_mask=None, attn_logits_soft_cap=None, + head_shards=1, + q_seq_shards=1, + ): if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( @@ -1135,7 +1138,6 @@ def wrap_flash_attention( if custom_mask is not None: mask = splash_attention_mask.NumpyMask(array=custom_mask) - else: mask = splash_attention_mask.CausalMask( shape=(query.shape[2], query.shape[2]) @@ -1177,47 +1179,120 @@ def dot_product_attention( f"Received: query.shape={query.shape}, key.shape={key.shape}, " f"value.shape={value.shape}." ) + + # Check platform + platform = jax.devices()[0].platform + is_tpu = platform == "tpu" + + # Check if inputs use partial sharding (not fully replicated) + # Flash attention works well with fully replicated tensors on all platforms + # but may have issues with certain partial sharding patterns on non-TPU platforms + partially_sharded_inputs = any( + hasattr(t, "sharding") and not t.sharding.is_fully_replicated + for t in (query, key, value) + ) + + # Determine flash attention compatibility if flash_attention is None: - flash_attention = _can_use_flash_attention(query, key, value, bias) - elif flash_attention is True: - # Use `raise_error=True` to provide more details if the inputs failed to - # use flash attention - _can_use_flash_attention(query, key, value, bias, raise_error=True) - - if jax.devices()[0].platform == "tpu": - # Transpose to ('batch', 'heads', 'length', 'kv') - query = jnp.transpose(query, axes=(0, 2, 1, 3)) - key = jnp.transpose(key, axes=(0, 2, 1, 3)) - value = jnp.transpose(value, axes=(0, 2, 1, 3)) - B, H, S, KV = query.shape - - segment_ids = jnp.ones([B, S]) - # {token_ids, padding_mask, segment_ids} enable packing - out = wrap_flash_attention( - query, - key, - value, - decoder_segment_ids=splash_attention_kernel.SegmentIds( - segment_ids, segment_ids - ), - custom_mask=mask, - attn_logits_soft_cap=attn_logits_soft_cap, + # Auto-detect flash attention availability + if is_tpu: + # TPUs have specialized hardware for attention that works with any sharding pattern + flash_attention = True + else: + # For GPU/CPU with partially sharded inputs, we need multiple devices + # to efficiently handle the sharding + if partially_sharded_inputs and len(jax.devices()) <= 1: + flash_attention = False + else: + flash_attention = _can_use_flash_attention(query, key, value, bias) + elif flash_attention is True and not is_tpu: + # If flash attention is explicitly requested, validate compatibility + # Skip validation for TPU as it has specialized hardware support + try: + _can_use_flash_attention(query, key, value, bias, raise_error=True) + except Exception: + # Only disable flash attention on non-TPU platforms if validation fails + flash_attention = False + + # TPU-specific flash attention path + if is_tpu and flash_attention: + # Transpose to ('batch', 'heads', 'length', 'head_dim') + query_tpu_layout = jnp.transpose(query, axes=(0, 2, 1, 3)) + key_tpu_layout = jnp.transpose(key, axes=(0, 2, 1, 3)) + value_tpu_layout = jnp.transpose(value, axes=(0, 2, 1, 3)) + + bs, num_heads, q_len, head_dim = query_tpu_layout.shape + + # Apply scale to query if provided + if scale is not None: + # TPU kernel applies 1/sqrt(head_dim) internally, to achieve + # overall QK^T * scale, scale query by (scale * sqrt(head_dim)) + query_tpu_layout = query_tpu_layout * (scale * math.sqrt(head_dim)) + + # Create segment IDs for Splash Attention (for packing/batching) + segment_ids = jnp.zeros([bs, q_len], dtype=jnp.int32) + decoder_segment_ids = splash_attention_kernel.SegmentIds( + q=segment_ids, kv=segment_ids ) - out = jnp.transpose(out, axes=(0, 2, 1, 3)) - return out - # `dot_product_attention` is only available in jax>=0.4.31 + # Process mask for Splash Attention + custom_mask = None + if mask is not None: + mask_bool = mask.astype("bool") if mask.dtype != jnp.bool_ else mask + + if mask_bool.ndim == 3 and mask_bool.shape[0] == bs: + custom_mask = mask_bool[0] + elif mask_bool.ndim == 4 and mask_bool.shape[0] == bs: + custom_mask = mask_bool[0, 0] + + if is_causal and custom_mask is not None: + causal_mask = jnp.tril(jnp.ones((q_len, q_len), dtype=jnp.bool_)) + custom_mask = jnp.logical_and(custom_mask, causal_mask) + + if custom_mask is None and is_causal: + custom_mask = jnp.tril(jnp.ones((q_len, q_len), dtype=jnp.bool_)) + + try: + output = wrap_flash_attention( + query_tpu_layout, + key_tpu_layout, + value_tpu_layout, + decoder_segment_ids=decoder_segment_ids, + custom_mask=custom_mask, + attn_logits_soft_cap=attn_logits_soft_cap, + ) + # Transpose output back to Keras layout + return jnp.transpose(output, axes=(0, 2, 1, 3)) + except Exception: + flash_attention = False + + # JAX native dot_product_attention for GPU or fallback for TPU if hasattr(jax.nn, "dot_product_attention"): - return jax.nn.dot_product_attention( - query, - key, - value, - bias=bias, - mask=mask, - scale=scale, - is_causal=is_causal, - implementation="cudnn" if flash_attention else "xla", - ) + try: + return jax.nn.dot_product_attention( + query, + key, + value, + bias=bias, + mask=mask, + scale=scale, + is_causal=is_causal, + implementation="cudnn" if flash_attention else "xla", + ) + except Exception: + # If flash attention fails, fall back to XLA implementation + if flash_attention: + return jax.nn.dot_product_attention( + query, + key, + value, + bias=bias, + mask=mask, + scale=scale, + is_causal=is_causal, + implementation="xla", + ) + raise if flash_attention: raise RuntimeError( @@ -1228,6 +1303,9 @@ def dot_product_attention( # Ref: jax.nn.dot_product_attention # https://github.com/jax-ml/jax/blob/jax-v0.4.33/jax/_src/nn/functions.py#L886 # Not support `query_seq_lengths` and `key_value_seq_lengths` args + + # Fallback to custom XLA implementation + # This is the reference implementation from jax.nn.dot_product_attention output_shape = query.shape _, _, K, H = key.shape scale = (1.0 / jnp.sqrt(H)) if scale is None else scale From 1a7446523d1889f2515b3ab39a64c3b293ffe195 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 6 May 2025 10:47:14 +0530 Subject: [PATCH 002/115] Update nn.py --- keras/src/backend/jax/nn.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 9cb4a103ba65..96cf87003b0c 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1128,7 +1128,6 @@ def wrap_flash_attention( attn_logits_soft_cap=None, head_shards=1, q_seq_shards=1, - ): if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( @@ -1149,8 +1148,8 @@ def wrap_flash_attention( ) splash_kernel = splash_attention_kernel.make_splash_mha( mask=multi_head_mask, - head_shards=1, - q_seq_shards=1, + head_shards=head_shards, + q_seq_shards=q_seq_shards, attn_logits_soft_cap=attn_logits_soft_cap, ) @@ -1169,6 +1168,8 @@ def dot_product_attention( is_causal=False, flash_attention=None, attn_logits_soft_cap=None, + head_shards=1, + q_seq_shards=1, ): query = convert_to_tensor(query) key = convert_to_tensor(key) @@ -1260,6 +1261,8 @@ def dot_product_attention( decoder_segment_ids=decoder_segment_ids, custom_mask=custom_mask, attn_logits_soft_cap=attn_logits_soft_cap, + head_shards=head_shards, # Pass the parameter value instead of hardcoding to 1 + q_seq_shards=q_seq_shards, # Pass the parameter value instead of hardcoding to 1 ) # Transpose output back to Keras layout return jnp.transpose(output, axes=(0, 2, 1, 3)) From c11eb819acd9601db5333a7e87eb45420e5c7e24 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 6 May 2025 11:16:29 +0530 Subject: [PATCH 003/115] Update nn.py --- keras/src/backend/jax/nn.py | 57 ++++++++++++++++++++++++++++++++++--- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 96cf87003b0c..e21d0169f3d6 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1168,9 +1168,34 @@ def dot_product_attention( is_causal=False, flash_attention=None, attn_logits_soft_cap=None, - head_shards=1, - q_seq_shards=1, ): + """Computes dot-product attention given query, key, and value. + + This is the core computation of attention that is used in transformers. + For TPU platforms, flash attention optimizations are automatically applied + when possible, and sharding parameters are inferred from the layout map + in the current distribution context. + + Args: + query: JAX Array or KerasTensor. Queries with shape `[batch, time, heads, depth_k]`. + key: JAX Array or KerasTensor. Keys with shape `[batch, time, heads, depth_k]`. + value: JAX Array or KerasTensor. Values with shape `[batch, time, heads, depth_v]`. + bias: JAX Array or KerasTensor. Optional bias with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + mask: JAX Array or KerasTensor. Optional mask with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + scale: Float. Optional scale that is applied to the attention computation. + is_causal: Boolean. Specifying whether causal masking is applied. + flash_attention: Boolean. Whether to use flash attention optimization for + increased performance. Default to None, which means it will be + auto-determined based on the platform, input shapes and compatibility. + attn_logits_soft_cap: Float. Optional float to softly cap attention logits to + avoid numerical stability issues. Applied as: + `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. + + Returns: + JAX Array of shape `[batch, time, heads, depth_v]`. + """ query = convert_to_tensor(query) key = convert_to_tensor(key) value = convert_to_tensor(value) @@ -1185,6 +1210,30 @@ def dot_product_attention( platform = jax.devices()[0].platform is_tpu = platform == "tpu" + # Get sharding parameters from distribution context + head_shards = 1 + q_seq_shards = 1 + + if is_tpu: + try: + from keras.src.distribution.distribution_lib import distribution as get_dist + from keras.src.distribution.distribution_lib import ModelParallel + + # Get current distribution if available + dist = get_dist() + if dist and isinstance(dist, ModelParallel): + mesh = dist.device_mesh + if "model" in mesh.axis_names: + model_dim_index = mesh.axis_names.index("model") + # Set head_shards based on the model dimension of the mesh + head_shards = mesh.shape[model_dim_index] + # Typically keep q_seq_shards=1 for best performance + q_seq_shards = 1 + except (ImportError, ValueError, AttributeError): + # Use default values if detection fails + head_shards = 1 + q_seq_shards = 1 + # Check if inputs use partial sharding (not fully replicated) # Flash attention works well with fully replicated tensors on all platforms # but may have issues with certain partial sharding patterns on non-TPU platforms @@ -1261,8 +1310,8 @@ def dot_product_attention( decoder_segment_ids=decoder_segment_ids, custom_mask=custom_mask, attn_logits_soft_cap=attn_logits_soft_cap, - head_shards=head_shards, # Pass the parameter value instead of hardcoding to 1 - q_seq_shards=q_seq_shards, # Pass the parameter value instead of hardcoding to 1 + head_shards=head_shards, + q_seq_shards=q_seq_shards, ) # Transpose output back to Keras layout return jnp.transpose(output, axes=(0, 2, 1, 3)) From c81e18c8e3bfdfd5c7288a242a8376cdb383a2b8 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 6 May 2025 12:09:55 +0530 Subject: [PATCH 004/115] Update nn.py --- keras/src/backend/jax/nn.py | 95 +++++++++++++++++++++---------------- 1 file changed, 54 insertions(+), 41 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index e21d0169f3d6..6cb9dedc8d1c 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -205,9 +205,9 @@ def _pool( initial_value: the initial value for the reduction. reduce_fn: a reduce function of the form `(T, T) -> T`. pool_size: a sequence of `N` integers, representing the window size to - reduce over. + reduce over. strides: a sequence of `N` integers, representing the inter-window - strides (default: `(1, ..., 1)`). + strides (default: `(1, ..., 1)`). padding: either the string `same` or `valid`. Returns: @@ -1131,8 +1131,8 @@ def wrap_flash_attention( ): if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( - "Sharding along sequence dimension not allowed in tpu kernel " - "attention" + "Sharding along sequence dimension not allowed" + " in tpu kernel attention" ) if custom_mask is not None: @@ -1148,8 +1148,8 @@ def wrap_flash_attention( ) splash_kernel = splash_attention_kernel.make_splash_mha( mask=multi_head_mask, - head_shards=head_shards, - q_seq_shards=q_seq_shards, + head_shards=head_shards, + q_seq_shards=q_seq_shards, attn_logits_soft_cap=attn_logits_soft_cap, ) @@ -1170,28 +1170,32 @@ def dot_product_attention( attn_logits_soft_cap=None, ): """Computes dot-product attention given query, key, and value. - + This is the core computation of attention that is used in transformers. For TPU platforms, flash attention optimizations are automatically applied when possible, and sharding parameters are inferred from the layout map in the current distribution context. - + Args: - query: JAX Array or KerasTensor. Queries with shape `[batch, time, heads, depth_k]`. - key: JAX Array or KerasTensor. Keys with shape `[batch, time, heads, depth_k]`. - value: JAX Array or KerasTensor. Values with shape `[batch, time, heads, depth_v]`. - bias: JAX Array or KerasTensor. Optional bias with shape broadcastable to - `[batch, heads, dest_time, source_time]`. - mask: JAX Array or KerasTensor. Optional mask with shape broadcastable to - `[batch, heads, dest_time, source_time]`. - scale: Float. Optional scale that is applied to the attention computation. + query: Queries with shape `[batch, time, heads, + depth_k]`. + key: Keys with shape `[batch, time, heads, + depth_k]`. + value: Values with shape `[batch, time, heads, + depth_v]`. + bias: Optional bias with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + mask: Optional mask with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + scale: Float. Optional scale that is applied to the attention + computation. is_causal: Boolean. Specifying whether causal masking is applied. - flash_attention: Boolean. Whether to use flash attention optimization for - increased performance. Default to None, which means it will be - auto-determined based on the platform, input shapes and compatibility. - attn_logits_soft_cap: Float. Optional float to softly cap attention logits to - avoid numerical stability issues. Applied as: - `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. + flash_attention: Boolean. Whether to use flash attention optimization + for increased performance. Default to None, which means it will be + auto-determined based on the platform, input shapes and compatibility. + attn_logits_soft_cap: Float. Optional float to softly cap attention + logits to avoid numerical stability issues. Applied as: + `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. Returns: JAX Array of shape `[batch, time, heads, depth_v]`. @@ -1205,20 +1209,22 @@ def dot_product_attention( f"Received: query.shape={query.shape}, key.shape={key.shape}, " f"value.shape={value.shape}." ) - + # Check platform platform = jax.devices()[0].platform is_tpu = platform == "tpu" - + # Get sharding parameters from distribution context head_shards = 1 q_seq_shards = 1 - + if is_tpu: try: - from keras.src.distribution.distribution_lib import distribution as get_dist from keras.src.distribution.distribution_lib import ModelParallel - + from keras.src.distribution.distribution_lib import ( + distribution as get_dist, + ) + # Get current distribution if available dist = get_dist() if dist and isinstance(dist, ModelParallel): @@ -1233,37 +1239,42 @@ def dot_product_attention( # Use default values if detection fails head_shards = 1 q_seq_shards = 1 - + # Check if inputs use partial sharding (not fully replicated) # Flash attention works well with fully replicated tensors on all platforms - # but may have issues with certain partial sharding patterns on non-TPU platforms + # but may have issues with certain partial sharding patterns on non-TPU + # platforms partially_sharded_inputs = any( hasattr(t, "sharding") and not t.sharding.is_fully_replicated for t in (query, key, value) ) - + # Determine flash attention compatibility if flash_attention is None: # Auto-detect flash attention availability if is_tpu: - # TPUs have specialized hardware for attention that works with any sharding pattern + # TPUs have specialized hardware for attention that works with any + # sharding pattern flash_attention = True else: - # For GPU/CPU with partially sharded inputs, we need multiple devices - # to efficiently handle the sharding + # For GPU/CPU with partially sharded inputs, we need + # multiple devices to efficiently handle the sharding if partially_sharded_inputs and len(jax.devices()) <= 1: flash_attention = False else: - flash_attention = _can_use_flash_attention(query, key, value, bias) + flash_attention = _can_use_flash_attention( + query, key, value, bias + ) elif flash_attention is True and not is_tpu: # If flash attention is explicitly requested, validate compatibility # Skip validation for TPU as it has specialized hardware support try: _can_use_flash_attention(query, key, value, bias, raise_error=True) except Exception: - # Only disable flash attention on non-TPU platforms if validation fails + # Only disable flash attention on non-TPU platforms + # if validation fails flash_attention = False - + # TPU-specific flash attention path if is_tpu and flash_attention: # Transpose to ('batch', 'heads', 'length', 'head_dim') @@ -1275,7 +1286,7 @@ def dot_product_attention( # Apply scale to query if provided if scale is not None: - # TPU kernel applies 1/sqrt(head_dim) internally, to achieve + # TPU kernel applies 1/sqrt(head_dim) internally, to achieve # overall QK^T * scale, scale query by (scale * sqrt(head_dim)) query_tpu_layout = query_tpu_layout * (scale * math.sqrt(head_dim)) @@ -1289,16 +1300,18 @@ def dot_product_attention( custom_mask = None if mask is not None: mask_bool = mask.astype("bool") if mask.dtype != jnp.bool_ else mask - + if mask_bool.ndim == 3 and mask_bool.shape[0] == bs: custom_mask = mask_bool[0] elif mask_bool.ndim == 4 and mask_bool.shape[0] == bs: custom_mask = mask_bool[0, 0] if is_causal and custom_mask is not None: - causal_mask = jnp.tril(jnp.ones((q_len, q_len), dtype=jnp.bool_)) + causal_mask = jnp.tril( + jnp.ones((q_len, q_len), dtype=jnp.bool_) + ) custom_mask = jnp.logical_and(custom_mask, causal_mask) - + if custom_mask is None and is_causal: custom_mask = jnp.tril(jnp.ones((q_len, q_len), dtype=jnp.bool_)) @@ -1355,7 +1368,7 @@ def dot_product_attention( # Ref: jax.nn.dot_product_attention # https://github.com/jax-ml/jax/blob/jax-v0.4.33/jax/_src/nn/functions.py#L886 # Not support `query_seq_lengths` and `key_value_seq_lengths` args - + # Fallback to custom XLA implementation # This is the reference implementation from jax.nn.dot_product_attention output_shape = query.shape From d938e20c524b0df619986e43683030cb48ccfe6d Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 7 May 2025 10:27:30 +0530 Subject: [PATCH 005/115] Update nn.py Corrected indentation in doc string --- keras/src/backend/jax/nn.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 6cb9dedc8d1c..258bd0af7e04 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -205,9 +205,9 @@ def _pool( initial_value: the initial value for the reduction. reduce_fn: a reduce function of the form `(T, T) -> T`. pool_size: a sequence of `N` integers, representing the window size to - reduce over. + reduce over. strides: a sequence of `N` integers, representing the inter-window - strides (default: `(1, ..., 1)`). + strides (default: `(1, ..., 1)`). padding: either the string `same` or `valid`. Returns: @@ -1132,7 +1132,7 @@ def wrap_flash_attention( if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( "Sharding along sequence dimension not allowed" - " in tpu kernel attention" + " in TPU kernel attention" ) if custom_mask is not None: @@ -1178,24 +1178,24 @@ def dot_product_attention( Args: query: Queries with shape `[batch, time, heads, - depth_k]`. + depth_k]`. key: Keys with shape `[batch, time, heads, - depth_k]`. + depth_k]`. value: Values with shape `[batch, time, heads, - depth_v]`. + depth_v]`. bias: Optional bias with shape broadcastable to - `[batch, heads, dest_time, source_time]`. + `[batch, heads, dest_time, source_time]`. mask: Optional mask with shape broadcastable to - `[batch, heads, dest_time, source_time]`. + `[batch, heads, dest_time, source_time]`. scale: Float. Optional scale that is applied to the attention - computation. + computation. is_causal: Boolean. Specifying whether causal masking is applied. flash_attention: Boolean. Whether to use flash attention optimization for increased performance. Default to None, which means it will be - auto-determined based on the platform, input shapes and compatibility. + auto-determined based on the platform, input shapes and compatibility. attn_logits_soft_cap: Float. Optional float to softly cap attention - logits to avoid numerical stability issues. Applied as: - `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. + logits to avoid numerical stability issues. Applied as: + `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. Returns: JAX Array of shape `[batch, time, heads, depth_v]`. From f60811ef86c819aa7aee516fa20e4dfe44239b31 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 7 May 2025 10:41:11 +0530 Subject: [PATCH 006/115] Update nn.py --- keras/src/backend/jax/nn.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 258bd0af7e04..cb2a7716c6ce 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1191,8 +1191,9 @@ def dot_product_attention( computation. is_causal: Boolean. Specifying whether causal masking is applied. flash_attention: Boolean. Whether to use flash attention optimization - for increased performance. Default to None, which means it will be - auto-determined based on the platform, input shapes and compatibility. + for increased performance. Default to None, which means it will + be auto-determined based on the platform, input shapes and + compatibility. attn_logits_soft_cap: Float. Optional float to softly cap attention logits to avoid numerical stability issues. Applied as: `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. From 28eeb2495fb9d72c3a93bf02dd2ae3a36ba26abd Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 12 May 2025 13:17:09 +0530 Subject: [PATCH 007/115] Update random_grayscale.py Fixed issue with passing a single image without batch dimension. --- .../image_preprocessing/random_grayscale.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py index 2dbcca6e5026..99ecf860eae5 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py @@ -59,12 +59,20 @@ def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs): def get_random_transformation(self, images, training=True, seed=None): if seed is None: seed = self._get_seed_generator(self.backend._backend) - random_values = self.backend.random.uniform( - shape=(self.backend.core.shape(images)[0],), - minval=0, - maxval=1, - seed=seed, - ) + if len(images.shape) == 4: + random_values = self.backend.random.uniform( + shape=(self.backend.core.shape(images)[0],), + minval=0, + maxval=1, + seed=seed, + ) + else: + random_values = self.backend.random.uniform( + shape=(1,), + minval=0, + maxval=1, + seed=seed, + ) should_apply = self.backend.numpy.expand_dims( random_values < self.factor, axis=[1, 2, 3] ) From de81e5bc1da01e4a55aedc73fcf16725b6be3002 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR <55033230+pctablet505@users.noreply.github.com> Date: Mon, 12 May 2025 14:19:12 +0530 Subject: [PATCH 008/115] Update keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py Co-authored-by: Jyotinder Singh <33001894+JyotinderSingh@users.noreply.github.com> --- .../image_preprocessing/random_grayscale.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py index 99ecf860eae5..31c911f476fa 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py @@ -59,16 +59,14 @@ def __init__(self, factor=0.5, data_format=None, seed=None, **kwargs): def get_random_transformation(self, images, training=True, seed=None): if seed is None: seed = self._get_seed_generator(self.backend._backend) + # Base case: Unbatched data + batch_size = 1 if len(images.shape) == 4: - random_values = self.backend.random.uniform( - shape=(self.backend.core.shape(images)[0],), - minval=0, - maxval=1, - seed=seed, - ) - else: - random_values = self.backend.random.uniform( - shape=(1,), + # This is a batch of images (4D input) + batch_size = self.backend.core.shape(images)[0] + + random_values = self.backend.random.uniform( + shape=(batch_size,), minval=0, maxval=1, seed=seed, From 66661ac827c0c43c46958ebe34a7ff0216e56216 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 12 May 2025 16:05:18 +0530 Subject: [PATCH 009/115] Update random_grayscale_test.py Test case for unbatched inputs --- .../random_grayscale_test.py | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py index b488c2c31f83..983554ef82aa 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py @@ -80,15 +80,28 @@ def test_grayscale_with_single_color_image(self): test_cases = [ (np.full((1, 4, 4, 3), 128, dtype=np.float32), "channels_last"), (np.full((1, 3, 4, 4), 128, dtype=np.float32), "channels_first"), + # unbatched inputs + (np.full((4, 4, 3), 128, dtype=np.float32), "channels_last"), + (np.full((3, 4, 4), 128, dtype=np.float32), "channels_first"), ] for xs, data_format in test_cases: layer = layers.RandomGrayscale(factor=1.0, data_format=data_format) transformed = ops.convert_to_numpy(layer(xs)) - - if data_format == "channels_last": - unique_vals = np.unique(transformed[0, :, :, 0]) - self.assertEqual(len(unique_vals), 1) + + if len(xs.shape)==4: + # batched inputs + if data_format == "channels_last": + unique_vals = np.unique(transformed[0, :, :, 0]) + self.assertEqual(len(unique_vals), 1) + else: + unique_vals = np.unique(transformed[0, 0, :, :]) + self.assertEqual(len(unique_vals), 1) else: - unique_vals = np.unique(transformed[0, 0, :, :]) - self.assertEqual(len(unique_vals), 1) + # unbatched inputs + if data_format == "channels_last": + unique_vals = np.unique(transformed[ :, :, 0]) + self.assertEqual(len(unique_vals), 1) + else: + unique_vals = np.unique(transformed[ 0, :, :]) + self.assertEqual(len(unique_vals), 1) From c37f2b51c658fe0b5c981960ba5b629a718b1571 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 13 May 2025 11:59:55 +0530 Subject: [PATCH 010/115] code reformat --- .../image_preprocessing/random_grayscale.py | 12 ++++++------ .../image_preprocessing/random_grayscale_test.py | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py index 31c911f476fa..865c55a3ceeb 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py @@ -62,15 +62,15 @@ def get_random_transformation(self, images, training=True, seed=None): # Base case: Unbatched data batch_size = 1 if len(images.shape) == 4: - # This is a batch of images (4D input) + # This is a batch of images (4D input) batch_size = self.backend.core.shape(images)[0] random_values = self.backend.random.uniform( - shape=(batch_size,), - minval=0, - maxval=1, - seed=seed, - ) + shape=(batch_size,), + minval=0, + maxval=1, + seed=seed, + ) should_apply = self.backend.numpy.expand_dims( random_values < self.factor, axis=[1, 2, 3] ) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py index 983554ef82aa..12ba46f275f4 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py @@ -88,8 +88,8 @@ def test_grayscale_with_single_color_image(self): for xs, data_format in test_cases: layer = layers.RandomGrayscale(factor=1.0, data_format=data_format) transformed = ops.convert_to_numpy(layer(xs)) - - if len(xs.shape)==4: + + if len(xs.shape) == 4: # batched inputs if data_format == "channels_last": unique_vals = np.unique(transformed[0, :, :, 0]) @@ -100,8 +100,8 @@ def test_grayscale_with_single_color_image(self): else: # unbatched inputs if data_format == "channels_last": - unique_vals = np.unique(transformed[ :, :, 0]) + unique_vals = np.unique(transformed[:, :, 0]) self.assertEqual(len(unique_vals), 1) else: - unique_vals = np.unique(transformed[ 0, :, :]) + unique_vals = np.unique(transformed[0, :, :]) self.assertEqual(len(unique_vals), 1) From 498dece497053967fa09209f8ff9c3b052bb66b7 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 13 May 2025 13:09:53 +0530 Subject: [PATCH 011/115] Update random_grayscale_test.py Testcase for checking both unbatched and batched single image inputs. --- .../random_grayscale_test.py | 37 +++++++++++-------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py index 12ba46f275f4..a43dfc55694a 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale_test.py @@ -78,6 +78,7 @@ def test_tf_data_compatibility(self): def test_grayscale_with_single_color_image(self): test_cases = [ + # batched inputs (np.full((1, 4, 4, 3), 128, dtype=np.float32), "channels_last"), (np.full((1, 3, 4, 4), 128, dtype=np.float32), "channels_first"), # unbatched inputs @@ -89,19 +90,25 @@ def test_grayscale_with_single_color_image(self): layer = layers.RandomGrayscale(factor=1.0, data_format=data_format) transformed = ops.convert_to_numpy(layer(xs)) - if len(xs.shape) == 4: - # batched inputs - if data_format == "channels_last": - unique_vals = np.unique(transformed[0, :, :, 0]) - self.assertEqual(len(unique_vals), 1) - else: - unique_vals = np.unique(transformed[0, 0, :, :]) - self.assertEqual(len(unique_vals), 1) + # Determine if the input was batched + is_batched = len(xs.shape) == 4 + + # If batched, select the first image from the batch for inspection. + # Otherwise, use the transformed image directly. + # `image_to_inspect` will always be a 3D tensor. + if is_batched: + image_to_inspect = transformed[0] else: - # unbatched inputs - if data_format == "channels_last": - unique_vals = np.unique(transformed[:, :, 0]) - self.assertEqual(len(unique_vals), 1) - else: - unique_vals = np.unique(transformed[0, :, :]) - self.assertEqual(len(unique_vals), 1) + image_to_inspect = transformed + + if data_format == "channels_last": + # image_to_inspect has shape (H, W, C), + # get the first channel [:, :, 0] + channel_data = image_to_inspect[:, :, 0] + else: # data_format == "channels_first" + # image_to_inspect has shape (C, H, W), + # get the first channel [0, :, :] + channel_data = image_to_inspect[0, :, :] + + unique_vals = np.unique(channel_data) + self.assertEqual(len(unique_vals), 1) From 653f5b11d0762fc553c6849e41fd467fc383b66b Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 21 May 2025 11:10:20 +0530 Subject: [PATCH 012/115] changed compute_output_spec There was a bug, and it was causing cycle in graph. --- .../preprocessing/image_preprocessing/random_grayscale.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py index 865c55a3ceeb..ca693a246704 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py @@ -1,4 +1,5 @@ from keras.src import backend +from keras.src import tree from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, @@ -96,7 +97,12 @@ def compute_output_shape(self, input_shape): return input_shape def compute_output_spec(self, inputs, **kwargs): - return inputs + return tree.map_structure( + lambda x: backend.KerasTensor( + x.shape, dtype=x.dtype, sparse=x.sparse + ), + inputs, + ) def transform_bounding_boxes(self, bounding_boxes, **kwargs): return bounding_boxes From 27ad80bf5c34583dec0c72d3f429af6257d24fad Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 26 May 2025 13:57:06 +0530 Subject: [PATCH 013/115] Update random_grayscale.py removed the use of tree.map_structure --- .../preprocessing/image_preprocessing/random_grayscale.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py index ca693a246704..ca071d263de7 100644 --- a/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py +++ b/keras/src/layers/preprocessing/image_preprocessing/random_grayscale.py @@ -1,5 +1,4 @@ from keras.src import backend -from keras.src import tree from keras.src.api_export import keras_export from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501 BaseImagePreprocessingLayer, @@ -97,11 +96,8 @@ def compute_output_shape(self, input_shape): return input_shape def compute_output_spec(self, inputs, **kwargs): - return tree.map_structure( - lambda x: backend.KerasTensor( - x.shape, dtype=x.dtype, sparse=x.sparse - ), - inputs, + return backend.KerasTensor( + inputs.shape, dtype=inputs.dtype, sparse=inputs.sparse ) def transform_bounding_boxes(self, bounding_boxes, **kwargs): From 579cc11d705a979d913c2b0840c3877d76db2533 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 29 May 2025 15:20:03 +0530 Subject: [PATCH 014/115] Reapply "Fixed issue with dot_product_attention when using TPU. (#21254)" (#21329) This reverts commit 81821e02486886436d10bb59bdfdf1715ebcca1a. --- keras/src/backend/jax/nn.py | 228 +++++++++++++++++++++++++++++------- 1 file changed, 186 insertions(+), 42 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index ba3dbd103acb..cb2a7716c6ce 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1126,16 +1126,17 @@ def wrap_flash_attention( decoder_segment_ids, custom_mask=None, attn_logits_soft_cap=None, + head_shards=1, + q_seq_shards=1, ): if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( - "Sharding along sequence dimension not allowed in tpu kernel " - "attention" + "Sharding along sequence dimension not allowed" + " in TPU kernel attention" ) if custom_mask is not None: mask = splash_attention_mask.NumpyMask(array=custom_mask) - else: mask = splash_attention_mask.CausalMask( shape=(query.shape[2], query.shape[2]) @@ -1147,8 +1148,8 @@ def wrap_flash_attention( ) splash_kernel = splash_attention_kernel.make_splash_mha( mask=multi_head_mask, - head_shards=1, - q_seq_shards=1, + head_shards=head_shards, + q_seq_shards=q_seq_shards, attn_logits_soft_cap=attn_logits_soft_cap, ) @@ -1168,6 +1169,38 @@ def dot_product_attention( flash_attention=None, attn_logits_soft_cap=None, ): + """Computes dot-product attention given query, key, and value. + + This is the core computation of attention that is used in transformers. + For TPU platforms, flash attention optimizations are automatically applied + when possible, and sharding parameters are inferred from the layout map + in the current distribution context. + + Args: + query: Queries with shape `[batch, time, heads, + depth_k]`. + key: Keys with shape `[batch, time, heads, + depth_k]`. + value: Values with shape `[batch, time, heads, + depth_v]`. + bias: Optional bias with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + mask: Optional mask with shape broadcastable to + `[batch, heads, dest_time, source_time]`. + scale: Float. Optional scale that is applied to the attention + computation. + is_causal: Boolean. Specifying whether causal masking is applied. + flash_attention: Boolean. Whether to use flash attention optimization + for increased performance. Default to None, which means it will + be auto-determined based on the platform, input shapes and + compatibility. + attn_logits_soft_cap: Float. Optional float to softly cap attention + logits to avoid numerical stability issues. Applied as: + `logits = logits / (1.0 + abs(logits) / attn_logits_soft_cap)`. + + Returns: + JAX Array of shape `[batch, time, heads, depth_v]`. + """ query = convert_to_tensor(query) key = convert_to_tensor(key) value = convert_to_tensor(value) @@ -1177,47 +1210,155 @@ def dot_product_attention( f"Received: query.shape={query.shape}, key.shape={key.shape}, " f"value.shape={value.shape}." ) - if flash_attention is None: - flash_attention = _can_use_flash_attention(query, key, value, bias) - elif flash_attention is True: - # Use `raise_error=True` to provide more details if the inputs failed to - # use flash attention - _can_use_flash_attention(query, key, value, bias, raise_error=True) - if jax.devices()[0].platform == "tpu": - # Transpose to ('batch', 'heads', 'length', 'kv') - query = jnp.transpose(query, axes=(0, 2, 1, 3)) - key = jnp.transpose(key, axes=(0, 2, 1, 3)) - value = jnp.transpose(value, axes=(0, 2, 1, 3)) - B, H, S, KV = query.shape - - segment_ids = jnp.ones([B, S]) - # {token_ids, padding_mask, segment_ids} enable packing - out = wrap_flash_attention( - query, - key, - value, - decoder_segment_ids=splash_attention_kernel.SegmentIds( - segment_ids, segment_ids - ), - custom_mask=mask, - attn_logits_soft_cap=attn_logits_soft_cap, + # Check platform + platform = jax.devices()[0].platform + is_tpu = platform == "tpu" + + # Get sharding parameters from distribution context + head_shards = 1 + q_seq_shards = 1 + + if is_tpu: + try: + from keras.src.distribution.distribution_lib import ModelParallel + from keras.src.distribution.distribution_lib import ( + distribution as get_dist, + ) + + # Get current distribution if available + dist = get_dist() + if dist and isinstance(dist, ModelParallel): + mesh = dist.device_mesh + if "model" in mesh.axis_names: + model_dim_index = mesh.axis_names.index("model") + # Set head_shards based on the model dimension of the mesh + head_shards = mesh.shape[model_dim_index] + # Typically keep q_seq_shards=1 for best performance + q_seq_shards = 1 + except (ImportError, ValueError, AttributeError): + # Use default values if detection fails + head_shards = 1 + q_seq_shards = 1 + + # Check if inputs use partial sharding (not fully replicated) + # Flash attention works well with fully replicated tensors on all platforms + # but may have issues with certain partial sharding patterns on non-TPU + # platforms + partially_sharded_inputs = any( + hasattr(t, "sharding") and not t.sharding.is_fully_replicated + for t in (query, key, value) + ) + + # Determine flash attention compatibility + if flash_attention is None: + # Auto-detect flash attention availability + if is_tpu: + # TPUs have specialized hardware for attention that works with any + # sharding pattern + flash_attention = True + else: + # For GPU/CPU with partially sharded inputs, we need + # multiple devices to efficiently handle the sharding + if partially_sharded_inputs and len(jax.devices()) <= 1: + flash_attention = False + else: + flash_attention = _can_use_flash_attention( + query, key, value, bias + ) + elif flash_attention is True and not is_tpu: + # If flash attention is explicitly requested, validate compatibility + # Skip validation for TPU as it has specialized hardware support + try: + _can_use_flash_attention(query, key, value, bias, raise_error=True) + except Exception: + # Only disable flash attention on non-TPU platforms + # if validation fails + flash_attention = False + + # TPU-specific flash attention path + if is_tpu and flash_attention: + # Transpose to ('batch', 'heads', 'length', 'head_dim') + query_tpu_layout = jnp.transpose(query, axes=(0, 2, 1, 3)) + key_tpu_layout = jnp.transpose(key, axes=(0, 2, 1, 3)) + value_tpu_layout = jnp.transpose(value, axes=(0, 2, 1, 3)) + + bs, num_heads, q_len, head_dim = query_tpu_layout.shape + + # Apply scale to query if provided + if scale is not None: + # TPU kernel applies 1/sqrt(head_dim) internally, to achieve + # overall QK^T * scale, scale query by (scale * sqrt(head_dim)) + query_tpu_layout = query_tpu_layout * (scale * math.sqrt(head_dim)) + + # Create segment IDs for Splash Attention (for packing/batching) + segment_ids = jnp.zeros([bs, q_len], dtype=jnp.int32) + decoder_segment_ids = splash_attention_kernel.SegmentIds( + q=segment_ids, kv=segment_ids ) - out = jnp.transpose(out, axes=(0, 2, 1, 3)) - return out - # `dot_product_attention` is only available in jax>=0.4.31 + # Process mask for Splash Attention + custom_mask = None + if mask is not None: + mask_bool = mask.astype("bool") if mask.dtype != jnp.bool_ else mask + + if mask_bool.ndim == 3 and mask_bool.shape[0] == bs: + custom_mask = mask_bool[0] + elif mask_bool.ndim == 4 and mask_bool.shape[0] == bs: + custom_mask = mask_bool[0, 0] + + if is_causal and custom_mask is not None: + causal_mask = jnp.tril( + jnp.ones((q_len, q_len), dtype=jnp.bool_) + ) + custom_mask = jnp.logical_and(custom_mask, causal_mask) + + if custom_mask is None and is_causal: + custom_mask = jnp.tril(jnp.ones((q_len, q_len), dtype=jnp.bool_)) + + try: + output = wrap_flash_attention( + query_tpu_layout, + key_tpu_layout, + value_tpu_layout, + decoder_segment_ids=decoder_segment_ids, + custom_mask=custom_mask, + attn_logits_soft_cap=attn_logits_soft_cap, + head_shards=head_shards, + q_seq_shards=q_seq_shards, + ) + # Transpose output back to Keras layout + return jnp.transpose(output, axes=(0, 2, 1, 3)) + except Exception: + flash_attention = False + + # JAX native dot_product_attention for GPU or fallback for TPU if hasattr(jax.nn, "dot_product_attention"): - return jax.nn.dot_product_attention( - query, - key, - value, - bias=bias, - mask=mask, - scale=scale, - is_causal=is_causal, - implementation="cudnn" if flash_attention else "xla", - ) + try: + return jax.nn.dot_product_attention( + query, + key, + value, + bias=bias, + mask=mask, + scale=scale, + is_causal=is_causal, + implementation="cudnn" if flash_attention else "xla", + ) + except Exception: + # If flash attention fails, fall back to XLA implementation + if flash_attention: + return jax.nn.dot_product_attention( + query, + key, + value, + bias=bias, + mask=mask, + scale=scale, + is_causal=is_causal, + implementation="xla", + ) + raise if flash_attention: raise RuntimeError( @@ -1228,6 +1369,9 @@ def dot_product_attention( # Ref: jax.nn.dot_product_attention # https://github.com/jax-ml/jax/blob/jax-v0.4.33/jax/_src/nn/functions.py#L886 # Not support `query_seq_lengths` and `key_value_seq_lengths` args + + # Fallback to custom XLA implementation + # This is the reference implementation from jax.nn.dot_product_attention output_shape = query.shape _, _, K, H = key.shape scale = (1.0 / jnp.sqrt(H)) if scale is None else scale From 7a0c5473c3091a2c90db031515c1c3f8daae8e7a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 29 May 2025 15:35:49 +0530 Subject: [PATCH 015/115] Improve error handling in _can_use_flash_attention for better debugging Enhanced the _can_use_flash_attention function to provide more detailed error messages when flash attention compatibility checks fail. Changes: - Replace generic exception catching with specific error propagation - When raise_error=True, directly re-raise original exceptions from check_layout() and check_is_flash_attention() functions - Preserve detailed error context from JAX internal validation functions - Maintain existing behavior when raise_error=False (returns False) This improves debugging experience by surfacing specific technical details about tensor layout incompatibilities, cuDNN version requirements, and other flash attention compatibility issues. Relates to keras-hub PR #2257 and addresses flash attention debugging needs. --- keras/src/backend/jax/nn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index cb2a7716c6ce..8eb06c301c5f 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1072,9 +1072,9 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False): is_training=False, ) return True - except: + except Exception as e: if raise_error: - raise + raise e return False From f7a22907a4f47acd9619f7d9ab2aaf893e68354a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 29 May 2025 15:39:01 +0530 Subject: [PATCH 016/115] Revert "Improve error handling in _can_use_flash_attention for better debugging" This reverts commit 7a0c5473c3091a2c90db031515c1c3f8daae8e7a. --- keras/src/backend/jax/nn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 8eb06c301c5f..cb2a7716c6ce 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1072,9 +1072,9 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False): is_training=False, ) return True - except Exception as e: + except: if raise_error: - raise e + raise return False From 8bae8924329d9b61238521a3ae1f352157779232 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 29 May 2025 15:47:22 +0530 Subject: [PATCH 017/115] Fix JAX API compatibility and improve error handling in `_can_use_flash_attention` Changes: - Add missing q_offsets=None and kv_offsets=None parameters to check_layout() call to match updated JAX function signature - Replace bare `except:` with `except Exception as e:` and `raise e` to preserve detailed error messages from JAX validation functions - Maintain existing fallback behavior when raise_error=False This resolves compatibility issues with newer JAX versions and improves debugging experience by surfacing specific technical details about flash attention compatibility failures. --- keras/src/backend/jax/nn.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index cb2a7716c6ce..3dbd06e6a292 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1062,6 +1062,8 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False): q_seqlen=None, kv_seqlen=None, layout=_normalize_layout("BTNH"), + q_offsets=None, + kv_offsets=None, ) check_is_flash_attention( query, @@ -1072,9 +1074,9 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False): is_training=False, ) return True - except: + except Exception as e: if raise_error: - raise + raise e return False From ee196cd1051135364931294c995cc693ceb59b87 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 29 May 2025 16:15:07 +0530 Subject: [PATCH 018/115] Updated `dot_product_attention` Simplified the check for `flasth_attention` by removing redundant checks that are already done in `_can_use_flash_attention`. --- keras/src/backend/jax/nn.py | 52 +++++++------------------------------ 1 file changed, 10 insertions(+), 42 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 3dbd06e6a292..7f097d6e35e8 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1217,11 +1217,17 @@ def dot_product_attention( platform = jax.devices()[0].platform is_tpu = platform == "tpu" - # Get sharding parameters from distribution context - head_shards = 1 - q_seq_shards = 1 + # Determine flash attention compatibility + if flash_attention is None: + flash_attention = _can_use_flash_attention(query, key, value, bias) + elif flash_attention is True: + # Use `raise_error=True` to provide more details if the inputs failed to + # use flash attention + _can_use_flash_attention(query, key, value, bias, raise_error=True) - if is_tpu: + # TPU-specific flash attention path + if is_tpu and flash_attention: + # Get sharding parameters from distribution context try: from keras.src.distribution.distribution_lib import ModelParallel from keras.src.distribution.distribution_lib import ( @@ -1242,44 +1248,6 @@ def dot_product_attention( # Use default values if detection fails head_shards = 1 q_seq_shards = 1 - - # Check if inputs use partial sharding (not fully replicated) - # Flash attention works well with fully replicated tensors on all platforms - # but may have issues with certain partial sharding patterns on non-TPU - # platforms - partially_sharded_inputs = any( - hasattr(t, "sharding") and not t.sharding.is_fully_replicated - for t in (query, key, value) - ) - - # Determine flash attention compatibility - if flash_attention is None: - # Auto-detect flash attention availability - if is_tpu: - # TPUs have specialized hardware for attention that works with any - # sharding pattern - flash_attention = True - else: - # For GPU/CPU with partially sharded inputs, we need - # multiple devices to efficiently handle the sharding - if partially_sharded_inputs and len(jax.devices()) <= 1: - flash_attention = False - else: - flash_attention = _can_use_flash_attention( - query, key, value, bias - ) - elif flash_attention is True and not is_tpu: - # If flash attention is explicitly requested, validate compatibility - # Skip validation for TPU as it has specialized hardware support - try: - _can_use_flash_attention(query, key, value, bias, raise_error=True) - except Exception: - # Only disable flash attention on non-TPU platforms - # if validation fails - flash_attention = False - - # TPU-specific flash attention path - if is_tpu and flash_attention: # Transpose to ('batch', 'heads', 'length', 'head_dim') query_tpu_layout = jnp.transpose(query, axes=(0, 2, 1, 3)) key_tpu_layout = jnp.transpose(key, axes=(0, 2, 1, 3)) From 40583c886a541f454a371142fbd3d82a66a0bdff Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Sat, 7 Jun 2025 18:55:37 +0530 Subject: [PATCH 019/115] Update nn.py --- keras/src/backend/jax/nn.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 7f097d6e35e8..1a652539ffed 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1074,9 +1074,9 @@ def _can_use_flash_attention(query, key, value, bias, raise_error=False): is_training=False, ) return True - except Exception as e: + except: if raise_error: - raise e + raise return False @@ -1121,7 +1121,7 @@ def _dot_product_attention_core( return jnp.einsum("BNTS,BSNH->BTNH", probs, value) -def wrap_flash_attention( +def wrap_flash_attention( query, key, value, @@ -1131,6 +1131,34 @@ def wrap_flash_attention( head_shards=1, q_seq_shards=1, ): + """ Applies a wrapped flash attention mechanism using the Splash kernel. + This function prepares the appropriate attention mask (causal or custom), + constructs a multi-head mask, and applies the Splash multi-head attention + kernel to the provided query, key, and value tensors. It supports optional + sharding and soft capping of attention logits. + Args: + query: jax.Array. The query tensor of shape + (batch, num_heads, seq_len, head_dim). + key: jax.Array. The key tensor of shape + (batch, num_heads, seq_len, head_dim). + value: jax.Array. The value tensor of shape + (batch, num_heads, seq_len, head_dim). + decoder_segment_ids: Optional. Segment IDs for the decoder, used for + sharding or masking. + custom_mask: Optional[jax.Array]. A custom attention mask to apply. If + None, a causal mask is used. + attn_logits_soft_cap: Optional[float]. If provided, applies a soft cap + to the attention logits. + head_shards: int, default=1. Number of shards for the attention heads. + q_seq_shards: int, default=1. Number of shards for the query sequence + dimension. + Returns: + jax.Array: The result of applying the Splash multi-head attention + kernel to the inputs. + Raises: + AssertionError: If sharding along the sequence dimension is attempted + with decoder_segment_ids. + """ if decoder_segment_ids is not None: assert query.shape[2] == decoder_segment_ids.q.shape[1], ( "Sharding along sequence dimension not allowed" From 7c918badc6a371936f409495814e5668dc2ffe72 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Sat, 7 Jun 2025 19:12:28 +0530 Subject: [PATCH 020/115] Update nn.py --- keras/src/backend/jax/nn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/backend/jax/nn.py b/keras/src/backend/jax/nn.py index 1a652539ffed..dbd91122ab84 100644 --- a/keras/src/backend/jax/nn.py +++ b/keras/src/backend/jax/nn.py @@ -1121,7 +1121,7 @@ def _dot_product_attention_core( return jnp.einsum("BNTS,BSNH->BTNH", probs, value) -def wrap_flash_attention( +def wrap_flash_attention( query, key, value, @@ -1131,7 +1131,7 @@ def wrap_flash_attention( head_shards=1, q_seq_shards=1, ): - """ Applies a wrapped flash attention mechanism using the Splash kernel. + """Applies a wrapped flash attention mechanism using the Splash kernel. This function prepares the appropriate attention mask (causal or custom), constructs a multi-head mask, and applies the Splash multi-head attention kernel to the provided query, key, and value tensors. It supports optional From 98877eb4e0afe86e1c474399094b7820632aceb1 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 18 Aug 2025 13:03:37 +0530 Subject: [PATCH 021/115] Created using Colab --- Model_Pruning.ipynb | 4273 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 4273 insertions(+) create mode 100644 Model_Pruning.ipynb diff --git a/Model_Pruning.ipynb b/Model_Pruning.ipynb new file mode 100644 index 000000000000..e4c7ade6eca3 --- /dev/null +++ b/Model_Pruning.ipynb @@ -0,0 +1,4273 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [], + "gpuType": "T4", + "mount_file_id": "1vgIMup-BMpDekluxQuvlxC2Eb7TgG0ZU", + "authorship_tag": "ABX9TyOtiH25/guWiSeu/gW4r4Wh", + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ShWb1fdNpcdg", + "outputId": "98484742-d8bc-4b57-a283-9d98a783aafb" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Requirement already satisfied: pre-commit in /usr/local/lib/python3.11/dist-packages (4.3.0)\n", + "Requirement already satisfied: cfgv>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (3.4.0)\n", + "Requirement already satisfied: identify>=1.0.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (2.6.13)\n", + "Requirement already satisfied: nodeenv>=0.11.1 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (1.9.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (6.0.2)\n", + "Requirement already satisfied: virtualenv>=20.10.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (20.34.0)\n", + "Requirement already satisfied: distlib<1,>=0.3.7 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (0.4.0)\n", + "Requirement already satisfied: filelock<4,>=3.12.2 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (3.18.0)\n", + "Requirement already satisfied: platformdirs<5,>=3.9.1 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (4.3.8)\n", + "/content\n", + "rm: cannot remove 'keras_hub_repo/': No such file or directory\n", + "Cloning into 'keras_repo'...\n", + "remote: Enumerating objects: 97462, done.\u001b[K\n", + "remote: Counting objects: 100% (354/354), done.\u001b[K\n", + "remote: Compressing objects: 100% (210/210), done.\u001b[K\n", + "remote: Total 97462 (delta 274), reused 145 (delta 144), pack-reused 97108 (from 3)\u001b[K\n", + "Receiving objects: 100% (97462/97462), 46.82 MiB | 15.16 MiB/s, done.\n", + "Resolving deltas: 100% (76881/76881), done.\n", + "/content/keras_repo\n", + "Branch 'model-pruning' set up to track remote branch 'model-pruning' from 'origin'.\n", + "Switched to a new branch 'model-pruning'\n", + "Generating api directory with public APIs...\n", + "2025-08-18 04:52:58.601821: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", + "E0000 00:00:1755492778.622187 4059 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "E0000 00:00:1755492778.628272 4059 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "W0000 00:00:1755492778.644040 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", + "W0000 00:00:1755492778.644069 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", + "W0000 00:00:1755492778.644073 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", + "W0000 00:00:1755492778.644081 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", + "2025-08-18 04:52:58.648784: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "Formatting api directory...\n", + "Obtaining file:///content/keras_repo\n", + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Checking if build backend supports build_editable ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build editable ... \u001b[?25l\u001b[?25hdone\n", + " Preparing editable metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: absl-py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (1.4.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (2.0.2)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (13.9.4)\n", + "Requirement already satisfied: namex in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.1.0)\n", + "Requirement already satisfied: h5py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (3.14.0)\n", + "Requirement already satisfied: optree in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.17.0)\n", + "Requirement already satisfied: ml-dtypes in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.5.3)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (25.0)\n", + "Requirement already satisfied: typing-extensions>=4.6.0 in /usr/local/lib/python3.11/dist-packages (from optree->keras==3.11.0) (4.14.1)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (4.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (2.19.2)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich->keras==3.11.0) (0.1.2)\n", + "Building wheels for collected packages: keras\n", + " Building editable for keras (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for keras: filename=keras-3.11.0-0.editable-py3-none-any.whl size=9410 sha256=068d660e1dba2c4f1e4d66ff6ae58dc8965d0c57ba6f4748b8a4955fbd1f19b2\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-zet2_67e/wheels/09/7a/d4/6dbe98c57884e68eba731115af18ec3a7f493640582bacb80f\n", + "Successfully built keras\n", + "Installing collected packages: keras\n", + " Attempting uninstall: keras\n", + " Found existing installation: keras 3.11.0\n", + " Uninstalling keras-3.11.0:\n", + " Successfully uninstalled keras-3.11.0\n", + "Successfully installed keras-3.11.0\n" + ] + } + ], + "source": [ + "!pip install pre-commit\n", + "%cd /content/\n", + "%rm -r keras_repo/\n", + "%rm -r keras_hub_repo/\n", + "\n", + "!git clone https://github.com/pctablet505/keras.git keras_repo\n", + "%cd /content/keras_repo\n", + "!git checkout model-pruning\n", + "!bash /content/keras_repo/shell/api_gen.sh\n", + "%pip install -e .\n", + "\n", + "# %cd /content\n", + "# !git clone https://github.com/pctablet505/keras-hub.git keras_hub_repo\n", + "# %cd /content/keras_hub_repo\n", + "# !git checkout model-pruning\n", + "# %pip install -e .\n", + "# %cd /content" + ] + }, + { + "cell_type": "code", + "source": [ + "%cd /content" + ], + "metadata": { + "id": "hLHxr-Z8qI7X", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "a7e8816a-fda7-4afc-8335-6b494a537c6b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "B6MlZUaxvSY8" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f9996cef" + }, + "source": [ + "# Task\n", + "Clone the branch \"https://github.com/keras-team/keras/compare/master...pctablet505:keras:model-pruning\", install it with pip, and then write code to create a classifier on top of a ResNet50 backbone using Keras 3, and evaluate it on the CIFAR-100 dataset." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c0cea87b" + }, + "source": [ + "## Load and preprocess data\n", + "\n", + "### Subtask:\n", + "Load the CIFAR-100 dataset and preprocess it for use with the ResNet50 model.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aaeda69c" + }, + "source": [ + "**Reasoning**:\n", + "The subtask requires loading and preprocessing the CIFAR-100 dataset using Keras. This involves importing the dataset, loading the data, normalizing the images, and one-hot encoding the labels. These steps can be combined into a single code block.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "bea81d25", + "outputId": "29593519-defb-4ff6-fe65-0b3053ae9753" + }, + "source": [ + "from keras.datasets import cifar100\n", + "from keras.utils import to_categorical\n", + "\n", + "# Load the CIFAR-100 dataset\n", + "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", + "\n", + "# Normalize the image data\n", + "x_train = x_train.astype('float32') / 255.0\n", + "x_test = x_test.astype('float32') / 255.0\n", + "\n", + "# Convert labels to one-hot encoding\n", + "num_classes = 100\n", + "y_train = to_categorical(y_train, num_classes)\n", + "y_test = to_categorical(y_test, num_classes)\n", + "\n", + "print(\"CIFAR-100 dataset loaded and preprocessed.\")\n", + "print(f\"Training data shape: {x_train.shape}\")\n", + "print(f\"Training labels shape: {y_train.shape}\")\n", + "print(f\"Testing data shape: {x_test.shape}\")\n", + "print(f\"Testing labels shape: {y_test.shape}\")" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "CIFAR-100 dataset loaded and preprocessed.\n", + "Training data shape: (50000, 32, 32, 3)\n", + "Training labels shape: (50000, 100)\n", + "Testing data shape: (10000, 32, 32, 3)\n", + "Testing labels shape: (10000, 100)\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4177d50b" + }, + "source": [ + "## Load the resnet50 model\n", + "\n", + "### Subtask:\n", + "Load the pre-trained ResNet50 model without the top classification layer.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "989069a2" + }, + "source": [ + "**Reasoning**:\n", + "Import the ResNet50 model and load it with pre-trained weights and without the top classification layer.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "72c1f0de", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "1007e7f4-3d37-4410-8083-d4c937ee9096" + }, + "source": [ + "from keras.applications import ResNet50\n", + "\n", + "# Load the ResNet50 model\n", + "base_model = ResNet50(weights='imagenet', include_top=False, input_shape= (128, 128, 3))\n", + "base_model.trainable = True\n", + "\n", + "print(\"ResNet50 base model loaded.\")\n", + "print(f\"Base model output shape: {base_model.output_shape}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "ResNet50 base model loaded.\n", + "Base model output shape: (None, 4, 4, 2048)\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d2bd7ce8" + }, + "source": [ + "## Build the classifier model\n", + "\n", + "### Subtask:\n", + "Add a global average pooling layer and a dense classification layer on top of the ResNet50 backbone.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5349d77c" + }, + "source": [ + "**Reasoning**:\n", + "Add the classification layers on top of the ResNet50 base model and create the final model.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "e67685d6", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 397 + }, + "outputId": "2e7fbc5d-9dd7-4017-cf29-84575b238080" + }, + "source": [ + "from keras.layers import GlobalAveragePooling2D, Dense, Input, UpSampling2D, Conv2D, BatchNormalization\n", + "from keras.models import Model\n", + "\n", + "# Get the output tensor from the base_model\n", + "inputs = Input(shape=(32, 32, 3))\n", + "x = UpSampling2D(size=(4,4))(inputs)\n", + "x = base_model(x)\n", + "\n", + "# x = Conv2D(filters=256, kernel_size=(3,3), strides=(2,2))(x)\n", + "\n", + "# Add a GlobalAveragePooling2D layer\n", + "x = GlobalAveragePooling2D()(x)\n", + "x = Dense(256, activation='relu')(x)\n", + "x = BatchNormalization()(x)\n", + "\n", + "predictions = Dense(num_classes, activation='softmax')(x)\n", + "\n", + "# Create the final model\n", + "model = Model(inputs=inputs, outputs=predictions)\n", + "\n", + "# Print the model summary\n", + "model.summary()" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "\u001b[1mModel: \"functional\"\u001b[0m\n" + ], + "text/html": [ + "
Model: \"functional\"\n",
+              "
\n" + ] + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n", + "┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n", + "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n", + "│ input_layer_1 (\u001b[38;5;33mInputLayer\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m, \u001b[38;5;34m32\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ up_sampling2d (\u001b[38;5;33mUpSampling2D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m, \u001b[38;5;34m128\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ resnet50 (\u001b[38;5;33mFunctional\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m4\u001b[0m, \u001b[38;5;34m4\u001b[0m, \u001b[38;5;34m2048\u001b[0m) │ \u001b[38;5;34m23,587,712\u001b[0m │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ global_average_pooling2d │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m2048\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", + "│ (\u001b[38;5;33mGlobalAveragePooling2D\u001b[0m) │ │ │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ dense (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m524,544\u001b[0m │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ batch_normalization │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m1,024\u001b[0m │\n", + "│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n", + "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", + "│ dense_1 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m100\u001b[0m) │ \u001b[38;5;34m25,700\u001b[0m │\n", + "└─────────────────────────────────┴────────────────────────┴───────────────┘\n" + ], + "text/html": [ + "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
+              "┃ Layer (type)                     Output Shape                  Param # ┃\n",
+              "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
+              "│ input_layer_1 (InputLayer)      │ (None, 32, 32, 3)      │             0 │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ up_sampling2d (UpSampling2D)    │ (None, 128, 128, 3)    │             0 │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ resnet50 (Functional)           │ (None, 4, 4, 2048)     │    23,587,712 │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ global_average_pooling2d        │ (None, 2048)           │             0 │\n",
+              "│ (GlobalAveragePooling2D)        │                        │               │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ dense (Dense)                   │ (None, 256)            │       524,544 │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ batch_normalization             │ (None, 256)            │         1,024 │\n",
+              "│ (BatchNormalization)            │                        │               │\n",
+              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
+              "│ dense_1 (Dense)                 │ (None, 100)            │        25,700 │\n",
+              "└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
+              "
\n" + ] + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m24,138,980\u001b[0m (92.08 MB)\n" + ], + "text/html": [ + "
 Total params: 24,138,980 (92.08 MB)\n",
+              "
\n" + ] + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m24,085,348\u001b[0m (91.88 MB)\n" + ], + "text/html": [ + "
 Trainable params: 24,085,348 (91.88 MB)\n",
+              "
\n" + ] + }, + "metadata": {} + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m53,632\u001b[0m (209.50 KB)\n" + ], + "text/html": [ + "
 Non-trainable params: 53,632 (209.50 KB)\n",
+              "
\n" + ] + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [ + "# base_model.summary()\n", + "model.load_weights(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")" + ], + "metadata": { + "id": "vpEcuJfN2Hqv", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 426 + }, + "outputId": "a62863e5-1f96-439b-b7bf-5fc52fdd8af4" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "error", + "ename": "ValueError", + "evalue": "A total of 1 objects could not be loaded. Example error message for object :\n\nLayer 'batch_normalization' expected 4 variables, but received 0 variables during loading. Expected: ['gamma', 'beta', 'moving_mean', 'moving_variance']\n\nList of objects that could not be loaded:\n[]", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-3650302260.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# base_model.summary()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;31m# To get the full stack trace, call:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[0;31m# `keras.config.disable_traceback_filtering()`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 122\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_tb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 123\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0mfiltered_tb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/saving/saving_lib.py\u001b[0m in \u001b[0;36m_raise_loading_failure\u001b[0;34m(error_msgs, warn_only)\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0mwarnings\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 644\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 645\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 646\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 647\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mValueError\u001b[0m: A total of 1 objects could not be loaded. Example error message for object :\n\nLayer 'batch_normalization' expected 4 variables, but received 0 variables during loading. Expected: ['gamma', 'beta', 'moving_mean', 'moving_variance']\n\nList of objects that could not be loaded:\n[]" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "93a35201" + }, + "source": [ + "## Compile the model\n", + "\n", + "### Subtask:\n", + "Compile the model with an appropriate optimizer, loss function, and metrics.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "12a5076e" + }, + "source": [ + "**Reasoning**:\n", + "Compile the model with the specified optimizer, loss function, and metrics.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "7597426d", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "d44e2010-71e3-4bcf-8037-89f2e9d61a23" + }, + "source": [ + "from keras.optimizers import Adam, SGD\n", + "from keras.losses import CategoricalCrossentropy\n", + "from keras.metrics import CategoricalAccuracy\n", + "\n", + "# Choose and instantiate the optimizer\n", + "optimizer = Adam(learning_rate=1e-4, )\n", + "\n", + "# Choose and instantiate the loss function\n", + "loss_fn = CategoricalCrossentropy()\n", + "\n", + "# Choose and instantiate the metric\n", + "metrics = [CategoricalAccuracy()]\n", + "\n", + "# Compile the model\n", + "model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)\n", + "\n", + "print(\"Model compiled successfully.\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Model compiled successfully.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "826ac27b" + }, + "source": [ + "## Train the model\n", + "\n", + "### Subtask:\n", + "Train the model on the preprocessed CIFAR-100 training data.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ded6c771" + }, + "source": [ + "**Reasoning**:\n", + "Train the compiled model using the fit method on the training data and validate on the test data.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eafc6cc5", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "9c88309c-216b-433f-a7c2-033e55049e7d" + }, + "source": [ + "from keras.callbacks import EarlyStopping\n", + "\n", + "epochs = 200\n", + "# Add early stopping\n", + "early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)\n", + "\n", + "history = model.fit(x_train, y_train, epochs=epochs, validation_data=(x_test, y_test),batch_size=800, callbacks=[early_stopping])\n", + "\n", + "print(f\"Model trained for {epochs} epochs.\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Epoch 1/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m154s\u001b[0m 1s/step - categorical_accuracy: 0.3758 - loss: 2.7659 - val_categorical_accuracy: 0.0099 - val_loss: 4.7074\n", + "Epoch 2/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.7463 - loss: 0.9840 - val_categorical_accuracy: 0.0103 - val_loss: 4.8237\n", + "Epoch 3/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9000 - loss: 0.4333 - val_categorical_accuracy: 0.0100 - val_loss: 5.0798\n", + "Epoch 4/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9775 - loss: 0.1587 - val_categorical_accuracy: 0.0100 - val_loss: 5.1278\n", + "Epoch 5/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9965 - loss: 0.0558 - val_categorical_accuracy: 0.0117 - val_loss: 5.3401\n", + "Epoch 6/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9991 - loss: 0.0250 - val_categorical_accuracy: 0.0146 - val_loss: 5.3616\n", + "Epoch 7/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9996 - loss: 0.0147 - val_categorical_accuracy: 0.0219 - val_loss: 5.2611\n", + "Epoch 8/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0103 - val_categorical_accuracy: 0.0384 - val_loss: 5.1634\n", + "Epoch 9/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 0.0077 - val_categorical_accuracy: 0.0699 - val_loss: 4.9677\n", + "Epoch 10/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0061 - val_categorical_accuracy: 0.1234 - val_loss: 4.6203\n", + "Epoch 11/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9997 - loss: 0.0051 - val_categorical_accuracy: 0.2005 - val_loss: 4.0977\n", + "Epoch 12/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9997 - loss: 0.0046 - val_categorical_accuracy: 0.3009 - val_loss: 3.4119\n", + "Epoch 13/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9998 - loss: 0.0037 - val_categorical_accuracy: 0.4052 - val_loss: 2.8126\n", + "Epoch 14/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0033 - val_categorical_accuracy: 0.4918 - val_loss: 2.3466\n", + "Epoch 15/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0029 - val_categorical_accuracy: 0.5530 - val_loss: 2.0029\n", + "Epoch 16/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0026 - val_categorical_accuracy: 0.6045 - val_loss: 1.7305\n", + "Epoch 17/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0023 - val_categorical_accuracy: 0.6521 - val_loss: 1.5021\n", + "Epoch 18/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0021 - val_categorical_accuracy: 0.6901 - val_loss: 1.3324\n", + "Epoch 19/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0020 - val_categorical_accuracy: 0.7139 - val_loss: 1.2191\n", + "Epoch 20/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0019 - val_categorical_accuracy: 0.7291 - val_loss: 1.1580\n", + "Epoch 21/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0017 - val_categorical_accuracy: 0.7398 - val_loss: 1.1306\n", + "Epoch 22/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0015 - val_categorical_accuracy: 0.7418 - val_loss: 1.1207\n", + "Epoch 23/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9997 - loss: 0.0015 - val_categorical_accuracy: 0.7438 - val_loss: 1.1222\n", + "Epoch 24/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0014 - val_categorical_accuracy: 0.7431 - val_loss: 1.1229\n", + "Epoch 25/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9997 - loss: 0.0014 - val_categorical_accuracy: 0.7444 - val_loss: 1.1300\n", + "Epoch 26/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0013 - val_categorical_accuracy: 0.7444 - val_loss: 1.1340\n", + "Epoch 27/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0012 - val_categorical_accuracy: 0.7448 - val_loss: 1.1375\n", + "Epoch 28/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0012 - val_categorical_accuracy: 0.7449 - val_loss: 1.1414\n", + "Epoch 29/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0011 - val_categorical_accuracy: 0.7448 - val_loss: 1.1467\n", + "Epoch 30/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 0.0011 - val_categorical_accuracy: 0.7429 - val_loss: 1.1510\n", + "Epoch 31/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 9.7342e-04 - val_categorical_accuracy: 0.7432 - val_loss: 1.1534\n", + "Epoch 32/200\n", + "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 9.3112e-04 - val_categorical_accuracy: 0.7444 - val_loss: 1.1565\n", + "Model trained for 200 epochs.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "887aa522" + }, + "source": [ + "## Evaluate the model\n", + "\n", + "### Subtask:\n", + "Evaluate the trained model on the preprocessed CIFAR-100 test data." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "67d1be63" + }, + "source": [ + "**Reasoning**:\n", + "Evaluate the trained model on the test dataset using the `evaluate` method." + ] + }, + { + "cell_type": "code", + "source": [ + "model.metrics" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "4F6dwwMbo81N", + "outputId": "d67a2356-e7c8-4635-9c51-5937e10d7e42" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[, ]" + ] + }, + "metadata": {}, + "execution_count": 17 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "65a189cb", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "4b99be46-8f79-44cb-a3cc-320d4630a9d1" + }, + "source": [ + "# Evaluate the model on the test data\n", + "loss, accuracy = model.evaluate(x_test, y_test, verbose=0,)\n", + "\n", + "print(f\"Test Loss: {loss:.4f}\")\n", + "print(f\"Test Accuracy: {accuracy:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Test Loss: 1.1207\n", + "Test Accuracy: 0.7420\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "4mjtemwhrFXD" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5400d0b3" + }, + "source": [ + "# Task\n", + "Save the trained Keras model, load it, evaluate the loaded model, prune the loaded model, and evaluate the pruned model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8e39308c" + }, + "source": [ + "## Save the model\n", + "\n", + "### Subtask:\n", + "Save the trained model to a file.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "f731890a" + }, + "source": [ + "**Reasoning**:\n", + "Save the trained Keras model to a file.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "d3a28c71", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "87dbf45c-9096-4a17-87ac-cd823f556110" + }, + "source": [ + "# model.save(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "# print(\"Model saved successfully.\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Model saved successfully.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2f67026a" + }, + "source": [ + "## Load the model\n", + "\n", + "### Subtask:\n", + "Load the saved model from the file.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "665c2a00" + }, + "source": [ + "**Reasoning**:\n", + "Load the saved Keras model from the file using `load_model`.\n", + "\n" + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n" + ], + "metadata": { + "id": "RwAPp080l4DU", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 332 + }, + "outputId": "3cef2bef-aef8-449a-c65d-08a9b3e7c29a" + }, + "execution_count": 2, + "outputs": [ + { + "output_type": "error", + "ename": "ImportError", + "evalue": "cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-1342103991.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPruningConfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mLnPruning\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", + "", + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" + ], + "errorDetails": { + "actions": [ + { + "action": "open_url", + "actionText": "Open Examples", + "url": "/notebooks/snippets/importing_libraries.ipynb" + } + ] + } + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "ab89861a", + "outputId": "3ee300da-4574-4e03-d996-ed51b06b5718" + }, + "source": [ + "\n", + "# Load the saved model\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "print(\"Model loaded successfully.\")\n", + "\n", + "# Evaluate the loaded model on the test data\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Model loaded successfully.\n", + "Loaded Model Test Loss: 1.5565\n", + "Loaded Model Test Accuracy: 0.5668\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "5b169cac", + "outputId": "6f5b283e-8617-4a52-e397-84e8a46ab127" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.1, method=\"l1\") # Old API\n", + "stats = loaded_model.prune(sparsity=0.1, method=\"l1\") # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Loaded Model Test Loss: 3.6504\n", + "Loaded Model Test Accuracy: 0.1479\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.3, method=\"l1\") # Old API\n", + "stats = loaded_model.prune(sparsity=0.3, method=\"l1\") # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "RBU4bMx4wU-Y", + "outputId": "f5301edc-c58a-4da7-827f-53cc86a9f39b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Loaded Model Test Loss: 3.7006\n", + "Loaded Model Test Accuracy: 0.1382\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.5, method=\"l1\") # Old API\n", + "stats = loaded_model.prune(sparsity=0.5, method=\"l1\") # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "kHbuTCBbw3en", + "outputId": "28462d57-4d31-4584-e547-87c4dd31df48" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Loaded Model Test Loss: 3.7443\n", + "Loaded Model Test Accuracy: 0.1329\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.7, method=LnPruning(n=4)) # Old API\n", + "stats = loaded_model.prune(sparsity=0.7, method=LnPruning(n=4)) # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "Amz1C_RexHC-", + "outputId": "313241c9-2b13-423b-ead2-8b48d205efe0" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Loaded Model Test Loss: 3.8623\n", + "Loaded Model Test Accuracy: 0.1145\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "loaded_model.loss" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "MvrldtC3k1Uk", + "outputId": "05ac513e-713f-4043-ee3c-ec4b9b5f75a3" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + ", kwargs={'from_logits': False, 'label_smoothing': 0.0, 'axis': -1})>" + ] + }, + "metadata": {}, + "execution_count": 12 + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.7, method=\"saliency\",dataset=(x_train, y_train),loss_fn=loaded_model.loss) # Old API\n", + "stats = loaded_model.prune(sparsity=0.7, method=\"saliency\", dataset=(x_train, y_train), loss_fn=loaded_model.loss) # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=1)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "0i2dhj-inUxX", + "outputId": "fa45e8df-977d-4cab-c69a-a4e4583212ec" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 37ms/step - categorical_accuracy: 0.1457 - loss: 3.6570\n", + "Loaded Model Test Loss: 3.6570\n", + "Loaded Model Test Accuracy: 0.1457\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# config = PruningConfig(sparsity=0.7, method=\"taylor\",dataset=(x_train, y_train),loss_fn=loaded_model.loss) # Old API\n", + "stats = loaded_model.prune(sparsity=0.7, method=\"taylor\", dataset=(x_train, y_train), loss_fn=loaded_model.loss) # New API\n", + "\n", + "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=1)\n", + "\n", + "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", + "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "nQa4k63uhC2M", + "outputId": "e19ae0e9-dab4-4168-d367-30b0ad853b7b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 37ms/step - categorical_accuracy: 0.1484 - loss: 3.6519\n", + "Loaded Model Test Loss: 3.6519\n", + "Loaded Model Test Accuracy: 0.1484\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "stats" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "qUtS1rZmqv19", + "outputId": "f1459330-7833-45fb-afd8-a7004c555f93" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'initial_sparsity': 0.00020536343876534775,\n", + " 'final_sparsity': 0.699755017773933,\n", + " 'pruned_layers': 2,\n", + " 'target_sparsity': 0.7,\n", + " 'method': 'taylor'}" + ] + }, + "metadata": {}, + "execution_count": 15 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "48fdb8de", + "outputId": "c5560c6e-f6c0-4343-9d2c-2dbd02c5c6f3" + }, + "source": [ + "with open(\"/content/PRUNING_DESIGN.md\", \"r\") as f:\n", + " print(f.read())" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "# Keras Model Pruning: Design Documentation\n", + "\n", + "## Table of Contents\n", + "- [Overview](#overview)\n", + "- [Architecture](#architecture)\n", + "- [Core Components](#core-components)\n", + "- [API Design](#api-design)\n", + "- [Pruning Methods](#pruning-methods)\n", + "- [Layer Selection System](#layer-selection-system)\n", + "- [Analysis and Verification Tools](#analysis-and-verification-tools)\n", + "- [Usage Examples](#usage-examples)\n", + "- [Implementation Details](#implementation-details)\n", + "- [Design Decisions](#design-decisions)\n", + "\n", + "## Overview\n", + "\n", + "The Keras Model Pruning system provides a comprehensive framework for reducing neural network model size by removing unnecessary weights while maintaining performance. The system supports multiple pruning algorithms, flexible layer selection, and extensive analysis tools.\n", + "\n", + "### Key Features\n", + "\n", + "- **Multiple Pruning Methods**: L1/L2 magnitude, structured, saliency-based, and Taylor expansion methods\n", + "- **Flexible Layer Selection**: Support for exact names, regex patterns, and mixed specifications\n", + "- **Direct Parameter API**: No configuration objects required - use parameters directly\n", + "- **Comprehensive Analysis**: Sparsity verification and performance benchmarking tools\n", + "- **Training Integration**: Callbacks for gradual pruning during training\n", + "- **Backend Agnostic**: Works with TensorFlow, JAX, and PyTorch backends\n", + "\n", + "## Architecture\n", + "\n", + "```\n", + "keras.src.pruning/\n", + "├── core.py # Core pruning logic and orchestration\n", + "├── pruning_method.py # Abstract base and concrete pruning methods\n", + "├── pruning_schedule.py # Scheduling for gradual pruning\n", + "├── pruning_utils.py # Analysis and verification utilities\n", + "├── config.py # Legacy configuration (deprecated)\n", + "└── __init__.py # Public API exports\n", + "\n", + "keras.src.callbacks/\n", + "└── pruning.py # Training callbacks for gradual pruning\n", + "\n", + "keras.src.models/\n", + "└── model.py # Model.prune() method integration\n", + "```\n", + "\n", + "### Component Relationships\n", + "\n", + "```mermaid\n", + "graph TD\n", + " A[Model.prune()] --> B[apply_pruning_to_model()]\n", + " B --> C[apply_pruning_to_layer()]\n", + " C --> D[PruningMethod.compute_mask()]\n", + " D --> E[Backend-specific implementation]\n", + " \n", + " F[PruningCallback] --> B\n", + " G[Analysis Tools] --> H[analyze_sparsity()]\n", + " G --> I[benchmark_inference()]\n", + " \n", + " J[Layer Selection] --> K[match_layers_by_patterns()]\n", + " K --> L[Regex Pattern Matching]\n", + "```\n", + "\n", + "## Core Components\n", + "\n", + "### 1. PruningMethod Base Class\n", + "\n", + "Abstract base class defining the pruning interface:\n", + "\n", + "```python\n", + "class PruningMethod(abc.ABC):\n", + " @abc.abstractmethod\n", + " def compute_mask(self, weights, sparsity_ratio, **kwargs):\n", + " \"\"\"Compute binary mask indicating which weights to prune.\"\"\"\n", + " pass\n", + " \n", + " def apply_mask(self, weights, mask):\n", + " \"\"\"Apply pruning mask to weights.\"\"\"\n", + " return weights * ops.cast(mask, weights.dtype)\n", + "```\n", + "\n", + "### 2. Core Pruning Functions\n", + "\n", + "#### `apply_pruning_to_model()`\n", + "Main orchestration function that applies pruning to selected layers:\n", + "\n", + "```python\n", + "def apply_pruning_to_model(model, sparsity, method=\"l1\", layers_to_prune=None, \n", + " dataset=None, loss_fn=None, **kwargs):\n", + " \"\"\"Apply pruning to specified layers in a model.\"\"\"\n", + "```\n", + "\n", + "#### `should_prune_layer()`\n", + "Determines if a layer should be pruned based on type and selection criteria:\n", + "\n", + "```python\n", + "def should_prune_layer(layer, layers_to_prune=None):\n", + " \"\"\"Determine if layer should be pruned based on type and selection.\"\"\"\n", + "```\n", + "\n", + "### 3. Layer Selection System\n", + "\n", + "#### Pattern Matching\n", + "The system supports flexible layer selection:\n", + "\n", + "- **Exact Names**: `[\"dense_1\", \"conv2d_3\"]`\n", + "- **Regex Patterns**: `[\"dense_.*\", \"conv2d_[0-9]\"]`\n", + "- **Mixed Specifications**: `[\"dense_input\", \"conv.*\", \"dense_hidden_2\"]`\n", + "- **Single String**: `\"dense_hidden_.*\"`\n", + "\n", + "#### Implementation\n", + "```python\n", + "def match_layers_by_patterns(model, patterns):\n", + " \"\"\"Find layers matching given patterns using exact and regex matching.\"\"\"\n", + "```\n", + "\n", + "## API Design\n", + "\n", + "### New Direct Parameter API\n", + "\n", + "The modern API accepts parameters directly without requiring configuration objects:\n", + "\n", + "```python\n", + "# Model pruning\n", + "model.prune(\n", + " sparsity=0.5, # Target sparsity level\n", + " method=\"l1\", # Pruning method\n", + " layers_to_prune=[\"dense_.*\"], # Layer selection (optional)\n", + " dataset=(x, y), # For gradient methods (optional)\n", + " loss_fn=\"mse\" # Loss function (optional)\n", + ")\n", + "\n", + "# Training callbacks\n", + "callback = keras.callbacks.PruningCallback(\n", + " sparsity=0.7,\n", + " method=\"structured\", \n", + " layers_to_prune=[\"conv.*\"],\n", + " start_step=100,\n", + " end_step=500,\n", + " frequency=50\n", + ")\n", + "```\n", + "\n", + "### Legacy Configuration API (Deprecated)\n", + "\n", + "For backwards compatibility, the old config-based API is still supported:\n", + "\n", + "```python\n", + "config = PruningConfig(sparsity=0.5, method=\"l1\")\n", + "model.prune(config=config) # Deprecated - issues warning\n", + "```\n", + "\n", + "## Pruning Methods\n", + "\n", + "### 1. Magnitude-Based Methods\n", + "\n", + "#### L1 Pruning\n", + "Prunes weights with smallest absolute values:\n", + "- **Formula**: Sort by `|w|`, remove smallest\n", + "- **Use Case**: General purpose, fast, no data required\n", + "- **Structured Option**: Can prune entire channels/filters\n", + "\n", + "```python\n", + "model.prune(sparsity=0.5, method=\"l1\") # Unstructured\n", + "model.prune(sparsity=0.3, method=\"l1_structured\") # Structured\n", + "```\n", + "\n", + "#### Ln Pruning\n", + "Generalizes to any norm order:\n", + "- **Formula**: Sort by `|w|^n`, configurable norm order\n", + "- **Use Case**: Research, experimentation with different norms\n", + "\n", + "```python\n", + "model.prune(sparsity=0.4, method=\"l2\") # L2 norm\n", + "```\n", + "\n", + "### 2. Gradient-Based Methods\n", + "\n", + "#### Saliency Pruning (First-Order)\n", + "Uses gradients to estimate weight importance:\n", + "- **Formula**: `|∂L/∂w × w|`\n", + "- **Mathematical Basis**: First-order Taylor approximation of loss change\n", + "- **Requirements**: Model, dataset, loss function\n", + "- **Backend Support**: TensorFlow (optimized), JAX/PyTorch (planned)\n", + "\n", + "```python\n", + "model.prune(\n", + " sparsity=0.4,\n", + " method=\"saliency\",\n", + " dataset=(x_sample, y_sample),\n", + " loss_fn=\"categorical_crossentropy\"\n", + ")\n", + "```\n", + "\n", + "#### Taylor Pruning (Second-Order)\n", + "Uses second-order approximation for better accuracy:\n", + "- **Formula**: `|∂L/∂w × w| + 0.5 × |H_ii × w²|`\n", + "- **Mathematical Basis**: Second-order Taylor approximation\n", + "- **Hessian Approximation**: Uses `(∂L/∂w)²` for computational efficiency\n", + "- **Requirements**: Model, dataset, loss function\n", + "\n", + "```python\n", + "model.prune(\n", + " sparsity=0.3,\n", + " method=\"taylor\",\n", + " dataset=(x_sample, y_sample),\n", + " loss_fn=\"mse\"\n", + ")\n", + "```\n", + "\n", + "### 3. Structured Methods\n", + "\n", + "#### Structured Pruning\n", + "Removes entire channels/filters based on L2 norms:\n", + "- **Formula**: Sort channels by `√(Σw²)`, remove smallest\n", + "- **Advantage**: Reduces model size and computation\n", + "- **Use Case**: Deployment optimization, hardware acceleration\n", + "\n", + "```python\n", + "model.prune(sparsity=0.4, method=\"structured\")\n", + "```\n", + "\n", + "### 4. Research Methods\n", + "\n", + "#### Random Pruning\n", + "Randomly selects weights to prune:\n", + "- **Use Case**: Baseline for research comparisons\n", + "- **Formula**: Random selection with specified sparsity\n", + "\n", + "```python\n", + "model.prune(sparsity=0.5, method=\"random\", seed=42)\n", + "```\n", + "\n", + "## Layer Selection System\n", + "\n", + "### Selection Criteria\n", + "\n", + "The system allows fine-grained control over which layers to prune:\n", + "\n", + "#### 1. All Eligible Layers (Default)\n", + "```python\n", + "model.prune(sparsity=0.5) # Prunes all Dense, Conv1D, Conv2D, Conv3D layers\n", + "```\n", + "\n", + "#### 2. Exact Layer Names\n", + "```python\n", + "model.prune(\n", + " sparsity=0.4, \n", + " layers_to_prune=[\"dense_1\", \"dense_2\", \"conv2d_features\"]\n", + ")\n", + "```\n", + "\n", + "#### 3. Regex Patterns\n", + "```python\n", + "model.prune(\n", + " sparsity=0.3,\n", + " layers_to_prune=[\n", + " \"dense_hidden_.*\", # All dense_hidden_* layers\n", + " \"conv2d_[0-9]+\", # conv2d_1, conv2d_2, etc.\n", + " \".*_features\" # Any layer ending with _features\n", + " ]\n", + ")\n", + "```\n", + "\n", + "#### 4. Mixed Specifications\n", + "```python\n", + "model.prune(\n", + " sparsity=0.6,\n", + " layers_to_prune=[\n", + " \"input_layer\", # Exact name\n", + " \"conv.*\", # Regex pattern\n", + " \"dense_output\" # Another exact name\n", + " ]\n", + ")\n", + "```\n", + "\n", + "### Pattern Matching Implementation\n", + "\n", + "The layer selection system uses a two-stage matching process:\n", + "\n", + "1. **Exact Match**: First tries exact string comparison\n", + "2. **Regex Match**: Falls back to regex pattern matching if exact match fails\n", + "\n", + "This ensures maximum flexibility while maintaining performance.\n", + "\n", + "## Analysis and Verification Tools\n", + "\n", + "### Sparsity Analysis\n", + "\n", + "#### `analyze_sparsity(model, layer_names=None)`\n", + "Provides detailed sparsity statistics:\n", + "\n", + "```python\n", + "stats = analyze_sparsity(model)\n", + "print(f\"Overall sparsity: {stats['overall_sparsity']:.3f}\")\n", + "print(f\"Layers analyzed: {stats['layers_analyzed']}\")\n", + "\n", + "# Analyze specific layer groups\n", + "hidden_stats = analyze_sparsity(model, layer_names=[\"dense_hidden_.*\"])\n", + "```\n", + "\n", + "#### `compare_sparsity(model_before, model_after)`\n", + "Compares sparsity between two models:\n", + "\n", + "```python\n", + "comparison = compare_sparsity(original_model, pruned_model)\n", + "print_sparsity_report(comparison)\n", + "```\n", + "\n", + "### Performance Benchmarking\n", + "\n", + "#### `benchmark_inference(model, test_data)`\n", + "Measures inference performance with statistical analysis:\n", + "\n", + "```python\n", + "benchmark = benchmark_inference(model, test_data, num_iterations=100)\n", + "print(f\"Mean time: {benchmark['mean_time']*1000:.3f} ms\")\n", + "print(f\"Throughput: {benchmark['throughput_samples_per_sec']:.1f} samples/sec\")\n", + "```\n", + "\n", + "#### `compare_inference_speed(model_before, model_after, test_data)`\n", + "Compares performance improvements:\n", + "\n", + "```python\n", + "comparison = compare_inference_speed(original_model, pruned_model, test_data)\n", + "print(f\"Speedup: {comparison['improvements']['speedup_factor']:.3f}x\")\n", + "print(f\"Time reduction: {comparison['improvements']['time_reduction_percent']:.2f}%\")\n", + "```\n", + "\n", + "### Complete Analysis\n", + "\n", + "#### `complete_pruning_analysis()`\n", + "Runs comprehensive analysis combining sparsity and performance metrics:\n", + "\n", + "```python\n", + "analysis = complete_pruning_analysis(\n", + " model_before=original_model,\n", + " model_after=pruned_model,\n", + " test_data=test_batch\n", + ")\n", + "# Automatically prints detailed reports\n", + "```\n", + "\n", + "## Usage Examples\n", + "\n", + "### Basic Usage\n", + "\n", + "```python\n", + "# Simple L1 pruning\n", + "model.prune(sparsity=0.5, method=\"l1\")\n", + "\n", + "# Structured pruning for deployment\n", + "model.prune(sparsity=0.3, method=\"structured\")\n", + "```\n", + "\n", + "### Advanced Usage\n", + "\n", + "```python\n", + "# Selective gradient-based pruning\n", + "model.prune(\n", + " sparsity=0.4,\n", + " method=\"saliency\",\n", + " layers_to_prune=[\"conv.*\", \"dense_hidden_.*\"],\n", + " dataset=(x_train[:100], y_train[:100]),\n", + " loss_fn=\"categorical_crossentropy\"\n", + ")\n", + "```\n", + "\n", + "### Training Integration\n", + "\n", + "```python\n", + "# Gradual pruning during training\n", + "callback = keras.callbacks.PruningCallback(\n", + " sparsity=0.8,\n", + " method=\"l1\",\n", + " layers_to_prune=[\"dense_.*\"],\n", + " start_step=100,\n", + " end_step=1000,\n", + " frequency=50,\n", + " schedule=\"polynomial\"\n", + ")\n", + "\n", + "model.fit(x, y, callbacks=[callback])\n", + "```\n", + "\n", + "### Analysis and Verification\n", + "\n", + "```python\n", + "# Complete analysis workflow\n", + "analysis = complete_pruning_analysis(\n", + " model_before=original,\n", + " model_after=pruned,\n", + " test_data=x_test[:32],\n", + " layer_names=[\"dense_.*\"] # Focus on specific layers\n", + ")\n", + "\n", + "# Custom analysis\n", + "sparsity_stats = analyze_sparsity(pruned_model, layer_names=[\"conv.*\"])\n", + "performance_stats = benchmark_inference(pruned_model, test_data)\n", + "```\n", + "\n", + "## Implementation Details\n", + "\n", + "### Backend Integration\n", + "\n", + "#### TensorFlow Backend (Optimized)\n", + "- Uses `GradientTape` for efficient gradient computation\n", + "- Batch processing for memory efficiency\n", + "- GPU acceleration support\n", + "\n", + "#### JAX/PyTorch Backends\n", + "- Uses backend-specific autodiff systems\n", + "- Clear error messages for unsupported methods\n", + "- Fallback to magnitude methods when gradients unavailable\n", + "\n", + "### Memory Management\n", + "\n", + "#### Gradient Computation\n", + "- Limits batch size to prevent OOM (default: 32 samples)\n", + "- Random sampling for large datasets\n", + "- Efficient tensor operations\n", + "\n", + "#### Model Cloning\n", + "- Supports deep copying for comparisons\n", + "- Preserves model architecture and compilation state\n", + "- Handles weight sharing correctly\n", + "\n", + "### Error Handling\n", + "\n", + "#### Clear Error Messages\n", + "- Specific requirements for each method\n", + "- Backend compatibility information\n", + "- Helpful suggestions for alternatives\n", + "\n", + "#### Graceful Degradation\n", + "- Falls back to magnitude methods when gradients fail\n", + "- Continues with available layers if some are incompatible\n", + "- Provides detailed statistics about what was processed\n", + "\n", + "## Design Decisions\n", + "\n", + "### 1. Direct Parameters vs Configuration Objects\n", + "\n", + "**Decision**: Moved from configuration objects to direct parameters\n", + "**Rationale**: \n", + "- Simpler API - no need to create config objects\n", + "- More intuitive for users\n", + "- Better IDE support with parameter hints\n", + "- Maintains backwards compatibility\n", + "\n", + "### 2. Layer Selection System\n", + "\n", + "**Decision**: Implemented flexible pattern matching with exact names and regex\n", + "**Rationale**:\n", + "- Complex architectures need fine-grained control\n", + "- Regex patterns enable powerful batch selection\n", + "- Mixed specifications provide maximum flexibility\n", + "- Performance optimized with two-stage matching\n", + "\n", + "### 3. Gradient Method Implementation\n", + "\n", + "**Decision**: Backend-specific implementations with clear error handling\n", + "**Rationale**:\n", + "- Different backends have different optimal approaches\n", + "- TensorFlow optimization provides significant speedup\n", + "- Clear errors better than incorrect fallbacks\n", + "- Maintains mathematical correctness\n", + "\n", + "### 4. Analysis Tool Design\n", + "\n", + "**Decision**: Comprehensive analysis suite with filtering capabilities\n", + "**Rationale**:\n", + "- Users need to verify actual vs target sparsity\n", + "- Performance measurement is crucial for deployment\n", + "- Layer-specific analysis enables targeted optimization\n", + "- Detailed reporting aids debugging and optimization\n", + "\n", + "### 5. Mathematical Correctness\n", + "\n", + "**Decision**: Prioritize mathematical accuracy over convenience\n", + "**Rationale**:\n", + "- Incorrect methods can mislead users\n", + "- Research applications need theoretical soundness\n", + "- Clear documentation of approximations and limitations\n", + "- Honest error reporting when methods don't work\n", + "\n", + "## Future Enhancements\n", + "\n", + "### Planned Features\n", + "\n", + "1. **Extended Backend Support**\n", + " - Full JAX gradient implementation\n", + " - PyTorch gradient optimization\n", + " - Custom gradient computation hooks\n", + "\n", + "2. **Advanced Pruning Methods**\n", + " - SNIP (Single-shot Network Pruning)\n", + " - LAMP (Layer-Adaptive Sparsity)\n", + " - Lottery Ticket Hypothesis support\n", + "\n", + "3. **Deployment Optimizations**\n", + " - Sparse tensor format support\n", + " - Hardware-specific optimizations\n", + " - Quantization integration\n", + "\n", + "4. **Enhanced Analysis**\n", + " - Model accuracy impact analysis\n", + " - Layer importance ranking\n", + " - Pruning sensitivity analysis\n", + "\n", + "### Research Integration\n", + "\n", + "The framework is designed to support research with:\n", + "- Extensible method base classes\n", + "- Comprehensive analysis tools\n", + "- Mathematical correctness verification\n", + "- Easy integration of new algorithms\n", + "\n", + "This design documentation reflects the current state of the Keras Model Pruning system, emphasizing practical usability while maintaining research-grade mathematical rigor and flexibility.\n", + "\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "PSaoe8pCHRLj" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "982c8923", + "outputId": "9ed91c25-ec7e-4396-a5af-62d8a5b39e07" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_l1_30 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply L1 pruning with 30% sparsity\n", + "# config_l1_30 = PruningConfig(sparsity=0.3, method=\"l1\") # Old API\n", + "stats_l1_30 = loaded_model_l1_30.prune(sparsity=0.3, method=\"l1\") # New API\n", + "\n", + "print(\"\\n--- L1 Pruning (30% sparsity) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_l1_30['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_l1_30['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_l1_30['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_l1_30['method']}\")\n", + "print(f\"Pruned Layers: {stats_l1_30['pruned_layers']}\")\n", + "\n", + "\n", + "# Evaluate the pruned model\n", + "loss_l1_30, accuracy_l1_30 = loaded_model_l1_30.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"L1 Pruned Model Test Loss: {loss_l1_30:.4f}\")\n", + "print(f\"L1 Pruned Model Test Accuracy: {accuracy_l1_30:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- L1 Pruning (30% sparsity) Results ---\n", + "Initial Sparsity: 0.0002\n", + "Final Sparsity: 0.3000\n", + "Target Sparsity: 0.3000\n", + "Pruning Method: l1\n", + "Pruned Layers: 2\n", + "L1 Pruned Model Test Loss: 3.7006\n", + "L1 Pruned Model Test Accuracy: 0.1382\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "00c0b0f1", + "outputId": "983b743a-3ea4-45e5-a987-f6f6a1bbfe19" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_l2_50 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply L2 pruning with 50% sparsity\n", + "# config_l2_50 = PruningConfig(sparsity=0.5, method=\"l2\") # Old API\n", + "stats_l2_50 = loaded_model_l2_50.prune(sparsity=0.5, method=\"l2\") # New API\n", + "\n", + "print(\"\\n--- L2 Pruning (50% sparsity) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_l2_50['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_l2_50['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_l2_50['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_l2_50['method']}\")\n", + "print(f\"Pruned Layers: {stats_l2_50['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_l2_50, accuracy_l2_50 = loaded_model_l2_50.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"L2 Pruned Model Test Loss: {loss_l2_50:.4f}\")\n", + "print(f\"L2 Pruned Model Test Accuracy: {accuracy_l2_50:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- L2 Pruning (50% sparsity) Results ---\n", + "Initial Sparsity: 0.0002\n", + "Final Sparsity: 0.4999\n", + "Target Sparsity: 0.5000\n", + "Pruning Method: l2\n", + "Pruned Layers: 2\n", + "L2 Pruned Model Test Loss: 3.7443\n", + "L2 Pruned Model Test Accuracy: 0.1329\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "a7407947", + "outputId": "2d137af4-1c7f-418f-af2e-e69a5c702146" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_structured_40 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply Structured pruning with 40% sparsity\n", + "# config_structured_40 = PruningConfig(sparsity=0.4, method=\"structured\") # Old API\n", + "stats_structured_40 = loaded_model_structured_40.prune(sparsity=0.4, method=\"structured\") # New API\n", + "\n", + "\n", + "print(\"\\n--- Structured Pruning (40% sparsity) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_structured_40['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_structured_40['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_structured_40['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_structured_40['method']}\")\n", + "print(f\"Pruned Layers: {stats_structured_40['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_structured_40, accuracy_structured_40 = loaded_model_structured_40.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"Structured Pruned Model Test Loss: {loss_structured_40:.4f}\")\n", + "print(f\"Structured Pruned Model Test Accuracy: {accuracy_structured_40:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- Structured Pruning (40% sparsity) Results ---\n", + "Initial Sparsity: 0.0002\n", + "Final Sparsity: 0.4026\n", + "Target Sparsity: 0.4000\n", + "Pruning Method: structured\n", + "Pruned Layers: 2\n", + "Structured Pruned Model Test Loss: 3.9227\n", + "Structured Pruned Model Test Accuracy: 0.1330\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 384 + }, + "id": "e7d97366", + "outputId": "6d2193aa-8544-4623-f94c-c61cef1b5bf0" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_saliency_60_selective = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply Saliency pruning with 60% sparsity on layers matching \"dense_.*\"\n", + "dataset_subset = (x_train[:1000], y_train[:1000]) # Use a smaller subset\n", + "loss_fn = loaded_model_saliency_60_selective.loss\n", + "\n", + "# config_saliency_60_selective = PruningConfig( # Old API\n", + "# sparsity=0.6,\n", + "# method=\"saliency\",\n", + "# layers_to_prune=[\"dense_.*\"], # Select layers by regex\n", + "# dataset=dataset_subset,\n", + "# loss_fn=loss_fn\n", + "# )\n", + "stats_saliency_60_selective = loaded_model_saliency_60_selective.prune( # New API\n", + " sparsity=0.6,\n", + " method=\"saliency\",\n", + " layers_to_prune=[\"dense_.*\"], # Select layers by regex\n", + " dataset=dataset_subset,\n", + " loss_fn=loss_fn\n", + ")\n", + "\n", + "print(\"\\n--- Saliency Pruning (60% sparsity, selective) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_saliency_60_selective['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_saliency_60_selective['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_saliency_60_selective['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_saliency_60_selective['method']}\")\n", + "print(f\"Pruned Layers: {stats_saliency_60_selective['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_saliency_60_selective, accuracy_saliency_60_selective = loaded_model_saliency_60_selective.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"Saliency Pruned Model Test Loss: {loss_saliency_60_selective:.4f}\")\n", + "print(f\"Saliency Pruned Model Test Accuracy: {accuracy_saliency_60_selective:.4f}\")" + ], + "execution_count": 3, + "outputs": [ + { + "output_type": "error", + "ename": "ImportError", + "evalue": "cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-2319192711.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPruningConfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mLnPruning\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;31m# Load the saved model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mloaded_model_saliency_60_selective\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", + "", + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" + ], + "errorDetails": { + "actions": [ + { + "action": "open_url", + "actionText": "Open Examples", + "url": "/notebooks/snippets/importing_libraries.ipynb" + } + ] + } + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "21843d30", + "outputId": "bc60dce0-fc1e-4935-e85e-67a40e8c4bbe" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import PruningConfig, LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_taylor_70_selective = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply Taylor pruning with 70% sparsity on layers matching \"conv.*\"\n", + "dataset_subset = (x_train[:1000], y_train[:1000]) # Use a smaller subset\n", + "loss_fn = loaded_model_taylor_70_selective.loss\n", + "\n", + "# config_taylor_70_selective = PruningConfig( # Old API\n", + "# sparsity=0.7,\n", + "# method=\"taylor\",\n", + "# layers_to_prune=[\"conv.*\"], # Select layers by regex\n", + "# dataset=dataset_subset,\n", + "# loss_fn=loss_fn\n", + "# )\n", + "stats_taylor_70_selective = loaded_model_taylor_70_selective.prune( # New API\n", + " sparsity=0.7,\n", + " method=\"taylor\",\n", + " layers_to_prune=[\"conv.*\"], # Select layers by regex\n", + " dataset=dataset_subset,\n", + " loss_fn=loss_fn\n", + ")\n", + "\n", + "print(\"\\n--- Taylor Pruning (70% sparsity, selective) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_taylor_70_selective['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_taylor_70_selective['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_taylor_70_selective['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_taylor_70_selective['method']}\")\n", + "print(f\"Pruned Layers: {stats_taylor_70_selective['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_taylor_70_selective, accuracy_taylor_70_selective = loaded_model_taylor_70_selective.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"Taylor Pruned Model Test Loss: {loss_taylor_70_selective:.4f}\")\n", + "print(f\"Taylor Pruned Model Test Accuracy: {accuracy_taylor_70_selective:.4f}\")" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- Taylor Pruning (70% sparsity, selective) Results ---\n", + "Initial Sparsity: 0.0002\n", + "Final Sparsity: 0.0002\n", + "Target Sparsity: 0.7000\n", + "Pruning Method: taylor\n", + "Pruned Layers: 0\n", + "Taylor Pruned Model Test Loss: 3.6502\n", + "Taylor Pruned Model Test Accuracy: 0.1477\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "e1d5d66c", + "outputId": "e8b0df0b-e99d-4970-a6bd-06c47a5a60c8" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_random_80 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply Random pruning with 80% sparsity\n", + "# config_random_80 = PruningConfig(sparsity=0.8, method=\"random\", seed=42) # Old API\n", + "stats_random_80 = loaded_model_random_80.prune(sparsity=0.8, method=\"l1\", seed=42) # New API\n", + "\n", + "print(\"\\n--- Random Pruning (80% sparsity) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_random_80['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_random_80['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_random_80['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_random_80['method']}\")\n", + "print(f\"Pruned Layers: {stats_random_80['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_random_80, accuracy_random_80 = loaded_model_random_80.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"Random Pruned Model Test Loss: {loss_random_80:.4f}\")\n", + "print(f\"Random Pruned Model Test Accuracy: {accuracy_random_80:.4f}\")\n", + "\n", + "# print(\"Skipping random pruning test as it is not currently supported.\")" + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- Random Pruning (80% sparsity) Results ---\n", + "Initial Sparsity: 0.0000\n", + "Final Sparsity: 0.7991\n", + "Target Sparsity: 0.8000\n", + "Pruning Method: l1\n", + "Pruned Layers: 55\n", + "Random Pruned Model Test Loss: 4.9933\n", + "Random Pruned Model Test Accuracy: 0.0119\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "df2e703d", + "outputId": "16972751-965c-4654-ee82-9dde88ad29f9" + }, + "source": [ + "from keras.models import load_model\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the saved model\n", + "loaded_model_ln4_50 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + "# Apply Ln pruning (n=4) with 50% sparsity\n", + "# config_ln4_50 = PruningConfig(sparsity=0.5, method=LnPruning(n=4)) # Old API\n", + "stats_ln4_50 = loaded_model_ln4_50.prune(sparsity=0.5, method=LnPruning(n=4)) # New API\n", + "\n", + "\n", + "print(\"\\n--- Ln Pruning (n=4, 50% sparsity) Results ---\")\n", + "print(f\"Initial Sparsity: {stats_ln4_50['initial_sparsity']:.4f}\")\n", + "print(f\"Final Sparsity: {stats_ln4_50['final_sparsity']:.4f}\")\n", + "print(f\"Target Sparsity: {stats_ln4_50['target_sparsity']:.4f}\")\n", + "print(f\"Pruning Method: {stats_ln4_50['method']}\")\n", + "print(f\"Pruned Layers: {stats_ln4_50['pruned_layers']}\")\n", + "\n", + "# Evaluate the pruned model\n", + "loss_ln4_50, accuracy_ln4_50 = loaded_model_ln4_50.evaluate(x_test, y_test, verbose=0)\n", + "print(f\"Ln Pruned Model Test Loss: {loss_ln4_50:.4f}\")\n", + "print(f\"Ln Pruned Model Test Accuracy: {accuracy_ln4_50:.4f}\")" + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\n", + "--- Ln Pruning (n=4, 50% sparsity) Results ---\n", + "Initial Sparsity: 0.0000\n", + "Final Sparsity: 0.4994\n", + "Target Sparsity: 0.5000\n", + "Pruning Method: \n", + "Pruned Layers: 55\n", + "Ln Pruned Model Test Loss: 2.2994\n", + "Ln Pruned Model Test Accuracy: 0.4893\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "import keras_hub\n", + "import time" + ], + "metadata": { + "id": "JjTBElU7FDi7" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "model=keras_hub.models.Llama3CausalLM.from_preset(\"hf://meta-llama/Llama-3.2-1B\",dtype='bfloat16')" + ], + "metadata": { + "id": "do3zkG9gFlg5" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "model.generate(\"what is keras\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 140 + }, + "id": "InUr22QoTjVH", + "outputId": "4f3fef74-098e-456d-934e-1d0b677e5fec" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'what is keras in machine learning\\nMachine learning is the process of building systems that can learn and adapt to change. The machine learning algorithms that are available in Python are: Linear Regression, Logistic Regression, SVM (Support Vector Machine), Decision Tree Classifier and K-Nearest Neighbour Classifier. The following are the steps involved in the process of machine learning. Machine learning is a branch of artificial intelligence which is used to create systems which can make decisions without being programmed to do so. In this article, we will discuss the steps involved in the process of machine learning, the various techniques used in it and some of its important applications. Machine Learning is the branch of Artificial Intelligence that is mainly concerned with the use of computers or other digital machines to perform tasks that require human intelligence to perform them. It involves the use of algorithms and data to create a model that can learn from past data. It is used to create systems that can make decisions without being programmed to do so. Machine Learning in Keras: A Complete Guide. Keras is one of the most popular libraries for machine learning in Python. It uses the concept of a neural network and is used to train a model. It is a tool that helps you to create models and train them. Machine learning in Python is one of the most popular libraries in the Python community. The following are the steps that are used in machine learning. Machine learning is the process where computers learn from past data, and then use that data to make predictions. Machine learning is a subset of artificial intelligence, a field that focuses on making machines that can perform tasks that would normally require human intelligence. In this article, we will be looking at the different steps involved in the process of machine learning. The following are the steps involved in the process of machine learning: The following are the steps of machine learning: In this article, we will discuss the steps involved in the process of machine learning. Machine learning is one of the most important branches of artificial intelligence that has been used to create a model that can learn from past data. In the process of machine learning, the following are the steps involved: The following are the steps of the process of machine learning: In this article, we will discuss the process of machine learning. It uses the concept of machine learning to train models and predict outcomes of new data. This is done by using a set of rules or algorithms, and then using that to create a model which can learn new things and predict future outcomes. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. Machine Learning in Keras is used for training neural networks. In machine learning, the following are the steps involved: Machine learning is used for the development of models and systems that can learn and adapt to new situations. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. The process of machine learning is divided into three main components. It uses a combination of data, algorithms and techniques to create systems that can learn new things and perform new tasks. The following are the steps that are used in machine learning: The steps involved in machine learning: Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. The steps involved in machine learning: In machine learning, the following are the steps involved: The following are the steps involved in the process of machine learning: Machine Learning in Keras: A Complete Guide. The steps involved in machine learning: Machine learning is the process where computers learn from past data, and then use that data to make predictions.\\nThe Steps Involved In Machine Learning, Keras In Machine Learning , Steps Involved In Data Science , Steps In Machine Learning , Steps Involved In Data Analysis\\nwhat is keras in machine learning'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 10 + } + ] + }, + { + "cell_type": "code", + "source": [ + "\n", + "start_time = time.time()\n", + "for i in range(100):\n", + " model.generate(\"what is keras\")\n", + " print(i)\n", + "end_time = time.time()\n", + "\n", + "inference_time = end_time - start_time\n", + "print(f\"Inference time for 100 calls to model.generate: {inference_time:7f} seconds\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "daewQ0VOT8Wi", + "outputId": "447d57f2-522a-43a8-936f-786035c99d66" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "0\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n", + "16\n", + "17\n", + "18\n", + "19\n", + "20\n", + "21\n", + "22\n", + "23\n", + "24\n", + "25\n", + "26\n", + "27\n", + "28\n", + "29\n", + "30\n", + "31\n", + "32\n", + "33\n", + "34\n", + "35\n", + "36\n", + "37\n", + "38\n", + "39\n", + "40\n", + "41\n", + "42\n", + "43\n", + "44\n", + "45\n", + "46\n", + "47\n", + "48\n", + "49\n", + "50\n", + "51\n", + "52\n", + "53\n", + "54\n", + "55\n", + "56\n", + "57\n", + "58\n", + "59\n", + "60\n", + "61\n", + "62\n", + "63\n", + "64\n", + "65\n", + "66\n", + "67\n", + "68\n", + "69\n", + "70\n", + "71\n", + "72\n", + "73\n", + "74\n", + "75\n", + "76\n", + "77\n", + "78\n", + "79\n", + "80\n", + "81\n", + "82\n", + "83\n", + "84\n", + "85\n", + "86\n", + "87\n", + "88\n", + "89\n", + "90\n", + "91\n", + "92\n", + "93\n", + "94\n", + "95\n", + "96\n", + "97\n", + "98\n", + "99\n", + "Inference time for 100 calls to model.generate: 380.907693 seconds\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "stats=model.prune(sparsity=0.7, method=\"l1\")\n" + ], + "metadata": { + "id": "PZfiXh1kGJ-f" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "stats" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "cs1itfE_Ggbj", + "outputId": "98d51b87-8c53-4aa9-c351-657884734a96" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "{'initial_sparsity': 0.0,\n", + " 'final_sparsity': 0.7014274586891306,\n", + " 'pruned_layers': 112,\n", + " 'target_sparsity': 0.7,\n", + " 'method': 'l1',\n", + " 'layers_pruned': ['feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query',\n", + " 'feedforward_output_dense',\n", + " 'feedforward_gate_dense',\n", + " 'feedforward_intermediate_dense',\n", + " 'attention_output',\n", + " 'value',\n", + " 'key',\n", + " 'query']}" + ] + }, + "metadata": {}, + "execution_count": 12 + } + ] + }, + { + "cell_type": "code", + "source": [ + "model.generate(\"what is keras\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 140 + }, + "id": "RHORpOomGhTq", + "outputId": "555b7216-6c89-497c-b41a-f0ae54c52252" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'what is keraslegegeelgevaanggdegegabdizahddeygimgdegyydeyinindaydeyyyyyyinydeydmalialithemyydatyininyyininthemdinagdatatinthinagthatthagthemthatagthagproaginagthdinthemagagatagdagagatatinatagxaginaginagagatcriagagagatagcrileininaginagaginemaginieatinagdininagdeleagleaginagininieiniehiileiinininleleleinatiiagininagdeleagiininagdininleinleleleinemininemleleleleleleemleinininatinagdiininatleatatleleinininininininleininatinleatdeleinleagleleleleileinleinininleininieminleleleininleleleinemininleleatatdleleleatinleinchinlelelechleleininininleleleleleleleinininleleininleleleinlenatleatinleinleinleinatinlelechinlecinleleleleininleleyininleininleininlechchlelelelechinininleclelelelelelelelelelelelelenyleleinncininlechyyyinleinleleyinleleleleininlechkinkchlechinkinlelechyyleleleleleleininininchininleinleleleininlelechinchlelechinlecinininininincinleleleleinleleinleleleleleleleleleininchchinleleininininininkinleleinleinininininchinlekinleinclechchininininkinleinleleleleinleinlelelechylelelekkkleyyleinininleleleininlelelelechlekkinininclelelekchinleleleleleleleleinckininininccleinincktechylekininkcleinletekleinininininininteleleleleleleinleleleinleinklelecleclelekinlecinkkinleinlemitkkleleinininleleintekleinlekinlekleinteleinlelelelekinininteininlelelekkininklelekinleinininininleleinleleinleleleleleleleleleinincclelekkinlelechkteinklekkinleinlekinlelelelelemitinclekkintecinlekinleinininininlelelelelekinleleleleleleinlelelelecinininleleleleincleleincleleinleleleinleleleinleleleinleleininleininlelecleleinleleleleinlelelelekkchleckinlekclekkinlelelechleleinkleinleinleklelelelemitleclelecchinclekkcckininininlecleinleleleleleinlelelekkleleinlelelelelelekkkkkkkinleleinlelelelelemitmitlekkkininleleleleleleinlekkchkkinklelelemitkkinlelekckinkinumlelekinlemitkchcklekinktelelelekkumlekkininininumlelekinteinincckkinckkkkteleleinininctecclechkleteininkkininlekumcleinumk'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 13 + } + ] + }, + { + "cell_type": "code", + "source": [ + "\n", + "start_time = time.time()\n", + "for i in range(10):\n", + " model.generate(\"what is keras\")\n", + " print(i)\n", + "end_time = time.time()\n", + "\n", + "inference_time = end_time - start_time\n", + "print(f\"Inference time for 100 calls to model.generate: {inference_time:7f} seconds\")" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "7S-VXK0STiEq", + "outputId": "d7e24082-2de4-483f-bcf1-afb232eb726b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "0\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "Inference time for 100 calls to model.generate: 42.329632 seconds\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "-kKBl2hQUSCR" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8e719c7a" + }, + "source": [ + "# Task\n", + "Modify the code in the selected cell to add early stopping criteria to the model fit." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aa1fc53e" + }, + "source": [ + "## Prune and evaluate in a loop\n", + "\n", + "### Subtask:\n", + "Iterate through the pruning configurations, load the model each time, prune it according to the current configuration, and evaluate the pruned model on the test data. Store the results (pruning ratio, method, loss, and accuracy).\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "44564af4" + }, + "source": [ + "**Reasoning**:\n", + "Iterate through the defined pruning ratios, load the model for each ratio, prune the loaded model, evaluate it, and store the results.\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "521d81a5" + }, + "source": [ + "**Reasoning**:\n", + "The previous code failed because `x_test` and `y_test` were not defined in the current scope. I need to reload the CIFAR-100 dataset.\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "sBynZyCXtebS", + "outputId": "266466e0-1c6d-4619-a33f-422d3e505c29" + }, + "source": [ + "from keras.datasets import cifar100\n", + "from keras.utils import to_categorical\n", + "from keras.models import load_model\n", + "import pandas as pd\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the CIFAR-100 dataset\n", + "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", + "\n", + "# Normalize the image data\n", + "x_train = x_train.astype('float32') / 255.0\n", + "x_test = x_test.astype('float32') / 255.0\n", + "\n", + "# Convert labels to one-hot encoding\n", + "num_classes = 100\n", + "y_train = to_categorical(y_train, num_classes)\n", + "y_test = to_categorical(y_test, num_classes)\n", + "\n", + "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", + "print(f\"Training data shape: {x_train.shape}\")\n", + "print(f\"Training labels shape: {y_train.shape}\")\n", + "print(f\"Testing data shape: {x_test.shape}\")\n", + "print(f\"Testing labels shape: {y_test.shape}\")\n", + "\n", + "# Define pruning ratios\n", + "pruning_ratios = [i * 0.05 for i in range(1, 19)] + [0.95, 0.97, 0.99]\n", + "\n", + "# Define pruning methods\n", + "pruning_methods = [\"l1\", \"l2\", LnPruning(n=3), LnPruning(n=4), \"saliency\", \"taylor\"]\n", + "\n", + "# Initialize an empty list to store the results\n", + "pruning_results = []\n", + "\n", + "# Iterate through each pruning method\n", + "for pruning_method in pruning_methods:\n", + " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", + " # Iterate through each pruning ratio\n", + " for ratio in pruning_ratios:\n", + " print(f\"Processing pruning ratio: {ratio}\")\n", + "\n", + " # Load the saved model\n", + " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + " # Apply pruning\n", + " try:\n", + " if pruning_method in [\"saliency\", \"taylor\"]:\n", + " # Use a smaller subset for gradient-based methods to save time\n", + " dataset_subset = (x_train[:1000], y_train[:1000])\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", + " elif isinstance(pruning_method, str):\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + " else:\n", + " # Assume it's a PruningMethod instance like LnPruning\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + "\n", + "\n", + " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", + "\n", + " # Evaluate the pruned model\n", + " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + " # Append the results\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': loss,\n", + " 'Test Accuracy': accuracy\n", + " })\n", + " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", + "\n", + " except ValueError as e:\n", + " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", + " # Optionally store a record indicating failure\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': None,\n", + " 'Test Accuracy': None,\n", + " 'Error': str(e)\n", + " })\n", + "\n", + "\n", + "# Convert results to a pandas DataFrame for easy display and analysis\n", + "results_df = pd.DataFrame(pruning_results)\n", + "\n", + "# Display the results table\n", + "print(\"\\n--- Pruning Evaluation Results ---\")\n", + "display(results_df)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "CIFAR-100 dataset loaded and preprocessed again.\n", + "Training data shape: (50000, 32, 32, 3)\n", + "Training labels shape: (50000, 100)\n", + "Testing data shape: (10000, 32, 32, 3)\n", + "Testing labels shape: (10000, 100)\n", + "\n", + "--- Pruning Method: l1 ---\n", + "Processing pruning ratio: 0.05\n", + " Pruning successful. Final sparsity: 0.0499\n", + " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", + "Processing pruning ratio: 0.1\n", + " Pruning successful. Final sparsity: 0.0999\n", + " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Pruning successful. Final sparsity: 0.1498\n", + " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", + "Processing pruning ratio: 0.2\n", + " Pruning successful. Final sparsity: 0.1998\n", + " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", + "Processing pruning ratio: 0.25\n", + " Pruning successful. Final sparsity: 0.2497\n", + " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Pruning successful. Final sparsity: 0.2997\n", + " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Pruning successful. Final sparsity: 0.3496\n", + " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", + "Processing pruning ratio: 0.4\n", + " Pruning successful. Final sparsity: 0.3996\n", + " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", + "Processing pruning ratio: 0.45\n", + " Pruning successful. Final sparsity: 0.4495\n", + " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", + "Processing pruning ratio: 0.5\n", + " Pruning successful. Final sparsity: 0.4994\n", + " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", + "Processing pruning ratio: 0.55\n", + " Pruning successful. Final sparsity: 0.5494\n", + " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Pruning successful. Final sparsity: 0.5993\n", + " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", + "Processing pruning ratio: 0.65\n", + " Pruning successful. Final sparsity: 0.6493\n", + " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Pruning successful. Final sparsity: 0.6992\n", + " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", + "Processing pruning ratio: 0.75\n", + " Pruning successful. Final sparsity: 0.7492\n", + " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", + "Processing pruning ratio: 0.8\n", + " Pruning successful. Final sparsity: 0.7991\n", + " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Pruning successful. Final sparsity: 0.8490\n", + " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", + "Processing pruning ratio: 0.9\n", + " Pruning successful. Final sparsity: 0.8990\n", + " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", + "Processing pruning ratio: 0.95\n", + " Pruning successful. Final sparsity: 0.9489\n", + " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.97\n", + " Pruning successful. Final sparsity: 0.9689\n", + " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.99\n", + " Pruning successful. Final sparsity: 0.9889\n", + " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", + "\n", + "--- Pruning Method: l2 ---\n", + "Processing pruning ratio: 0.05\n", + " Pruning successful. Final sparsity: 0.0499\n", + " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", + "Processing pruning ratio: 0.1\n", + " Pruning successful. Final sparsity: 0.0999\n", + " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Pruning successful. Final sparsity: 0.1498\n", + " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", + "Processing pruning ratio: 0.2\n", + " Pruning successful. Final sparsity: 0.1998\n", + " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", + "Processing pruning ratio: 0.25\n", + " Pruning successful. Final sparsity: 0.2497\n", + " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Pruning successful. Final sparsity: 0.2997\n", + " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Pruning successful. Final sparsity: 0.3496\n", + " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", + "Processing pruning ratio: 0.4\n", + " Pruning successful. Final sparsity: 0.3996\n", + " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", + "Processing pruning ratio: 0.45\n", + " Pruning successful. Final sparsity: 0.4495\n", + " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", + "Processing pruning ratio: 0.5\n", + " Pruning successful. Final sparsity: 0.4994\n", + " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", + "Processing pruning ratio: 0.55\n", + " Pruning successful. Final sparsity: 0.5494\n", + " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Pruning successful. Final sparsity: 0.5993\n", + " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", + "Processing pruning ratio: 0.65\n", + " Pruning successful. Final sparsity: 0.6493\n", + " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Pruning successful. Final sparsity: 0.6992\n", + " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", + "Processing pruning ratio: 0.75\n", + " Pruning successful. Final sparsity: 0.7492\n", + " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", + "Processing pruning ratio: 0.8\n", + " Pruning successful. Final sparsity: 0.7991\n", + " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Pruning successful. Final sparsity: 0.8490\n", + " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", + "Processing pruning ratio: 0.9\n", + " Pruning successful. Final sparsity: 0.8990\n", + " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", + "Processing pruning ratio: 0.95\n", + " Pruning successful. Final sparsity: 0.9489\n", + " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.97\n", + " Pruning successful. Final sparsity: 0.9689\n", + " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.99\n", + " Pruning successful. Final sparsity: 0.9889\n", + " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", + "\n", + "--- Pruning Method: ---\n", + "Processing pruning ratio: 0.05\n", + " Pruning successful. Final sparsity: 0.0499\n", + " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", + "Processing pruning ratio: 0.1\n", + " Pruning successful. Final sparsity: 0.0999\n", + " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Pruning successful. Final sparsity: 0.1498\n", + " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", + "Processing pruning ratio: 0.2\n", + " Pruning successful. Final sparsity: 0.1998\n", + " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", + "Processing pruning ratio: 0.25\n", + " Pruning successful. Final sparsity: 0.2497\n", + " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Pruning successful. Final sparsity: 0.2997\n", + " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Pruning successful. Final sparsity: 0.3496\n", + " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", + "Processing pruning ratio: 0.4\n", + " Pruning successful. Final sparsity: 0.3996\n", + " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", + "Processing pruning ratio: 0.45\n", + " Pruning successful. Final sparsity: 0.4495\n", + " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", + "Processing pruning ratio: 0.5\n", + " Pruning successful. Final sparsity: 0.4994\n", + " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", + "Processing pruning ratio: 0.55\n", + " Pruning successful. Final sparsity: 0.5494\n", + " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Pruning successful. Final sparsity: 0.5993\n", + " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", + "Processing pruning ratio: 0.65\n", + " Pruning successful. Final sparsity: 0.6493\n", + " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Pruning successful. Final sparsity: 0.6992\n", + " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", + "Processing pruning ratio: 0.75\n", + " Pruning successful. Final sparsity: 0.7492\n", + " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", + "Processing pruning ratio: 0.8\n", + " Pruning successful. Final sparsity: 0.7991\n", + " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Pruning successful. Final sparsity: 0.8490\n", + " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", + "Processing pruning ratio: 0.9\n", + " Pruning successful. Final sparsity: 0.8990\n", + " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", + "Processing pruning ratio: 0.95\n", + " Pruning successful. Final sparsity: 0.9489\n", + " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.97\n", + " Pruning successful. Final sparsity: 0.9689\n", + " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.99\n", + " Pruning successful. Final sparsity: 0.9889\n", + " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", + "\n", + "--- Pruning Method: ---\n", + "Processing pruning ratio: 0.05\n", + " Pruning successful. Final sparsity: 0.0499\n", + " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", + "Processing pruning ratio: 0.1\n", + " Pruning successful. Final sparsity: 0.0999\n", + " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Pruning successful. Final sparsity: 0.1498\n", + " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", + "Processing pruning ratio: 0.2\n", + " Pruning successful. Final sparsity: 0.1998\n", + " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", + "Processing pruning ratio: 0.25\n", + " Pruning successful. Final sparsity: 0.2497\n", + " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Pruning successful. Final sparsity: 0.2997\n", + " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Pruning successful. Final sparsity: 0.3496\n", + " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", + "Processing pruning ratio: 0.4\n", + " Pruning successful. Final sparsity: 0.3996\n", + " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", + "Processing pruning ratio: 0.45\n", + " Pruning successful. Final sparsity: 0.4495\n", + " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", + "Processing pruning ratio: 0.5\n", + " Pruning successful. Final sparsity: 0.4994\n", + " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", + "Processing pruning ratio: 0.55\n", + " Pruning successful. Final sparsity: 0.5494\n", + " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Pruning successful. Final sparsity: 0.5993\n", + " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", + "Processing pruning ratio: 0.65\n", + " Pruning successful. Final sparsity: 0.6493\n", + " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Pruning successful. Final sparsity: 0.6992\n", + " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", + "Processing pruning ratio: 0.75\n", + " Pruning successful. Final sparsity: 0.7492\n", + " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", + "Processing pruning ratio: 0.8\n", + " Pruning successful. Final sparsity: 0.7991\n", + " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Pruning successful. Final sparsity: 0.8490\n", + " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", + "Processing pruning ratio: 0.9\n", + " Pruning successful. Final sparsity: 0.8990\n", + " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", + "Processing pruning ratio: 0.95\n", + " Pruning successful. Final sparsity: 0.9489\n", + " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.97\n", + " Pruning successful. Final sparsity: 0.9689\n", + " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", + "Processing pruning ratio: 0.99\n", + " Pruning successful. Final sparsity: 0.9889\n", + " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", + "\n", + "--- Pruning Method: saliency ---\n", + "Processing pruning ratio: 0.05\n", + " Skipping pruning for ratio 0.05 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.1\n", + " Skipping pruning for ratio 0.1 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Skipping pruning for ratio 0.15000000000000002 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.2\n", + " Skipping pruning for ratio 0.2 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.25\n", + " Skipping pruning for ratio 0.25 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Skipping pruning for ratio 0.30000000000000004 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Skipping pruning for ratio 0.35000000000000003 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.4\n", + " Skipping pruning for ratio 0.4 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.45\n", + " Skipping pruning for ratio 0.45 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.5\n", + " Skipping pruning for ratio 0.5 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.55\n", + " Skipping pruning for ratio 0.55 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Skipping pruning for ratio 0.6000000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.65\n", + " Skipping pruning for ratio 0.65 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Skipping pruning for ratio 0.7000000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.75\n", + " Skipping pruning for ratio 0.75 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.8\n", + " Skipping pruning for ratio 0.8 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Skipping pruning for ratio 0.8500000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.9\n", + " Skipping pruning for ratio 0.9 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.95\n", + " Skipping pruning for ratio 0.95 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.97\n", + " Skipping pruning for ratio 0.97 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.99\n", + " Skipping pruning for ratio 0.99 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", + "\n", + "--- Pruning Method: taylor ---\n", + "Processing pruning ratio: 0.05\n", + " Skipping pruning for ratio 0.05 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.1\n", + " Skipping pruning for ratio 0.1 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.15000000000000002\n", + " Skipping pruning for ratio 0.15000000000000002 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.2\n", + " Skipping pruning for ratio 0.2 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.25\n", + " Skipping pruning for ratio 0.25 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.30000000000000004\n", + " Skipping pruning for ratio 0.30000000000000004 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.35000000000000003\n", + " Skipping pruning for ratio 0.35000000000000003 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.4\n", + " Skipping pruning for ratio 0.4 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.45\n", + " Skipping pruning for ratio 0.45 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.5\n", + " Skipping pruning for ratio 0.5 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.55\n", + " Skipping pruning for ratio 0.55 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.6000000000000001\n", + " Skipping pruning for ratio 0.6000000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.65\n", + " Skipping pruning for ratio 0.65 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.7000000000000001\n", + " Skipping pruning for ratio 0.7000000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.75\n", + " Skipping pruning for ratio 0.75 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.8\n", + " Skipping pruning for ratio 0.8 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.8500000000000001\n", + " Skipping pruning for ratio 0.8500000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.9\n", + " Skipping pruning for ratio 0.9 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.95\n", + " Skipping pruning for ratio 0.95 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.97\n", + " Skipping pruning for ratio 0.97 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "Processing pruning ratio: 0.99\n", + " Skipping pruning for ratio 0.99 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", + "\n", + "--- Pruning Evaluation Results ---\n" + ] + }, + { + "output_type": "display_data", + "data": { + "text/plain": [ + " Pruning Ratio Pruning Method Test Loss Test Accuracy \\\n", + "0 0.05 l1 1.122052 0.7420 \n", + "1 0.10 l1 1.125432 0.7417 \n", + "2 0.15 l1 1.142868 0.7361 \n", + "3 0.20 l1 1.158665 0.7305 \n", + "4 0.25 l1 1.208641 0.7187 \n", + ".. ... ... ... ... \n", + "121 0.85 taylor NaN NaN \n", + "122 0.90 taylor NaN NaN \n", + "123 0.95 taylor NaN NaN \n", + "124 0.97 taylor NaN NaN \n", + "125 0.99 taylor NaN NaN \n", + "\n", + " Error \n", + "0 NaN \n", + "1 NaN \n", + "2 NaN \n", + "3 NaN \n", + "4 NaN \n", + ".. ... \n", + "121 Could not find layer corresponding to weight t... \n", + "122 Could not find layer corresponding to weight t... \n", + "123 Could not find layer corresponding to weight t... \n", + "124 Could not find layer corresponding to weight t... \n", + "125 Could not find layer corresponding to weight t... \n", + "\n", + "[126 rows x 5 columns]" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
Pruning RatioPruning MethodTest LossTest AccuracyError
00.05l11.1220520.7420NaN
10.10l11.1254320.7417NaN
20.15l11.1428680.7361NaN
30.20l11.1586650.7305NaN
40.25l11.2086410.7187NaN
..................
1210.85taylorNaNNaNCould not find layer corresponding to weight t...
1220.90taylorNaNNaNCould not find layer corresponding to weight t...
1230.95taylorNaNNaNCould not find layer corresponding to weight t...
1240.97taylorNaNNaNCould not find layer corresponding to weight t...
1250.99taylorNaNNaNCould not find layer corresponding to weight t...
\n", + "

126 rows × 5 columns

\n", + "
\n", + "
\n", + "\n", + "
\n", + " \n", + "\n", + " \n", + "\n", + " \n", + "
\n", + "\n", + "\n", + "
\n", + " \n", + "\n", + "\n", + "\n", + " \n", + "
\n", + "\n", + "
\n", + " \n", + " \n", + " \n", + "
\n", + "\n", + "
\n", + "
\n" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "dataframe", + "variable_name": "results_df", + "summary": "{\n \"name\": \"results_df\",\n \"rows\": 126,\n \"fields\": [\n {\n \"column\": \"Pruning Ratio\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.2973575048897971,\n \"min\": 0.05,\n \"max\": 0.99,\n \"num_unique_values\": 21,\n \"samples\": [\n 0.05,\n 0.9,\n 0.8\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Pruning Method\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 6,\n \"samples\": [\n \"l1\",\n \"l2\",\n \"taylor\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Test Loss\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 32.59375736132438,\n \"min\": 1.12205171585083,\n \"max\": 142.3716583251953,\n \"num_unique_values\": 21,\n \"samples\": [\n 1.12205171585083,\n 11.601749420166016,\n 4.992961406707764\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Test Accuracy\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.3082612624968978,\n \"min\": 0.009999999776482582,\n \"max\": 0.7419999837875366,\n \"num_unique_values\": 19,\n \"samples\": [\n 0.7419999837875366,\n 0.704200029373169,\n 0.35019999742507935\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Error\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 2,\n \"samples\": [\n \"Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\",\n \"Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}" + } + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.datasets import cifar100\n", + "from keras.utils import to_categorical\n", + "from keras.models import load_model\n", + "import pandas as pd\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the CIFAR-100 dataset\n", + "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", + "\n", + "# Normalize the image data\n", + "x_train = x_train.astype('float32') / 255.0\n", + "x_test = x_test.astype('float32') / 255.0\n", + "\n", + "# Convert labels to one-hot encoding\n", + "num_classes = 100\n", + "y_train = to_categorical(y_train, num_classes)\n", + "y_test = to_categorical(y_test, num_classes)\n", + "\n", + "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", + "print(f\"Training data shape: {x_train.shape}\")\n", + "print(f\"Training labels shape: {y_train.shape}\")\n", + "print(f\"Testing data shape: {x_test.shape}\")\n", + "print(f\"Testing labels shape: {y_test.shape}\")\n", + "\n", + "# Define pruning ratios\n", + "pruning_ratios = [i * 0.1 for i in range(1, 10)] + [0.95, 0.97, 0.99]\n", + "pruning_ratios = [.3]\n", + "\n", + "# Define pruning methods\n", + "pruning_methods = [\"saliency\"]\n", + "\n", + "# Initialize an empty list to store the results\n", + "pruning_results = []\n", + "\n", + "# Iterate through each pruning method\n", + "for pruning_method in pruning_methods:\n", + " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", + " # Iterate through each pruning ratio\n", + " for ratio in pruning_ratios:\n", + " print(f\"Processing pruning ratio: {ratio}\")\n", + "\n", + " # Load the saved model\n", + " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + " # Apply pruning\n", + " try:\n", + " if pruning_method in [\"saliency\", \"taylor\"]:\n", + " # Use a smaller subset for gradient-based methods to save time\n", + " dataset_subset = (x_train, y_train)\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", + " elif isinstance(pruning_method, str):\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + " else:\n", + " # Assume it's a PruningMethod instance like LnPruning\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + "\n", + "\n", + " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", + "\n", + " # Evaluate the pruned model\n", + " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + " # Append the results\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': loss,\n", + " 'Test Accuracy': accuracy\n", + " })\n", + " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", + "\n", + " except ValueError as e:\n", + " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", + " # Optionally store a record indicating failure\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': None,\n", + " 'Test Accuracy': None,\n", + " 'Error': str(e)\n", + " })\n", + "\n", + "\n", + "# Convert results to a pandas DataFrame for easy display and analysis\n", + "results_df = pd.DataFrame(pruning_results)\n", + "\n", + "# Display the results table\n", + "print(\"\\n--- Pruning Evaluation Results ---\")\n", + "display(results_df)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 512 + }, + "id": "dmY8GKOBUpkM", + "outputId": "5cdbdb52-b950-4114-cb49-cf6d93d0696f" + }, + "execution_count": 6, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "CIFAR-100 dataset loaded and preprocessed again.\n", + "Training data shape: (50000, 32, 32, 3)\n", + "Training labels shape: (50000, 100)\n", + "Testing data shape: (10000, 32, 32, 3)\n", + "Testing labels shape: (10000, 100)\n", + "\n", + "--- Pruning Method: saliency ---\n", + "Processing pruning ratio: 0.3\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "Computing saliency gradients: 68%|██████▊ | 529/782 [04:15<02:02, 2.07batch/s, batches=529]\n" + ] + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-3734017654.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 430\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 432\u001b[0;31m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 433\u001b[0m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcalculate_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights_tensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_saliency_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 547\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_obj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 548\u001b[0m \u001b[0;31m# Ensure loss is scalar\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 549\u001b[0;31m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mloss_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 550\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 551\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtf_var\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_tensorflow_gradients\u001b[0;34m(self, target_weight_var, model, batch_x, batch_y, loss_fn)\u001b[0m\n\u001b[1;32m 462\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 463\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 464\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtqdm\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 465\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 466\u001b[0m \u001b[0;31m# Extract parameters from kwargs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36mgradient\u001b[0;34m(self, target, sources, output_gradients, unconnected_gradients)\u001b[0m\n\u001b[1;32m 1064\u001b[0m for x in output_gradients]\n\u001b[1;32m 1065\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1066\u001b[0;31m flat_grad = imperative_grad.imperative_grad(\n\u001b[0m\u001b[1;32m 1067\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1068\u001b[0m \u001b[0mflat_targets\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/imperative_grad.py\u001b[0m in \u001b[0;36mimperative_grad\u001b[0;34m(tape, target, sources, output_gradients, sources_raw, unconnected_gradients)\u001b[0m\n\u001b[1;32m 65\u001b[0m \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\u001b[1;32m 66\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m return pywrap_tfe.TFE_Py_TapeGradient(\n\u001b[0m\u001b[1;32m 68\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36m_gradient_function\u001b[0;34m(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0mgradient_name_scope\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mforward_pass_name_scope\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\"/\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname_scope\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgradient_name_scope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 148\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 149\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_AddGrad\u001b[0;34m(op, grad)\u001b[0m\n\u001b[1;32m 1376\u001b[0m \u001b[0mgx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mskip_input_indices\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1377\u001b[0m \u001b[0mgy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;36m1\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mskip_input_indices\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1378\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_ReduceGradientArgs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1379\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1380\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_ReduceGradientArgs\u001b[0;34m(x, y, gx, gy)\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0mbx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSmartBroadcastGradientArgs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mgx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ReduceGradientArg\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 144\u001b[0;31m \u001b[0mgy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ReduceGradientArg\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 145\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_ReduceGradientArg\u001b[0;34m(grad, shape_axes_must_reduce)\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[0;31m# emit extra ops to recover reduced indices for broadcasting.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmath_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_sum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxes\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkeepdims\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 135\u001b[0;31m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marray_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 136\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8785\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_eager\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8786\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8787\u001b[0;31m _result = pywrap_tfe.TFE_Py_FastPathExecute(\n\u001b[0m\u001b[1;32m 8788\u001b[0m _ctx, \"Reshape\", name, tensor, shape)\n\u001b[1;32m 8789\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "source": [ + "# @title Pruning Ratio\n", + "\n", + "from matplotlib import pyplot as plt\n", + "results_df['Pruning Ratio'].plot(kind='hist', bins=20, title='Pruning Ratio')\n", + "plt.gca().spines[['top', 'right',]].set_visible(False)" + ], + "cell_type": "code", + "execution_count": null, + "outputs": [ + { + "output_type": "error", + "ename": "NameError", + "evalue": "name 'results_df' is not defined", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-459143830.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmatplotlib\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mresults_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'Pruning Ratio'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkind\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'hist'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbins\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m20\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtitle\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'Pruning Ratio'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgca\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspines\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'top'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'right'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_visible\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNameError\u001b[0m: name 'results_df' is not defined" + ] + } + ], + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 193 + }, + "id": "46ziXkflTK0P", + "outputId": "99d131cb-1652-4440-ee94-e48ebf2d5a8e" + } + }, + { + "cell_type": "code", + "source": [ + "from keras.datasets import cifar100\n", + "from keras.utils import to_categorical\n", + "from keras.models import load_model\n", + "import pandas as pd\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the CIFAR-100 dataset\n", + "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", + "\n", + "# Normalize the image data\n", + "x_train = x_train.astype('float32') / 255.0\n", + "x_test = x_test.astype('float32') / 255.0\n", + "\n", + "# Convert labels to one-hot encoding\n", + "num_classes = 100\n", + "y_train = to_categorical(y_train, num_classes)\n", + "y_test = to_categorical(y_test, num_classes)\n", + "\n", + "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", + "print(f\"Training data shape: {x_train.shape}\")\n", + "print(f\"Training labels shape: {y_train.shape}\")\n", + "print(f\"Testing data shape: {x_test.shape}\")\n", + "print(f\"Testing labels shape: {y_test.shape}\")\n", + "\n", + "# Define pruning ratios\n", + "pruning_ratios = [i * 0.2 for i in range(1, 4)] + [0.95, 0.97, 0.99]\n", + "\n", + "# Define pruning methods\n", + "pruning_methods = [\"saliency\",\"taylor\"]\n", + "\n", + "# Initialize an empty list to store the results\n", + "pruning_results = []\n", + "\n", + "# Iterate through each pruning method\n", + "for pruning_method in pruning_methods:\n", + " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", + " # Iterate through each pruning ratio\n", + " for ratio in pruning_ratios:\n", + " print(f\"Processing pruning ratio: {ratio}\")\n", + "\n", + " # Load the saved model\n", + " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + " # Apply pruning\n", + " try:\n", + " if pruning_method in [\"saliency\", \"taylor\"]:\n", + " # Use a smaller subset for gradient-based methods to save time\n", + " dataset_subset = (x_train, y_train)\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", + " elif isinstance(pruning_method, str):\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + " else:\n", + " # Assume it's a PruningMethod instance like LnPruning\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + "\n", + "\n", + " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", + "\n", + " # Evaluate the pruned model\n", + " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + " # Append the results\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': loss,\n", + " 'Test Accuracy': accuracy\n", + " })\n", + " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", + "\n", + " except ValueError as e:\n", + " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", + " # Optionally store a record indicating failure\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': None,\n", + " 'Test Accuracy': None,\n", + " 'Error': str(e)\n", + " })\n", + "\n", + "\n", + "# Convert results to a pandas DataFrame for easy display and analysis\n", + "results_df = pd.DataFrame(pruning_results)\n", + "\n", + "# Display the results table\n", + "print(\"\\n--- Pruning Evaluation Results ---\")\n", + "display(results_df)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 512 + }, + "id": "tRuPuPaydqx4", + "outputId": "b5e2d516-95ac-4472-b28d-bcd834626794" + }, + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "CIFAR-100 dataset loaded and preprocessed again.\n", + "Training data shape: (50000, 32, 32, 3)\n", + "Training labels shape: (50000, 100)\n", + "Testing data shape: (10000, 32, 32, 3)\n", + "Testing labels shape: (10000, 100)\n", + "\n", + "--- Pruning Method: saliency ---\n", + "Processing pruning ratio: 0.2\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "Computing saliency gradients: 2%|▏ | 18/782 [00:09<06:30, 1.95batch/s, batches=18]\n" + ] + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-137121260.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 49\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 50\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 430\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 432\u001b[0;31m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_saliency_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 433\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[0mflat_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msaliency_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_saliency_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 547\u001b[0m \u001b[0;31m# Backend-specific gradient computation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 548\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"tensorflow\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 549\u001b[0;31m \u001b[0mbatch_avg_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_tensorflow_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 550\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"jax\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 551\u001b[0m \u001b[0mbatch_avg_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_jax_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_tensorflow_gradients\u001b[0;34m(self, target_weight_var, model, batch_x, batch_y, loss_fn)\u001b[0m\n\u001b[1;32m 453\u001b[0m \u001b[0mtf_var\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'_value'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 454\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf_var\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 455\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 456\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcallable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/convolutional/base_conv.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 257\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mbias_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilters\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 259\u001b[0;31m \u001b[0mbias\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 260\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 5365\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0many_symbolic_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mReshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msymbolic_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 5367\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5368\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5369\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 2168\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2169\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2170\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2171\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2172\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8793\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8794\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8795\u001b[0;31m return reshape_eager_fallback(\n\u001b[0m\u001b[1;32m 8796\u001b[0m tensor, shape, name=name, ctx=_ctx)\n\u001b[1;32m 8797\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_SymbolicException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape_eager_fallback\u001b[0;34m(tensor, shape, name, ctx)\u001b[0m\n\u001b[1;32m 8815\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreshape_eager_fallback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_T\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_Tshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_T\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8816\u001b[0m \u001b[0m_attr_T\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs_to_matching_eager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8817\u001b[0;31m \u001b[0m_attr_Tshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs_to_matching_eager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint32\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint64\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8818\u001b[0m \u001b[0m_inputs_flat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8819\u001b[0m \u001b[0m_attrs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\"T\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_attr_T\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"Tshape\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_attr_Tshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36margs_to_matching_eager\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# not list allowed dtypes, in which case we should skip this.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 250\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mallowed_dtypes\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 251\u001b[0;31m \u001b[0mtensor\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensor_conversion_registry\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 252\u001b[0m \u001b[0;31m# If we did not match an allowed dtype, try again with the default\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 253\u001b[0m \u001b[0;31m# dtype. This could be because we have an empty tensor and thus we\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 232\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 234\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconversion_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 235\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNotImplemented\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_tensor_conversion.py\u001b[0m in \u001b[0;36m_constant_tensor_conversion_function\u001b[0;34m(v, dtype, name, as_ref)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconstant_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconstant\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 142\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 143\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36mconstant\u001b[0;34m(value, dtype, shape, name)\u001b[0m\n\u001b[1;32m 274\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcalled\u001b[0m \u001b[0mon\u001b[0m \u001b[0ma\u001b[0m \u001b[0msymbolic\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 275\u001b[0m \"\"\"\n\u001b[0;32m--> 276\u001b[0;31m return _constant_impl(value, dtype, shape, name, verify_shape=False,\n\u001b[0m\u001b[1;32m 277\u001b[0m allow_broadcast=True)\n\u001b[1;32m 278\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36m_constant_impl\u001b[0;34m(value, dtype, shape, name, verify_shape, allow_broadcast)\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtrace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"tf.constant\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_constant_eager_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverify_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 289\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_constant_eager_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverify_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 290\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 291\u001b[0m const_tensor = ops._create_graph_constant( # pylint: disable=protected-access\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36m_constant_eager_impl\u001b[0;34m(ctx, value, dtype, shape, verify_shape)\u001b[0m\n\u001b[1;32m 299\u001b[0m ) -> ops._EagerTensorBase:\n\u001b[1;32m 300\u001b[0m \u001b[0;34m\"\"\"Creates a constant on the current device.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 301\u001b[0;31m \u001b[0mt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconvert_to_eager_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 302\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36mconvert_to_eager_tensor\u001b[0;34m(value, ctx, dtype)\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_dtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_datatype_enum\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 108\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEagerTensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 109\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 110\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "E4IAQjyoxi_V" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "%pwd\n", + "%cd /content/keras_repo/\n", + "!pip install -e ." + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 637 + }, + "id": "ggNuv7Gpis0U", + "outputId": "fea8224f-2eb5-40cd-beb5-c50fdaf658ee" + }, + "execution_count": 2, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "/content/keras_repo\n", + "Obtaining file:///content/keras_repo\n", + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Checking if build backend supports build_editable ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build editable ... \u001b[?25l\u001b[?25hdone\n", + " Preparing editable metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + "Requirement already satisfied: absl-py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (1.4.0)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (2.0.2)\n", + "Requirement already satisfied: rich in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (13.9.4)\n", + "Requirement already satisfied: namex in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.1.0)\n", + "Requirement already satisfied: h5py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (3.14.0)\n", + "Requirement already satisfied: optree in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.17.0)\n", + "Requirement already satisfied: ml-dtypes in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.5.3)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (25.0)\n", + "Requirement already satisfied: typing-extensions>=4.6.0 in /usr/local/lib/python3.11/dist-packages (from optree->keras==3.11.0) (4.14.1)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (4.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (2.19.2)\n", + "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich->keras==3.11.0) (0.1.2)\n", + "Building wheels for collected packages: keras\n", + " Building editable for keras (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for keras: filename=keras-3.11.0-0.editable-py3-none-any.whl size=9410 sha256=14a9f79fa98ab9a13b5339f11ce408f6e2badd5a481b5d1057488251bd657e3e\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-tniy7f4x/wheels/09/7a/d4/6dbe98c57884e68eba731115af18ec3a7f493640582bacb80f\n", + "Successfully built keras\n", + "Installing collected packages: keras\n", + " Attempting uninstall: keras\n", + " Found existing installation: keras 3.11.0\n", + " Uninstalling keras-3.11.0:\n", + " Successfully uninstalled keras-3.11.0\n", + "Successfully installed keras-3.11.0\n" + ] + }, + { + "output_type": "display_data", + "data": { + "application/vnd.colab-display-data+json": { + "pip_warning": { + "packages": [ + "keras" + ] + }, + "id": "9253b25bc602444b8c1ab91268b664d8" + } + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [ + "from keras.datasets import cifar100\n", + "from keras.utils import to_categorical\n", + "from keras.models import load_model\n", + "import pandas as pd\n", + "from keras.pruning import LnPruning\n", + "\n", + "# Load the CIFAR-100 dataset\n", + "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", + "\n", + "# Normalize the image data\n", + "x_train = x_train.astype('float32') / 255.0\n", + "x_test = x_test.astype('float32') / 255.0\n", + "\n", + "# Convert labels to one-hot encoding\n", + "num_classes = 100\n", + "y_train = to_categorical(y_train, num_classes)\n", + "y_test = to_categorical(y_test, num_classes)\n", + "\n", + "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", + "print(f\"Training data shape: {x_train.shape}\")\n", + "print(f\"Training labels shape: {y_train.shape}\")\n", + "print(f\"Testing data shape: {x_test.shape}\")\n", + "print(f\"Testing labels shape: {y_test.shape}\")\n", + "\n", + "# Define pruning ratios\n", + "pruning_ratios = [i * 0.2 for i in range(1, 4)] + [0.95, 0.97, 0.99]\n", + "pruning_ratios = [.3]\n", + "\n", + "# Define pruning methods\n", + "pruning_methods = [\"saliency\",\"taylor\"]\n", + "\n", + "# Initialize an empty list to store the results\n", + "pruning_results = []\n", + "\n", + "# Iterate through each pruning method\n", + "for pruning_method in pruning_methods:\n", + " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", + " # Iterate through each pruning ratio\n", + " for ratio in pruning_ratios:\n", + " print(f\"Processing pruning ratio: {ratio}\")\n", + "\n", + " # Load the saved model\n", + " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", + "\n", + " # Apply pruning\n", + " try:\n", + " if pruning_method in [\"saliency\", \"taylor\"]:\n", + " # Use a smaller subset for gradient-based methods to save time\n", + " dataset_subset = (x_train[:256], y_train[:256])\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", + " elif isinstance(pruning_method, str):\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", + " else:\n", + " # Assume it's a PruningMethod instance like LnPruning\n", + " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, pruning_batch_size=32)\n", + "\n", + "\n", + " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", + "\n", + " # Evaluate the pruned model\n", + " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", + "\n", + " # Append the results\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': loss,\n", + " 'Test Accuracy': accuracy\n", + " })\n", + " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", + "\n", + " except ValueError as e:\n", + " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", + " # Optionally store a record indicating failure\n", + " pruning_results.append({\n", + " 'Pruning Ratio': ratio,\n", + " 'Pruning Method': str(pruning_method), # Convert method object to string\n", + " 'Test Loss': None,\n", + " 'Test Accuracy': None,\n", + " 'Error': str(e)\n", + " })\n", + "\n", + "\n", + "# Convert results to a pandas DataFrame for easy display and analysis\n", + "results_df = pd.DataFrame(pruning_results)\n", + "\n", + "# Display the results table\n", + "print(\"\\n--- Pruning Evaluation Results ---\")\n", + "display(results_df)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "id": "jJnwc8i2xptV", + "outputId": "957c279e-5ffa-4587-dc4b-53dff728c01e" + }, + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "CIFAR-100 dataset loaded and preprocessed again.\n", + "Training data shape: (50000, 32, 32, 3)\n", + "Training labels shape: (50000, 100)\n", + "Testing data shape: (10000, 32, 32, 3)\n", + "Testing labels shape: (10000, 100)\n", + "\n", + "--- Pruning Method: saliency ---\n", + "Processing pruning ratio: 0.3\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "WARNING:tensorflow:5 out of the last 5 calls to .compute_gradients_cached at 0x7dd5a3b63b00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n", + "WARNING:tensorflow:6 out of the last 6 calls to .compute_gradients_cached at 0x7dd5a3b63b00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" + ] + }, + { + "output_type": "error", + "ename": "KeyboardInterrupt", + "evalue": "", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-1493771993.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 1007\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1008\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1009\u001b[0;31m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcalculate_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights_tensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1010\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1011\u001b[0m \u001b[0mflat_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msaliency_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcalculate_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 1027\u001b[0m \"\"\"\n\u001b[1;32m 1028\u001b[0m \u001b[0;31m# Use efficient gradient computation (matches model.fit performance)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1029\u001b[0;31m \u001b[0mgradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1030\u001b[0m \u001b[0;31m# JAXPruner's simple and clean saliency calculation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1031\u001b[0m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mgradients\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_gradients\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 294\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"tensorflow\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 296\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients_tf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 297\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"jax\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 298\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients_jax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_gradients_tf\u001b[0;34m(self, variable, model, loss_fn, dataset)\u001b[0m\n\u001b[1;32m 337\u001b[0m \u001b[0;31m# Use cached tf.function following standard Keras pattern\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 338\u001b[0m \u001b[0mgradient_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_get_tf_gradient_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 339\u001b[0;31m return gradient_fn(\n\u001b[0m\u001b[1;32m 340\u001b[0m \u001b[0mtrainable_variables\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 341\u001b[0m \u001b[0mtf_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 831\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 832\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_jit_compile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 833\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 834\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 835\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 876\u001b[0m \u001b[0;31m# In this case we have not created variables on the first call. So we can\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 877\u001b[0m \u001b[0;31m# run the first trace but we should fail if variables are created.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 878\u001b[0;31m results = tracing_compilation.call_function(\n\u001b[0m\u001b[1;32m 879\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 880\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mcall_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 130\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0margs\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0margs\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 132\u001b[0;31m function = trace_function(\n\u001b[0m\u001b[1;32m 133\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mtrace_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 176\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 177\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m concrete_function = _maybe_define_function(\n\u001b[0m\u001b[1;32m 179\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_maybe_define_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 282\u001b[0m \u001b[0mtarget_func_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlookup_func_type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 283\u001b[0;31m concrete_function = _create_concrete_function(\n\u001b[0m\u001b[1;32m 284\u001b[0m \u001b[0mtarget_func_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup_func_context\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 285\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_create_concrete_function\u001b[0;34m(function_type, type_context, func_graph, tracing_options)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[0mattributes_lib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLE_ACD\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 309\u001b[0m )\n\u001b[0;32m--> 310\u001b[0;31m traced_func_graph = func_graph_module.func_graph_from_py_func(\n\u001b[0m\u001b[1;32m 311\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 312\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython_function\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/func_graph.py\u001b[0m in \u001b[0;36mfunc_graph_from_py_func\u001b[0;34m(name, python_func, args, kwargs, signature, func_graph, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, create_placeholders)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moriginal_func\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munwrap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpython_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mfunc_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpython_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mfunc_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mfunc_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1062\u001b[0m \u001b[0;31m# invariant: `func_outputs` contains only Tensors, CompositeTensors,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args, **kwds)\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;31m# the function a weak reference to itself to avoid a reference cycle.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompile_with_xla\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 599\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mweak_wrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__wrapped__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 600\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 601\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/autograph_util.py\u001b[0m in \u001b[0;36mautograph_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;34m\"\"\"Calls a converted version of original_func.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m return api.converted_call(\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0moriginal_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 437\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 438\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 439\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconverted_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0meffective_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 440\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 441\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconverted_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0meffective_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mtf__compute_gradients_cached\u001b[0;34m(trainable_variables, tf_dataset, model, loss_fn, target_var_index)\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mUndefined\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'grad'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mbatch_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mUndefined\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'batch_gradients'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfor_stmt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloop_body_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_state_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_state_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m'total_samples'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'iterate_names'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'(batch_x, batch_y)'\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0maveraged_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mif_exp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtotal_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtotal_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'total_samples > 0'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0maccumulated_gradients\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/operators/control_flow.py\u001b[0m in \u001b[0;36mfor_stmt\u001b[0;34m(iter_, extra_test, body, get_state, set_state, symbol_names, opts)\u001b[0m\n\u001b[1;32m 447\u001b[0m \u001b[0mfor_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_tf_distributed_iterable_for_stmt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 448\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 449\u001b[0;31m \u001b[0mfor_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miter_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextra_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msymbol_names\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mopts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 450\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 451\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36m_tf_ag_dataset_for_stmt\u001b[0;34m(ds, extra_test, body, get_state, set_state, symbol_names, opts)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnew_reduce_state\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m \u001b[0mds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_general_purpose_scan\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minit_vars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscan_body\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 119\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_test\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0mds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtake_while_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtake_while\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtake_while_predicate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36m_general_purpose_scan\u001b[0;34m(ds, init_state, body)\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# pylint: disable=g-import-not-at-top,protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mscan_op\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 45\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mscan_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_ScanDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minit_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0muse_default_device\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 46\u001b[0m \u001b[0;31m# pylint: enable=g-import-not-at-top,protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/scan_op.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, input_dataset, initial_state, scan_func, use_default_device, name)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0mneed_to_rerun\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 57\u001b[0;31m wrapped_func = structured_function.StructuredFunctionWrapper(\n\u001b[0m\u001b[1;32m 58\u001b[0m \u001b[0mscan_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_transformation_name\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, func, transformation_name, dataset, input_classes, input_shapes, input_types, input_structure, add_to_graph, use_legacy_function, defun_kwargs)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[0mfn_factory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrace_tf_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdefun_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 265\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_function\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn_factory\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 266\u001b[0m \u001b[0;31m# There is no graph to add in eager mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[0madd_to_graph\u001b[0m \u001b[0;34m&=\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mget_concrete_function\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1254\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_concrete_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1255\u001b[0m \u001b[0;31m# Implements PolymorphicFunction.get_concrete_function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1256\u001b[0;31m \u001b[0mconcrete\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_concrete_function_garbage_collected\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1257\u001b[0m \u001b[0mconcrete\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_garbage_collector\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelease\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mconcrete\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_get_concrete_function_garbage_collected\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1224\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1225\u001b[0m \u001b[0minitializers\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1226\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initialize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0madd_initializers_to\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitializers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1227\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initialize_uninitialized_variables\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minitializers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1228\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_initialize\u001b[0;34m(self, args, kwds, add_initializers_to)\u001b[0m\n\u001b[1;32m 694\u001b[0m )\n\u001b[1;32m 695\u001b[0m \u001b[0;31m# Force the definition of the function for these arguments\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 696\u001b[0;31m self._concrete_variable_creation_fn = tracing_compilation.trace_function(\n\u001b[0m\u001b[1;32m 697\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 698\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mtrace_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 176\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 177\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m concrete_function = _maybe_define_function(\n\u001b[0m\u001b[1;32m 179\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_maybe_define_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 282\u001b[0m \u001b[0mtarget_func_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlookup_func_type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 283\u001b[0;31m concrete_function = _create_concrete_function(\n\u001b[0m\u001b[1;32m 284\u001b[0m \u001b[0mtarget_func_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup_func_context\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 285\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_create_concrete_function\u001b[0;34m(function_type, type_context, func_graph, tracing_options)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[0mattributes_lib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLE_ACD\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 309\u001b[0m )\n\u001b[0;32m--> 310\u001b[0;31m traced_func_graph = func_graph_module.func_graph_from_py_func(\n\u001b[0m\u001b[1;32m 311\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 312\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython_function\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/func_graph.py\u001b[0m in \u001b[0;36mfunc_graph_from_py_func\u001b[0;34m(name, python_func, args, kwargs, signature, func_graph, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, create_placeholders)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moriginal_func\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munwrap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpython_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mfunc_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpython_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mfunc_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mfunc_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1062\u001b[0m \u001b[0;31m# invariant: `func_outputs` contains only Tensors, CompositeTensors,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args, **kwds)\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;31m# the function a weak reference to itself to avoid a reference cycle.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompile_with_xla\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 599\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mweak_wrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__wrapped__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 600\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 601\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args)\u001b[0m\n\u001b[1;32m 229\u001b[0m \u001b[0;31m# Note: wrapper_helper will apply autograph based on context.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=missing-docstring\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 231\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwrapper_helper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 232\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstructure\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_tensor_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_output_structure\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mt\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36mwrapper_helper\u001b[0;34m(*args)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0m_should_unpack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0mnested_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mautograph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtf_convert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_func\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvariable_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_variables_to_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m_should_pack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 688\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 689\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mconversion_ctx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 690\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 691\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint:disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 692\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ag_error_metadata'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 376\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_requested\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mconversion\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_allowlisted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 377\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_call_unconverted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 378\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[0;31m# internal_convert_user_code is for example turned off when issuing a dynamic\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36m_call_unconverted\u001b[0;34m(f, args, kwargs, options, update_cache)\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 458\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 459\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 460\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 461\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36mscan_body\u001b[0;34m(scan_state, scan_inputs)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;31m# TODO(mdan): the optimizer should be able to remove an invariant cond?\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0mextra_cond\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mconstant_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconstant\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# dummy value, unused\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m \u001b[0mnew_loop_vars\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmain_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0mscan_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_loop_vars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextra_cond\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36mmain_path\u001b[0;34m()\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmain_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m \u001b[0mbody\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 87\u001b[0m \u001b[0mnew_loop_vars\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_state\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m control_flow.verify_tf_loop_vars(\n", + "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mloop_body_1\u001b[0;34m(itr_1)\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mitr_1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGradientTape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 26\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_x\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 27\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_mean\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 329\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mconversion\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_in_allowlist_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 330\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Allowlisted %s: from cache'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 331\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_call_unconverted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 332\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrol_status_ctx\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstatus\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mStatus\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLED\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36m_call_unconverted\u001b[0;34m(f, args, kwargs, options, update_cache)\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 458\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 459\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 460\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 461\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/layers/convolutional/base_conv.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 257\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mbias_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilters\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 259\u001b[0;31m \u001b[0mbias\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 260\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/ops/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 5365\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0many_symbolic_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mReshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msymbolic_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 5367\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5368\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5369\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 2168\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2169\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2170\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2171\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2172\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8798\u001b[0m \u001b[0;32mpass\u001b[0m \u001b[0;31m# Add nodes to the TensorFlow graph.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8799\u001b[0m \u001b[0;31m# Add nodes to the TensorFlow graph.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8800\u001b[0;31m _, _, _op, _outputs = _op_def_library._apply_op_helper(\n\u001b[0m\u001b[1;32m 8801\u001b[0m \"Reshape\", tensor=tensor, shape=shape, name=name)\n\u001b[1;32m 8802\u001b[0m \u001b[0m_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_outputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/op_def_library.py\u001b[0m in \u001b[0;36m_apply_op_helper\u001b[0;34m(op_type_name, name, **keywords)\u001b[0m\n\u001b[1;32m 776\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname_scope\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mscope\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 777\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfallback\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 778\u001b[0;31m _ExtractInputsAndAttrs(op_type_name, op_def, allowed_list_attr_map,\n\u001b[0m\u001b[1;32m 779\u001b[0m \u001b[0mkeywords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdefault_type_attr_map\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 780\u001b[0m input_types)\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/op_def_library.py\u001b[0m in \u001b[0;36m_ExtractInputsAndAttrs\u001b[0;34m(op_type_name, op_def, allowed_list_attr_map, keywords, default_type_attr_map, attrs, inputs, input_types)\u001b[0m\n\u001b[1;32m 549\u001b[0m preferred_dtype=default_dtype)\n\u001b[1;32m 550\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 551\u001b[0;31m values = ops.convert_to_tensor(\n\u001b[0m\u001b[1;32m 552\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minput_arg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/profiler/trace.py\u001b[0m in \u001b[0;36mwrapped\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mtrace_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapped\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/ops.py\u001b[0m in \u001b[0;36mconvert_to_tensor\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)\u001b[0m\n\u001b[1;32m 734\u001b[0m \u001b[0;31m# TODO(b/142518781): Fix all call-sites and remove redundant arg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 735\u001b[0m \u001b[0mpreferred_dtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpreferred_dtype\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mdtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 736\u001b[0;31m return tensor_conversion_registry.convert(\n\u001b[0m\u001b[1;32m 737\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpreferred_dtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccepted_result_types\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 738\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[0moverload\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"__tf_tensor__\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0moverload\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 209\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moverload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=not-callable\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 210\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 211\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mbase_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconversion_func\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/core.py\u001b[0m in \u001b[0;36m__tf_tensor__\u001b[0;34m(self, dtype, name)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0;31m# Overload native accessor.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__tf_tensor__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 84\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 85\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;31m# Methods below are for SavedModel support\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion.py\u001b[0m in \u001b[0;36mconvert_to_tensor_v2_with_dispatch\u001b[0;34m(value, dtype, dtype_hint, name)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mIf\u001b[0m \u001b[0mthe\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mof\u001b[0m \u001b[0mgiven\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mgraph\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \"\"\"\n\u001b[0;32m--> 161\u001b[0;31m return convert_to_tensor_v2(\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype_hint\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype_hint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion.py\u001b[0m in \u001b[0;36mconvert_to_tensor_v2\u001b[0;34m(value, dtype, dtype_hint, name)\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0;34m\"\"\"Converts the given `value` to a `Tensor`.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 170\u001b[0m \u001b[0;31m# preferred_dtype = preferred_dtype or dtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 171\u001b[0;31m return tensor_conversion_registry.convert(\n\u001b[0m\u001b[1;32m 172\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpreferred_dtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 173\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 232\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 234\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconversion_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 235\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNotImplemented\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_dense_var_to_tensor\u001b[0;34m(var, dtype, name, as_ref)\u001b[0m\n\u001b[1;32m 2376\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2377\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_dense_var_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvar\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2378\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dense_var_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2379\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2380\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_dense_var_to_tensor\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 1622\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1623\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1624\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1625\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1626\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__iadd__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munused_other\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36mvalue\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 656\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_cached_value\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 657\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolocate_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mignore_existing\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 658\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_read_variable_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 659\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 660\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_as_graph_element\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_read_variable_op\u001b[0;34m(self, no_copy)\u001b[0m\n\u001b[1;32m 841\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mread_and_set_handle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mno_copy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 842\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 843\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mread_and_set_handle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mno_copy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 844\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 845\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36mread_and_set_handle\u001b[0;34m(no_copy)\u001b[0m\n\u001b[1;32m 831\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mno_copy\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mforward_compat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward_compatible\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2022\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 832\u001b[0m \u001b[0mgen_resource_variable_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdisable_copy_on_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 833\u001b[0;31m result = gen_resource_variable_ops.read_variable_op(\n\u001b[0m\u001b[1;32m 834\u001b[0m self.handle, self._dtype)\n\u001b[1;32m 835\u001b[0m \u001b[0m_maybe_set_handle_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_resource_variable_ops.py\u001b[0m in \u001b[0;36mread_variable_op\u001b[0;34m(resource, dtype, name)\u001b[0m\n\u001b[1;32m 552\u001b[0m \u001b[0m_attrs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\"dtype\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_attr_type\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"dtype\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0m_inputs_flat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 554\u001b[0;31m _execute.record_gradient(\n\u001b[0m\u001b[1;32m 555\u001b[0m \"ReadVariableOp\", _inputs_flat, _attrs, _result)\n\u001b[1;32m 556\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36mrecord_gradient\u001b[0;34m(op_name, inputs, attrs, outputs)\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mA\u001b[0m \u001b[0mlist\u001b[0m \u001b[0mof\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 170\u001b[0m \"\"\"\n\u001b[0;32m--> 171\u001b[0;31m pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,\n\u001b[0m\u001b[1;32m 172\u001b[0m ops.get_name_scope())\n\u001b[1;32m 173\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor.py\u001b[0m in \u001b[0;36m_shape_tuple\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 345\u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0m_shape_tuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 346\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_shape_as_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] + }, + { + "source": [ + "from google.colab import sheets\n", + "sheet = sheets.InteractiveSheet(df=results_df)" + ], + "cell_type": "code", + "execution_count": 2, + "outputs": [ + { + "output_type": "error", + "ename": "NameError", + "evalue": "name 'results_df' is not defined", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-3464966735.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msheets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0msheet\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msheets\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mInteractiveSheet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mresults_df\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mNameError\u001b[0m: name 'results_df' is not defined" + ] + } + ], + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 159 + }, + "id": "zgzIjLwxze6j", + "outputId": "458ec0b1-c6ee-4910-8b09-bb87ab684336" + } + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "TgcQ_o3Kwq0G" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c3f7a9af" + }, + "source": [ + "## Visualize results with a line plot\n", + "\n", + "### Subtask:\n", + "Create a line plot showing the accuracy and loss for each pruning ratio and method." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c0605653" + }, + "source": [ + "**Reasoning**:\n", + "Visualize the pruning results using a line plot to compare the performance of different pruning methods across various sparsity levels. Plotting both accuracy and loss will provide a comprehensive view of how pruning affects the model." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 389 + }, + "id": "c1c68d7f", + "outputId": "c3e73461-402c-4511-d1c4-421cd27b3ae5" + }, + "source": [ + "import matplotlib.pyplot as plt\n", + "import seaborn as sns\n", + "\n", + "# Set the style for the plots\n", + "sns.set_style(\"whitegrid\")\n", + "\n", + "# Create a figure and a set of subplots\n", + "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))\n", + "\n", + "# Plot Test Accuracy\n", + "sns.lineplot(data=results_df, x='Pruning Ratio', y='Test Accuracy', hue='Pruning Method', marker='o', ax=axes[0])\n", + "axes[0].set_title('Test Accuracy vs. Pruning Ratio')\n", + "axes[0].set_xlabel('Pruning Ratio')\n", + "axes[0].set_ylabel('Test Accuracy')\n", + "axes[0].set_ylim(0, 1) # Set y-axis limit for accuracy between 0 and 1\n", + "axes[0].legend(title='Pruning Method')\n", + "\n", + "# Plot Test Loss\n", + "sns.lineplot(data=results_df, x='Pruning Ratio', y='Test Loss', hue='Pruning Method', marker='o', ax=axes[1])\n", + "axes[1].set_title('Test Loss vs. Pruning Ratio')\n", + "axes[1].set_xlabel('Pruning Ratio')\n", + "axes[1].set_ylabel('Test Loss')\n", + "# axes[1].set_ylim(0, 5) # Optional: set a reasonable y-limit for loss\n", + "axes[1].legend(title='Pruning Method')\n", + "\n", + "# Adjust layout to prevent overlapping titles and labels\n", + "plt.tight_layout()\n", + "\n", + "# Display the plots\n", + "plt.show()" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAABv4AAAJOCAYAAAB/dnBOAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3XVcFOkfB/DP0iUmdqO7GKQoiiiKnh2oZwsGnt16it2HeQYYnIF9JmC3P+uMszvOAhMVg4Zld35/cDvHuouCAivyeb9e+4Kd/M7ss7PfmWfmeSSCIAggIiIiIiIiIiIiIiIiohxNT9cBEBEREREREREREREREdG3Y8UfERERERERERERERER0Q+AFX9EREREREREREREREREPwBW/BERERERERERERERERH9AFjxR0RERERERERERERERPQDYMUfERERERERERERERER0Q+AFX9EREREREREREREREREPwBW/BERERERERERERERERH9AFjxR0RERERERERERERERPQDYMUfERERfbXz589DJpPh/Pnzug4ly/n7+0Mmk+k6DCIiIiIionTJTecwXl5e8PLy0nUYRETfBQNdB0BE9CNKb2K9bt06uLi4fNO64uPjsXLlStSoUSPDyzpx4gT69OkDKysrnDx5Enp6vB/ke5a6XEkkEhQqVAhSqRR9+/b95nKUEwUHB2Ps2LHie319fRQsWBC1a9fG8OHDUaRIkQwv81u+T0RERERE3+p7Ppc8f/48vL29sWjRIjRp0uSb1v2jUe0bFQMDAxQtWhTVqlXD4MGDUapUKR1Gpxu+vr4ICQkR3xsaGqJEiRJo1qwZ+vXrB2Nj4wwv88GDB9i/fz/atGmDkiVLZma4REQ/FFb8ERFlgTlz5qi937lzJ/766y+N4dbW1t+8rvj4eAQEBGDQoEEZPvHbtWsXSpQogefPn+PcuXNwdXX95ngoa9WuXRutW7eGIAh49uwZ/vzzT3Tv3h2BgYFwd3fP9niqV6+O69evw9DQMNvXrTJkyBCULFkSSUlJuHr1KkJCQnDp0iXs2bMnwyeTn/s+9e/fH3369MnM0ImIiIiI1OSUc0nSzsvLC7a2tkhOTsbt27exZcsWnDhxArt27fqqGxO/la7PYYyMjDBjxgwAQExMDI4ePYqlS5ciPDwc8+fPz/DyHjx4gICAANSoUUOj4m/VqlWZEjMR0Y+AFX9ERFmgdevWau+vXbuGv/76S2O4LsXFxeHYsWMYMWIEgoODsXv37u+24i8uLg5mZma6DuO7ULZsWbVy9NNPP6FVq1ZYt25dmhV/iYmJMDQ0zJInOvX09L7qTs3MVLduXdja2gIA2rdvj/z582PFihU4evQomjVrlmnrMTAwgIEBUyciIiIiyjo54VyS0ubs7Cw+DdmuXTuULVsWM2bMQGhoKPr27at1nqw839X1OYyBgYFa2e3SpQs6deqEvXv3YuzYsShUqFCmrcvIyCjTlkVElNOxTTciIh1RKpVYs2YNmjdvDltbW7i6umLSpEn4+PGj2nQ3btyAj48PXFxcYGdnBw8PD7F5w2fPnqFWrVoAgICAAMhkMshkMvj7+39x/YcPH0ZCQgKaNGmCZs2a4dChQ0hMTNSYLjExEf7+/mjcuDFsbW3h5uaGQYMGITw8XG1b1q5di5YtW8LW1hY1a9aEj48Pbty4IcYpk8kQHByssfxP41X1QfDgwQOMHDkS1atXR5cuXQAAd+/eha+vLxo0aABbW1vUrl0bY8eOxfv37zWWGxERgXHjxsHNzQ1Vq1aFh4cHJk+ejKSkJDx9+hQymQxr1qzRmO/y5cuQyWTYs2eP1v329u1bVK5cGQEBARrjHj16BJlMhg0bNgAA5HI5AgIC0KhRI9ja2sLFxQWdO3fGX3/9pXXZX0MmkyF//vx49uwZgP/63Nu7dy8WLFiAOnXqwN7eHjExMWn27xAcHAyZTCYuAwA8PDzQt29fXLx4ET///DNsbW3RoEEDhIaGqs2rrY8/Ly8vtGjRAg8ePICXlxfs7e1Rp04drFixQmPdz58/R79+/eDg4IBatWrht99+w6lTp76p30BnZ2cAwNOnT8VhSUlJWLRoEdq2bYtq1arBwcEBXbp0wblz58RpvvR90rb/kpOTsWTJEjRs2FAsZ7///juSkpK+KnYiIiIioi/R9bnklzx9+hRDhgxBjRo1YG9vjw4dOuD48eMa061fvx7NmzeHvb09qlevjrZt22L37t3i+JiYGMycORMeHh6oWrUqatWqhZ49e+LWrVtprvvAgQOQyWT4+++/NcZt3rwZMpkM9+/fBwC8efMGY8eORd26dVG1alW4ubmhf//+audF36pmzZoAIC7zc+e7afVR5+vrCw8PD/G96vx61apV2LJli3gu0q5dO1y/fl1tXm3nMDKZDNOmTcORI0fQokULVK1aFc2bN8fJkyc11n3+/Hm0bdsWtra2aNiwITZv3vxN/QZKJBI4OTlBEAS187Xnz59jypQpaNy4Mezs7ODi4oIhQ4aofRbBwcEYOnQoAMDb21sss6rzRm37LzIyEuPGjYOrqytsbW3RqlUrteZHiYh+VLxtnYhIRyZNmoSQkBC0bdsWXl5eePbsGTZu3Ijbt2/jzz//hKGhISIjI+Hj44P8+fOjT58+sLS0xLNnz3D48GEAQIECBTBlyhRMmTIFP/30E3766ScA6esXYvfu3XBxcYGVlRWaN2+O+fPn49ixY2jatKk4jUKhQN++fXH27Fk0b94c3t7eiI2NxV9//YX79++jdOnSAIDx48cjODgYdevWxc8//wyFQoGLFy/i2rVr4pNYGTV06FCUKVMGw4cPhyAIAIAzZ87g6dOnaNu2LaysrPDPP/9g69atePDgAbZu3QqJRAIgpdLv559/RnR0NDp06IDy5csjIiICBw8eREJCAkqVKgUnJyfs2rULPXr00Ngv5ubmaNCggda4ChUqhOrVq2P//v0YNGiQ2rh9+/ZBX19fvMMzICAAgYGBaN++Pezs7BATE4ObN2/i1q1bqF279lftl099/PgRUVFRKFOmjNrwpUuXwtDQED4+PkhKSvqqpjjDwsIwdOhQ/Pzzz2jTpg127NgBX19fVKlSBRUrVvxiXL1798ZPP/2Epk2b4uDBg5g3bx6kUqn4ZGJcXBy6d++ON2/ewNvbG4UKFcKePXu+usJP5fnz5wAAS0tLcVhMTAy2bduGFi1aoH379oiNjcX27dvRu3dvbNu2DZUqVfqq79OECRMQEhKCxo0bo2fPnrh+/ToCAwPx8OFDLFmy5Ju2g4iIiIhIG12fS37O27dv0alTJ8THx8PLywv58+dHSEgI+vfvj8WLF4vr2bp1K2bMmIHGjRvD29sbiYmJuHfvHq5du4aWLVsCACZPnoyDBw+iW7dusLa2xocPH3Dp0iU8fPgQVapU0br+evXqwczMDPv370eNGjXUxu3btw8VK1aEVCoFAAwePBgPHjxAt27dUKJECbx79w5//fUXXr58mWn9x6lumM2XL5/acG3nuxm1Z88exMbGomPHjpBIJFi5ciUGDx6MI0eOfPH879KlSzh06BC6dOkCc3NzrF+/HkOGDMH//vc/5M+fHwBw+/Zt9O7dG1ZWVhg8eDCUSiWWLFmCAgUKfFW8KtrO127cuIErV66gefPmKFq0KJ4/f44///wT3t7e2Lt3L0xNTVG9enV4eXlh/fr16NevH8qXLw8g7WZvExIS4OXlhfDwcHTt2hUlS5bEgQMH4Ovri6ioKHTv3v2btoOI6LsmEBFRlps6daoglUrF9xcuXBCkUqmwa9cutelOnjypNvzw4cOCVCoVrl+/nuayIyMjBalUKixevDjd8bx9+1aoXLmysHXrVnFYx44dhf79+6tNt337dkEqlQpBQUEay1AqlYIgCMLZs2cFqVQqTJ8+Pc1pnj59KkilUmHHjh0a03wa++LFiwWpVCqMGDFCY9r4+HiNYXv27BGkUqlw4cIFcdjo0aMFGxsbrftNFdPmzZsFqVQqPHjwQByXlJQkuLi4CGPGjNGYLzXVvPfu3VMb3qxZM8Hb21t836pVK6FPnz6fXVZGSKVSYdy4cUJkZKQQGRkpXLt2TejevbsglUqF1atXC4IgCOfOnROkUqnQoEEDjf2l2ref2rFjhyCVSoWnT5+Kw+rXr6+xXyMjI4WqVasKs2bNEoep1nfu3DlxWLdu3QSpVCqEhISIwxITE4XatWsLgwcPFoetXr1akEqlwuHDh8VhCQkJQpMmTTSWqY0q7jNnzgiRkZHCy5cvhQMHDgg1a9YUqlatKrx8+VKcNjk5WUhMTFSb/+PHj4Krq6swduxYtW1M6/v06f67c+eOIJVKhfHjx6tNN2vWLEEqlQpnz579bPxERERERF/yPZ1LqnL//fv3pznNzJkzNc4jYmJiBA8PD6F+/fqCQqEQBEEQ+vfvLzRv3vyz66tWrZowderUdMWW2ogRI4RatWoJycnJ4rDXr18LNjY2QkBAgCAIKecCUqlUWLlyZYaXr41q32zfvl2IjIwUIiIihOPHjwv169cXZDKZ+Dl87ny3W7duQrdu3TSGjxkzRqhfv774XnV+XaNGDeHDhw/i8CNHjghSqVQ4duyYOEzbOaBUKhWqVKkihIWFicNU5zbr168Xh/Xt21ewt7cXXr16JQ578uSJULlyZa3nldridnBwEM9fw8LChFWrVgkymUxo0aKFeG4uCNrP9a9cuaJxXrl///40zxU/3X9r1qwRpFKpsHPnTnFYUlKS0LFjR8HBwUGIjo7+4jYQEeVUbOqTiEgHDhw4gDx58qB27dp49+6d+KpSpQrMzMzEJ57y5MkDADh+/DjkcnmmrX/v3r2QSCRo1KiROKxFixY4efKkWvMwhw4dQv78+dGtWzeNZaierjt06BAkEonG02+pp/kanTp10hhmYmIi/p+YmIh3797B3t4eAMTmXpRKJY4cOYL69etrfdpQFVPTpk1hbGys1pTM6dOn8f79e7Rq1eqzsf30008wMDDAvn37xGH379/HgwcP1PqUs7S0xD///IMnT56kY4vTZ/v27ahVqxZq1aqF9u3b4/Lly+jZs6fG3Yqenp5q++trVKhQQWw2E0i5K7hcuXJqTbKkxczMTK0vByMjI9ja2qrNe+rUKRQpUkTt6UpjY2N06NAhQ3H26NEDtWrVgru7O4YMGQJTU1MsW7YMRYsWFafR19cX+3xQKpX48OEDkpOTUbVqVdy+fTtD61M5ceIEAKBnz55qw3v16qU2noiIiIgos+j6XPJLTpw4ATs7O7XzCHNzc3Ts2BHPnz/HgwcPAKScK7169UqjacrULC0tce3aNURERGQohqZNmyIyMlKtuc+DBw9CqVSK52smJiYwNDTE33//rdFE6rcYN24catWqhTp16qBPnz6Ij4/HrFmzNM5NtZ3vZlSzZs2QN29e8b22Lg/S4urqKrbgAwA2NjawsLAQ51UoFDh79iwaNGiAIkWKiNOVKVMGderUSXeMcXFx4vnrTz/9hNmzZ8PJyQlLly5Vu16Q+txVLpfj/fv3KF26NCwtLb/6fO3kyZOwsrJCixYtxGGGhobw8vJCXFwcLly48FXLJSLKCdjUJxGRDoSFhSE6OlrsU+FTkZGRAIAaNWqgcePGCAgIwJo1a1CjRg00bNgQLVu2/KaOq3ft2gU7Ozt8+PABHz58AABUqlQJcrkcBw4cQMeOHQGkNEtSrly5z3YGHh4ejsKFC2s0XfKttDWt8uHDBwQEBGDfvn3iPlKJjo4GALx79w4xMTFfbIrS0tIS9evXx549ezBs2DAAKc18FilSROyHIS0FChRAzZo1sX//fnHeffv2wcDAQGy6BgCGDBmCAQMGoHHjxpBKpXBzc0Pr1q1hY2Pzpc1PU4MGDdCtWzdIJBKYm5ujQoUKWjuCz4ymaYoVK6YxLG/evOk6MS5atKhGxW/evHlx79498f3z589RunRpjelSn4Cmx6RJk1CuXDlER0djx44duHDhgtbvR0hICFavXo3Hjx+rXfz42n31/Plz6OnpacRrZWUFS0tLsQkbIiIiIqLMoutzyS958eKFeHNmaqpmGV+8eAGpVIpffvkFZ86cQfv27VGmTBnUrl0bLVq0QLVq1cR5Ro0aBV9fX9SrVw9VqlSBu7s7PD09UapUqc/GULduXeTJkwf79u0T99O+fftQqVIllCtXDkDKjYmjRo3C7NmzUbt2bdjb26NevXrw9PSElZXVV2//wIED4ezsDD09PeTPnx/W1tZaz6ez4nxNVQkYFRWV4XlV86vmjYyMREJCgkaXEgC0DkuLsbExli9fDgB49eoVVq5cicjISBgbG6tNl5CQgMDAQAQHByMiIkKt+VPVuX5GPX/+HGXKlIGenvpzL6qmQV+8ePFVyyUiyglY8UdEpANKpRIFCxbEvHnztI5XtZkvkUiwePFiXL16Ff/73/9w6tQpjBs3DkFBQdiyZQvMzc0zvO4nT57gxo0bAKD2xJ/K7t27xYq/zJLWk38KhSLNeT49EQCAYcOG4cqVK/Dx8UGlSpVgZmYGpVKJ3r17f1W/CJ6enjhw4AAuX74MqVSKY8eOoXPnzhonBto0b94cY8eOxZ07d1CpUiXs378fNWvWVOvvoHr16jh8+DCOHj2Kv/76C9u3b8fatWsxdepUtG/fPsPxAikVaq6url+cTtvTfhn9HPT19TMWXCbNm1F2dnbiHbQNGzZEly5dMHLkSBw4cED8juzcuRO+vr5o2LAhfHx8ULBgQejr6yMwMDBdd8R+zrc82UpERERElBG6PJfMTNbW1jhw4ACOHz+OU6dO4dChQ9i0aRMGDhyIIUOGAEh5os3Z2RmHDx/GX3/9hVWrVmHFihXw9/cX+w3XxsjICA0bNsThw4cxefJkREZG4vLlyxgxYoTadD169ICHhweOHDmC06dPY9GiRfjjjz+wdu1aVK5c+au2SyqVput8Tdv5bloyer6WnnPjb5k3I/T19dX2h5ubG5o2bYpJkyaJFYIAMH36dAQHB6N79+5wcHBAnjx5IJFIvqkPRCKi3IwVf0REOlC6dGmcPXsWTk5O6WqO0cHBAQ4ODhg+fDh2796NUaNGYd++fWjfvn2GKx12794NQ0NDzJkzR6OC69KlS1i/fj1evHiB4sWLo3Tp0rh27RrkcnmanYOXLl0ap0+fxocPH9J86i+tOw8zcofdx48fcfbsWQwePFitWdFPm9EsUKAALCws8M8//3xxmXXq1EGBAgWwe/du2NvbIz4+Xq15ys9p2LAhJk2aJDb3+eTJE/Tt21djunz58qFdu3Zo164dYmNj0a1bN/j7+391xd+3UHWeHhUVpdaRuq7udCxRogQePHgAQRDUynF4ePhXL1NfXx8jRoyAt7c3Nm7ciD59+gBIadqnVKlSCAgIUFvX4sWL1ebPyPepRIkSUCqVCAsLU+tQ/u3bt4iKikKJEiW+ejuIiIiIiLTR5blkehQvXhyPHz/WGP7o0SNxvIqZmRmaNWuGZs2aISkpCYMHD8by5cvRt29fsWKscOHC6Nq1K7p27YrIyEi0adMGy5cv/2zFH5DS3GdISAjOnj2Lhw8fQhAENG3aVGO60qVLo1evXujVqxeePHkCT09PrF69Os2K1ayUN29erTcl6uJ8rWDBgjA2NkZYWJjGOG3D0qtw4cLo0aMHAgICcPXqVTg4OABIOV/z9PSEr6+vOG1iYqLG034ZPV+7d+8elEql2rUPbWWRiOhHwz7+iIh0oGnTplAoFFi6dKnGuOTkZLGC7OPHjxp3t1WqVAkAkJSUBAAwNTUFkL7mPICUir9q1aqhWbNmaNKkidqrd+/eAIA9e/YASHki8P3799i4caPGclRxNWrUCIIgICAgIM1pLCwskD9/fly8eFFt/KZNm9IVM5D2HYlr165Ve6+np4eGDRvif//7n/hko7aYAMDAwADNmzfH/v37ERwcDKlUmu5mOC0tLeHm5ob9+/dj7969MDQ0RMOGDdWmef/+vdp7c3NzlC5dWvzsgJRmSx4+fPjVzZdkhKpJytR9GcTFxSE0NDTL162Nm5sbIiIicPToUXFYYmIitm7d+k3LdXFxgZ2dHdauXYvExEQA/5Wf1J//tWvXcPXqVbV5M/J9Ul1s+LQMBgUFqY0nIiIiIsosujyXTA93d3dcv34dV65cEYfFxcVh69atKFGiBCpUqABA81zJyMgI1tbWEAQBcrkcCoVC4xypYMGCKFy4sNr5VFpcXV2RL18+7Nu3D/v374ednZ1aE6Hx8fHiuYJK6dKlYW5urrb8169f4+HDh9nST2KpUqXw6NEjvHv3Thx29+5dXL58OcvX/SnVk3pHjx5V62MxLCwMp06d+qZld+vWDaampvjjjz/U1vep9evXazztqCqz6Tl/rlu3Lt68eSPerAukfEfWr18PMzMzVK9e/Ws3gYjou8cn/oiIdKBGjRro2LEjAgMDcefOHdSuXRuGhoZ48uQJDhw4gPHjx6NJkyYICQnBn3/+iYYNG6J06dKIjY3F1q1bYWFhgbp16wJIadKxQoUK2L9/P8qWLYt8+fKhYsWKkEqlGuu9du0awsLC0LVrV61xFSlSBJUrV8bu3bvRp08feHp6IjQ0FH5+frh+/TqqVauG+Ph4nD17Fp07d0bDhg1Rs2ZNtG7dGuvXr0dYWBjq1KkDpVKJS5cuwcXFBd26dQMAtG/fHn/88QfGjx+PqlWr4uLFi1rvBE2LhYUFqlevjpUrV0Iul6NIkSL466+/8OzZM41pR4wYgb/++gteXl7o0KEDrK2t8ebNGxw4cACbNm1Se9rN09MT69evx/nz5zFq1Kh0xwOkND3z66+/YtOmTXBzc1NbLpDSHGiNGjVQpUoV5MuXDzdu3MDBgwfFfQIAhw8fxtixY+Hn54e2bdtmaP0ZVbt2bRQvXhzjx4/Ho0ePoK+vjx07diB//vw6uYu0Y8eO2LBhA0aOHAlvb29YWVlh9+7d4t2933IHso+PD4YOHYrg4GB07twZ9erVw6FDhzBw4EDUq1cPz549w+bNm1GhQgXExcWJ82Xk+2RjY4M2bdpgy5YtiIqKQvXq1XHjxg2EhISI3w0iIiIiosykq3PJ1A4dOiQ+NZVamzZt0KdPH+zduxe//PILvLy8kDdvXoSGhuLZs2fw9/cXn7zy8fFBoUKF4OTkhIIFC+LRo0fYsGED3N3dYWFhgaioKLi7u6Nx48awsbGBmZkZzpw5gxs3bqg9FZYWQ0ND/PTTT9i7dy/i4+MxZswYtfFPnjxBjx490KRJE1SoUAH6+vo4cuQI3r59i+bNm4vT/f777wgJCcHRo0czpV++z/n555+xZs0a+Pj44Oeff0ZkZKR4zhIbG5ul69Zm0KBBOH36NDp37ozOnTtDqVRiw4YNqFixIu7cufPVy82fPz/atm2LTZs24eHDh7C2tka9evWwc+dOWFhYoEKFCrh69SrOnDmj0apQpUqVoK+vjxUrViA6OhpGRkaoWbMmChYsqLGejh07YsuWLfD19cWtW7dQokQJHDx4EJcvX8a4ceNgYWHx1dtARPS9Y8UfEZGOTJs2DVWrVsXmzZuxYMEC6Ovro0SJEmjVqhWcnJwApJzU3bhxA/v27cPbt2+RJ08e2NnZYd68eWp3K86YMQPTp0+Hn58f5HI5Bg0apPVkbffu3QAADw+PNOPy8PCAv78/7t69CxsbG6xYsQLLli3Dnj17cOjQIeTLlw9OTk6QyWTiPH5+fpDJZNi+fTvmzJmDPHnyoGrVqnB0dBSnGThwIN69e4eDBw9i//79qFu3LlauXJlmp/TazJ8/H9OnT8emTZsgCAJq166NFStWoE6dOmrTFSlSBFu3bsWiRYuwe/duxMTEoEiRIqhbt65GczhVq1ZFxYoV8fDhQ7Rq1Srdsaj2lYmJCWJjY9GsWTON8V5eXjh27Bj++usvJCUloXjx4hg2bBh8fHwytJ7MYmhoiICAAEydOhWLFi2ClZUVunfvDktLS4wdOzbb4zE3N8fatWsxY8YMrFu3DmZmZvD09ISjoyMGDx6coX4vPtWoUSOULl0aq1evRocOHdC2bVu8ffsWW7ZswenTp1GhQgXMnTsXBw4cwN9//602b3q/T6ppS5YsiZCQEBw5cgSFChVC37591ZqjJSIiIiLKTLo4l0xt7969WofXqFEDzs7O2Lx5M+bOnYsNGzYgMTERMpkMy5cvR7169cRpO3bsiN27dyMoKAhxcXEoWrQovLy8MGDAAAAplZKdO3fGX3/9hUOHDkEQBJQuXRqTJ09Gly5d0rWfmjVrhm3btkEikWg081m0aFE0b94cZ8+exa5du6Cvr4/y5ctj4cKFaNy4cbqWn9msra0xe/ZsLF68GH5+fqhQoQLmzJmDPXv2aJyzZIeqVatixYoVmDNnDhYtWoRixYphyJAhePTokdaK34zo2bMnNm/ejBUrVmDWrFkYP3489PT0sHv3biQmJsLJyQlBQUFiq0QqVlZWmDp1KgIDAzF+/HgoFAqsW7dOa8WfiYkJ1q9fj3nz5iEkJAQxMTEoV65cttx0S0SkaxKBPaQSEVEu5+npibx582o02Ui6sWbNGvj5+eHkyZMoUqSIrsMhIiIiIiKifw0YMAAPHjzAoUOHdB0KERGlgX38ERFRrnbjxg3cuXMHnp6eug4lV0pISFB7n5iYiC1btqBs2bKs9CMiIiIiItKhT8/Xnjx5gpMnT6JGjRo6ioiIiNKDTX0SEVGudP/+fdy6dQurV6+GlZWV1qY6KesNGjQIxYsXh42NDWJiYrBr1y48evQI8+bN03VoREREREREuVrDhg3Rpk0blCpVCs+fP8fmzZthaGio0QQnERF9X1jxR0REudLBgwexZMkSlCtXDr///vs39SdHX8/NzQ3bt2/H7t27oVAoUKFCBSxYsIAVsURERERERDpWp04d7N27F2/evIGRkREcHBwwYsQIlC1bVtehERHRZ+i0j78LFy5g1apVuHnzJt68eYMlS5agYcOGn53n/PnzmDVrFv755x8UK1YM/fv3Z4esRERERERERERERERElOvptI+/uLg4yGQyTJ48OV3TP336FH379oWLiwt27tyJ7t27Y8KECTh16lQWR0pERERERERERERERET0fdNpU5/u7u5wd3dP9/SbN29GyZIl4evrCwCwtrbGpUuXsGbNGtSpUyerwiQiIiIiIiIiIiIiIiL67uWoPv6uXr2KWrVqqQ1zc3PDb7/9lu5lKJVKJCcnQ09PDxKJJLNDJCIiIiIiyjaCIECpVMLAwAB6ejpt0OW7w3M/IiIiIiL6UWTk3C9HVfy9ffsWhQoVUhtWqFAhxMTEICEhASYmJl9cRnJyMm7cuJFVIRIREREREWU7W1tbGBkZ6TqM7wrP/YiIiIiI6EeTnnO/HFXxlxlUNaGVK1eGvr6+jqOh3EChUOD27dssc6QTLH+kKyx7pCsse6RLuih/qnXyaT9Nqn1ia2vL4wFlC4VCgRs3brDMUbZj2SNdYvkjXWHZI13SRflTrTM95345quKvUKFCePv2rdqwt2/fwsLCIl1P+wEQm3gxMjLiAYGyhUKhAMAyR7rB8ke6wrJHusKyR7qki/KnWiebstSk2if6+vo8HlC2YpkjXWHZI11i+SNdYdkjXdJF+UvPuV+Oui3UwcEB586dUxt25swZODg46CYgIiIiIiIiIiIiIiIiou+ETiv+YmNjcefOHdy5cwcA8OzZM9y5cwcvXrwAAMyfPx+jR48Wp+/UqROePn2KOXPm4OHDh9i4cSP279+PHj166CJ8IiIiIiIiIiIiIiIiou+GTpv6vHnzJry9vcX3fn5+AIA2bdpg1qxZePPmDV6+fCmOL1WqFAIDA+Hn54d169ahaNGimDFjBurUqZPtsRMRERERERERERERERF9T3Ra8efi4oJ79+6lOX7WrFla5wkNDc3CqIjoe6dQKCCXy3UdRo6g6vcnISGB7Z1TtmLZI11h2SNdyqryZ2RklK4O3OnrMLekzMLfINIVlj3SJZa/nMXQ0JCfE1EuoNOKPyKijBAEAa9evcKHDx90HUqOIQgCDAwMEBYWlq6OX4kyC8se6QrLHulSVpU/PT09lCtXDkZGRpm2TGJuSZmPv0GkKyx7pEssfzlPvnz5ULRoUX5eRD8wVvwRUY6hujBTuHBhmJmZMUFJB0EQEB8fD1NTU+4vylYse6QrLHukS1lR/pRKJV68eIGXL1+idOnSLNeZiLklZTb+BpGusOyRLrH85RyCICAuLg6vX78GABQrVkzHERFRVmHFHxHlCAqFQrwwU7BgQV2Hk2MIggClUgkTExMm4JStWPZIV1j2SJeyqvxZWVnhxYsXSE5OhqGhYaYtNzdjbklZgb9BpCsse6RLLH85i6mpKQDg9evXKFy4MJv9JPpBsaMIIsoRVP2umJmZ6TgSIiIiouylauJT1YcOfTvmlkRERJRbqfIf9nFM9ONixR8R5Si8e4yIiIhyG+Y/WYf7loiIiHIb5j9EPz5W/BERERERERERERERERH9AFjxR0RERD+U4OBgODs76zqMTOPv74/WrVtn+nLPnz8PmUyGqKioTF82ERER0Y+AeWX6MK8kIiL6vrDij4iIiDKdr68vnJycYGNjg6pVq+Knn35CQEAAkpOTs3zdzZo1w8GDB7N8PcHBwZDJZGjatKnGuP3790Mmk8HDwyNDy5TJZDhy5EhmhUhERESU4/n6+kImk0EmkzGvzADmlURERLkXK/6IiIgoS7i6uuLUqVM4ePAgevbsiYCAAKxatUrrtElJSZm2XhMTExQsWDDTlvc5ZmZmePfuHa5cuaI2fPv27ShevHi2xEBERET0o6tTpw5Onz7NvJKIiIgoHVjxR0RERFnCyMgIVlZWKFGiBLp06QJXV1ccO3YMQMqd2wMGDMCyZcvg5uaGJk2aANB+Z7KzszOCg4MBAM+ePYNMJsOhQ4fg5eUFe3t7tGrVSu0CyadNMqmaNAoNDYWHhweqVauG4cOHIyYmRpwmJiYGI0eOhIODA9zc3LBmzRp4eXlh5syZn91GfX19tGjRAjt27BCHvXr1Cn///TdatGihMf2RI0fQpk0b2NraokGDBmp3q6vu4h44cKDWu7o/F39SUhJmzJiBWrVqwdbWFp07d8b169fV5j9x4gQaN24MOzs7eHl54fnz55/dNiIiIqLvBfPKz+eVDRs2RGBgIPNKIiIiAsCKPyIiIsomxsbGkMvl4vuzZ8/i8ePHCAoKQmBgYIaWtWDBAvj4+CA0NBRly5bFyJEjP9vcU3h4OI4ePYrly5cjMDAQFy5cwIoVK8Txs2bNwpUrV7Bs2TKsXr0aFy9exK1bt9IVS7t27bB//37Ex8cDSLlAVKdOHY27wy9evIgxY8bA29sb+/btw7Rp0xAcHIzly5cDSLmbGwD8/Pxw+vRp8X164p8zZw4OHjyIWbNmISQkBGXKlEHv3r3x4cMHAMDLly8xaNAg1K9fH6GhoWjfvj3mz5+fru0jIiIi+t4wr1TPK6dOnYrdu3czryQiIiIArPgjIiKiLCYIAs6cOYPTp0/DxcVFHG5mZoYZM2agYsWKqFixYoaW2atXL9SrVw/lypXDkCFD8Pz5c4SFhX02Bj8/P0ilUjg7O6NVq1Y4e/YsgJS7skNDQzF69GjUqlULUqkUfn5+UCqV6YqlcuXKKFWqFA4ePAhBEBASEoJ27dppTBcQEIA+ffqgTZs2KFWqFGrXro2hQ4di8+bNAIACBQoAACwtLWFlZSW+/1L8cXFx2Lx5M0aPHg13d3dUqFAB06dPh7GxsXiR588//0Tp0qXh6+uL8uXLo1WrVmjTpk26to+IiIjoe8G8MoW2vLJ///7YsmULAOaVREREuZ2BrgMgIiKiH9OpU6fg5OQEuVwOQRDQokULDB48WBwvlUphZGT0VcuWyWTi/1ZWVgCAd+/ewdraWuv0JUqUgIWFhfi+cOHCiIyMBJDSzJNcLoednZ04Pk+ePChXrly642nXrh127NiBYsWKIT4+Hu7u7tiwYYPaNHfv3sXly5fFO7EBQKFQIDExEfHx8TA1NU1z+Z+LPzw8HHK5HE5OTuJ4Q0ND2NnZ4eHDhwCAhw8fqm0fADg4OKR7+4iIiIh06fjx43B0dGRe+S/mlURERGmTx0ZD38gEiqiP0LfMC0VSAgzN8+g6rGzFij8iIiLKEs7Ozpg2bRqMjIxQuHBhGBiopx3aLkhIJBIIgqA2TFtTS4aGhmrzAPjsndSfrhuAxnq+RcuWLTF37lwEBASgVatWWtcXFxeHwYMHo1GjRhrjjI2NP7v8rI6fiIiI6Hvm4uKCKVOmwNDQkHklNPNKQRDECj/mlURElJspExPxflUQ3m/YCGVUFPQsLZHfqxsK9ekDvS/8Rv5I2NQnERERZQlTU1OUKVMGxYsX13qBQZsCBQrg9evX4vsnT56IfZxklZIlS8LQ0BA3btwQh0VHR+PJkyfpXka+fPng4eGBv//+W2tzTEBK002PHz9GmTJlNF56eikpmaGhIRQKRYbiL126NAwNDXH58mVxmFwux40bN1ChQgUAgLW1tdr2AcC1a9cytB4iIiIiXWFeqU5bXlm6dGnmlURElKvJY6PxNjAQkUuXQRkVBQBQRkUhcslSvP3jD8hjo3UcYfbhE39ERET03ahZsyY2btwIR0dHKBQKzJs3T+0u7KxgYWEBT09PzJkzB3nz5kXBggXh7+8PiUQi3vWdHrNmzcLkyZORP39+reMHDhyIfv36oXjx4mjcuDH09PRw9+5d3L9/H8OHDweQ0vTS2bNn4eTkBCMjI+TNm/eL6zUzM0Pnzp3F+IsXL46VK1ciISEBP//8MwCgU6dOWL16NWbPno327dvj1q1bCAkJSfe2EREREeU0uSmvlEgkuH79OsLCwphXEhFRrqVvZIL3GzZqHfd+/QYU6tcvmyPSHT7xR0RERN+NMWPGoFixYujatStGjRqFXr16wcTEJMvX6+vrCwcHB/Tr1w89e/aEk5MTrK2tv9hUUmomJiZpXpwBgDp16mD58uU4ffo0fv75Z3To0AFr1qxBiRIlxGnGjBmDM2fOoF69emjTpk261z1q1Cg0btwYo0ePRps2bRAWFoaVK1eKF3iKFy8Of39/HD16FK1bt8bmzZvFi0JEREREP6LclFd27NgRGzduRPHixcVpmFcSEVFuo4j6KD7p9yllVBQU0drH/YgkQi5ryFuhUODq1atwcHCAvr6+rsOhXIBlLnMkJCTg8ePHKFeuXLacrP0oBEFAXFwczMzMMnSHKdG3yullLy4uDnXr1sWYMWPQvn17XYdDGZDTyx7lbFlV/j6XBzHXTNvn9g1zS8oK/A0ibbIjr2TZI11i+ct5fpQ8iHkwfUopl+Of2m5aK//0LC1R8a/T0Mukp/91Uf4ysk429UlERES53u3bt/Ho0SPY2dkhOjoaS5YsAQA0aNBAx5ERERERUU7CvJKIiEg3FEkJyO/VDZFLlmqMy+/VDYqkhEyr+PvesalPIiIiIgCrV69G69at0bNnT8THx2Pjxo0oUKCArsMiIspxLly4gH79+sHNzQ0ymQxHjhxJc9pJkyZBJpNhzZo1asM/fPiAkSNHwsnJCc7Ozhg3bhxiY2OzOHIioszBvJKIiCj7GZrnQaE+fVBwQH/oWVoCSHnSr+DAASjUpw8MzfPoOMLswyf+iIiIKNerXLkygoODdR0GEdEPIS4uDjKZDO3atcOgQYPSnO7w4cO4du0aChcurDFu1KhRePPmDYKCgiCXyzFu3DhMmjQJ8+fPz8rQiYi+GfNKIiIi3dEzNoZJzRqo2Ls3FB8+QN/KKuVJvwz0tfsjYMUfERERERERZRp3d3e4u7t/dpqIiAhMnz4dq1atQt++fdXGPXz4EKdOncL27dtha2sLAJgwYQL69OmD0aNHo0iRIlkWOxERERER5Wx/j+qJ0vL8iJUWh+Pa7bmmec/U2NQnERERERERZRulUolff/0VPj4+qFixosb4K1euwNLSUqz0AwBXV1fo6enh+vXr2RkqERERERHlMCYJgOL9e8TeuaXrUHSGT/wRERERERFRtlmxYgUMDAzg7e2tdfzbt281+sIyMDBA3rx58ebNmwyvT6FQaB0mCIL4IsoMqrLEMkXZjWWPdInlL+dR5T8KhUJrnpRTqGLPydtAWcM0IeWvYXLWlQ9dlL+MrIsVf0RERERERJQtbt68iXXr1iE4OBgSiSRb1nnjxg2tww0MDBAfHw+lUpktcVDuER8fr+sQKJdi2SNdYvnLORITEyGXy3H37l1dh5Ip0sr1KHdKSohDvqSU/w2TgatXr2bp+r7X8seKPyIiIiIiIsoWFy9eRGRkJOrXry8OUygUmD17NtatW4djx46hUKFCePfundp8ycnJ+PjxI6ysrDK8TltbW+jr66sNS0hIQFhYGExNTWFiYvJ1G0P0CUEQEB8fD1NT02yr2CYCWPZIt1j+ch49PT0YGhqiQoUKOToPUigUuHHjhtZcj3Kvx3f+hvzf/43kgLRqVegbZH41mC7Kn2qd6cGKPyIiIiIiIsoWrVu3hqurq9owHx8ftG7dGm3btgUAODo6IioqCjdv3kTVqlUBAOfOnYNSqYSdnV2G16mvr69xMq6vrw+JRCK+iDITyxXpCsse6RLLX86h+qy05Ug50Y+yHZQ5Ip/9A8t//9cXgKSEWFjkLfDZeb7F91r+WPFHRJTFvLy8YGNjg/Hjx+s6FCIiIqIsFxsbi/DwcPH9s2fPcOfOHeTNmxfFixdH/vz51aY3NDREoUKFUL58eQCAtbU16tSpg4kTJ2Lq1KmQy+WYPn06mjdvjiJFimTrtnyPmFsSEREREWn38VW4WPEHAB/fvcrSir/vlZ6uAyAiyk0OHTqEXr16wcXFBTKZDHfu3NF1SERERESZ6ubNm/D09ISnpycAwM/PD56enli8eHG6lzFv3jyUL18e3bt3R58+feDk5IRp06ZlUcQ5F3NLIiIiIqL/xEe+VHsf/e6VjiLRLT7xR0S5TnxSMvT19BCdIEceE0MkK5UwM8qew2FcXBycnJzQtGlTTJgwIVvWSURERJSdXFxccO/evXRPf+zYMY1h+fLlw/z58zMzrCyhy7wSYG5JRERERJRa0vu3au9jP0TqKBLdYsUfEeUqiXIFlp94hKAzjxEVnwxLUwP0dC2HAfWsYWyY9e0xq+58f/bsWZavi4iIiIiyjq7zSoC5JRERERFRaoqYj2rv46LepjHlj40Vf0SUowmCgHi5Il3TKpUCVpx6jEVH/xGHRcUni+9/qVMOenqf74ja1FCfnVUTERER/YCyO68EmFsSEREREWWqmFi1t4mfVATmFqz4I6IcSxAE/Lz8LC6Fvf/itAXMjXB6TH0EnXmsdXzQmcfo614ebrP/h3exSWkux7lMfmzrV4sXaIiIiIh+ILrIKwHmlkREREREmUkvLkHtfW6t+NPTdQBERN8ivZdIrCyMERmThKj4ZK3jo+KT8S42CVYWxpkXHBERERHlGMwriYiIiIhyNv0Eudr75LgYHUWiW3zij4hyLIlEgm39aqW7SSYDPT1YmhpovUhjaWqAwnlMEDLQ9bPLYHNMRERERD8eXeSVAHNLIiIiIqLMZPhJfp6cEK2jSHSLFX9ElKNJJBKYGaXvUBaflIyeruXU+mJR6elaDslKZbqXRUREREQ/FuaVREREREQ5m1GiUu29Mj5eR5HoFs9EiCjXMDUywIB61gBS+l6Jik+GpakBerqWw4B61jA21M/yGD58+ICXL1/i9evXAIDHj1P6hilUqBCsrKyyfP1ERERE9O2+h7wSYG5JRERERJSacYIAAIg3AkyTAGViwhfm+DGx4o+IchVjQ330dS+PgfUrIDpBjjwmhkhWKrPt4syxY8cwduxY8f3w4cMBAIMGDcLgwYOzJQYiIiIi+na6zisB5pZERERERKmZ/lvP99ECMH0HCKz4IyLKHVTNLhW0MAYAGEEvS9e3fv168f+2bduibdu2Wbo+IiIiIsoe2Z1XAswtiYiIiIi0USQnw/zfer44Cz3gnRJCUpJug9KRrD8rISIiIiIiIiIiIiIiIsoi7988hcG/XfwlWBgCACSs+CMiIiIiIiIiIiIiIiLKWV48vg0ASNYDks2MAAASebIuQ9IZVvwRERERERERERERERFRjvXu+SMAQKwJIBj++8QfK/6IiIiIiIiIiIiIiIiIcpbYN88AAPEmAIxSKv705AodRqQ7rPgjIiIiIiIiIiIiIiKiHCvhfQQAINFEAhilNPWpn6zUZUg6w4o/IiIiIiIiIiIiIiIiyrGSP7wHACQZ6wHGxgAAPVb8EREREREREREREREREeUsQmw0AEBuagA9YxMAgH6yoMuQdIYVf0RERERERERERERERJRzxcYDABQmhtA3MQMAGLDij4iIiIiIiIiIiIiIiChn0Y9LBAAozUygb2qeMixZlxHpDiv+iIiymJeXF2bOnKnrMIiIiIjoB8DckoiIiIhIk0HCv7V8FuYwNMuTMkyeO5/4M9B1AEREuYVcLsfChQtx8uRJPH36FBYWFnB1dcXIkSNRpEgRXYdHRERERDkIc0siIiIiov8YJSoAAPoWecWKP0M+8UdElEskxQGKJCD2TcrfpLhsWW1CQgJu376N/v37Izg4GAEBAXj8+DH69++fLesnIiIiokymo7wSYG5JRERERJSacYISAGCUvxBMLfMDyL0Vf3zij4hyl+QE4K+FwPlAIOEDYJIPcOkL1BkBGJhk6arz5MmDoKAgtWETJ05E+/bt8eLFCxQvXjxL109EREREmUiHeSXA3JKIiIiIKDWThJS/pgWLwcSiAABW/BER5UyCAMjTeWe1oATO+AMnZv83LOHDf+9dBwOSLzwIbWgGSCRfFao2MTExkEgksLS0zLRlEhEREdFXyO68EmBuSURERESUSUz/rfjLW7Q0LPJbAQCM5DoMSIdY8UdEOZcgAKsbA0/Pf3las4LAsBspd2Rrcz4QqD0UWGgLxEWmvZxSNYFeBzLlAk1iYiLmzZuH5s2bw8LC4puXR0RERERfSRd5JcDckoiIiIgoE8THRsEsKeV/q9I2MDA0RiwAfQGIjf4I8zx5dRpfdmMff0SUw6XzIolFESD2bcqd2NokfADi3qZMlw3kcjmGDh0KQRAwderUbFknEREREX1OzswrAeaWRERERJS7vQy7K/5frIwNLAsUFt9/ePtcFyHpFJ/4I6KcSyJJuUM6vU0y6Rum9L2i7SKNST4gTzGg95HPLyMTmmOSy+UYNmwYXrx4gbVr1/KObCIiIiJd00VeCTC3JCIiIiLKBG/C78ISQJwRYGpuCSNjMzxHypNvMR9eA6is2wCzGSv+iChnk0gAI/P0TZsUB7j0Ve+LRcWlL6BITv+yvpLqwkxYWBjWrVuH/PnzZ+n6iIiIiCidclheCTC3JCIiIiICgI+vwmAJIN4k5b2+gQHkhoCxHIj78IXm939ArPgjotzDyAyoMyLl//OBKXdom+RLuThTZwRgYJKlq5fL5RgyZAhu376NwMBAKBQKvHnzBgCQN29eGBkZZen6iYiIiCiT6DivBJhbEhERERGpxEe+SvmbKg1PMkip+IuNYsUfEdGPzcAEqD0MqDsKSIgCTCwBhTxbLs5ERETg2LFjAIDWrVurjVu3bh1cXFyyPAYiIiIiyiQ6zCsB5pZERERERCpJ79+m/DXRE4fJ/639Soz5oIOIdIsVf0SU+xiZpfw1L5TyVz9r74Zev369+P+9e/eydF1ERERElI2yOa8EmFsSEREREX0qOfojACDJWF8cJjdM+ZsY/UEHEemW3pcnISIiIiIiIiIiIiIiIvr+SGJjAQDJpv8965b8bx1gcnyMLkLSKVb8ERERERERERERERERUY6kF5cAAFCYGovDkg0lAAB5HCv+iIiIiIiIiIiIiIiIiHIE/fiklH/MTcVhCoOUij9FQpwuQtIpVvwRERERERERERERERFRjmSYoAAASCwsxWEK/ZSKPyEhXicx6RIr/oiIiIiIiIiIiIiIiChHMkpUAgAM8uYThykMU6q/lEmJughJp1jxR0RERERERERERERERDmSSYKQ8jd/EXGY0iCl+kuSlKSTmHSJFX9ERERERERERERERESUI5kkpPzNU7iUOEz57xN/YMUfERERERERERERERER0fdPkZwM838r/vIVLycOVxoYAAAkcrkuwtIpVvwRERERERERERERERFRjhMZEQaDlC7+ULxcZXG4YGQIAJAkKXQRlk6x4o+I6Dvy7NkzyGQy3LlzR9eh5HrBwcFwdnbWdRhZ7kffzqzavtzwXZXJZDhy5Iiuw4Cvry8GDBjwzdMQ4O/vj9atW+s6DCLKJrnhtyqn+NHzLZUffTuZV3495pU/HuaVRPQ9eRV2FwAg1wfyW/3X1KdgmPLEn14yK/6IiIgIQLNmzXDw4EFdh5HlfqTt9PDwwJo1a3QdhsjLywszZ8786vnPnz8PmUwmvlxdXTF48GA8ffo0E6NM2+nTp1G3bt1sWde3Gj9+PGbNmpVpy8usCz4ZuZD34sUL9OnTB/b29qhVqxZmz56N5OTkdK8rODhYrbykfkVGRn7LZmh1/PhxtG/fHnZ2dqhevbra/nr//j18fHzg5uaGqlWrwt3dHdOmTUNMTIw4zaflW/V68+aNOI2/v7/G+CZNmqjF4eXlpTHNpEmT1Ka5fv06unfvDmdnZ1SvXh0+Pj64e/eu1u0KCwuDo6OjxoVdbfvX1tZWbRpBELB48WI0atQI9vb26NGjB548eZKh/UpEP6YfKd/6nB9pO5lXZi7mlcwrP4d5pfa8ctGiRXBzc4OdnR3zSqIvePf8EQAgzhjQ/7d5TwCQGBsDAPTkua/iz+DLkxAR/Vji5fEw0DNAdFI08hjlQbIyGaaGpjqL5+PHjzAwMIC5uXmWLF/+HbRjLZfLYWhomOXrSUpKgpGRUaYsy8TEBCYmJpmyrMyWW7aTUhw4cADm5uYICwvDxIkT0a9fP+zatQv6+vpq0wmCAIVCAQODzEnvrKysMmU52SFPnjy6DuGbKBQK9O3bF4UKFcLmzZvx+vVrjBkzBoaGhhgxYkS6ltGsWTPUqVNHbZivry+SkpJQsGDBTI334MGDmDhxIoYPH46aNWtCoVDg/v374ng9PT00aNAAw4YNQ4ECBRAeHo6pU6fi48ePmD9/vtqyDhw4AAsLC/H9p7FWrFgRQUFB4vtPyz0AdOjQAUOGDBHfm5r+95seGxuLX375BR4eHpg8eTIUCgX8/f3h4+OD48ePq/02yeVyjBgxAs7Ozrhy5YrGeiwsLHDgwAHxvUQiURu/YsUKrF+/HlOnToW1tTUWLVoEHx8f7Nu3D8b/nnC+ePECxYsX11g25Vy5Ma/Mjpzue4ght+RbuWU7KQXzyi9jXsm88nvKK2fNmoWSJUsyryT6gpjX4SgCIP6TNETyb46jnyxkf1A6xif+iChXSVQkYvXN1ai3tR7ct7qj3tZ6CLoZhERFYrbGkZycjOPHj2PIkCFwc3NL805LhUKBsWPHokmTJnjx4gUA4MiRI2jTpg1sbW3RoEEDBAQEqN29J5PJsGnTJvTr1w+Ojo5YtWoVFAoFxo0bBw8PD9jZ2aFx48ZYu3at2rrOnz+Pn3/+GQ4ODnB2dkanTp3w/PlzrXElJSVh2rRpcHNzg62tLerXr4/AwECtMTg4OGD58uUAgGPHjqFdu3awtbWFi4sLBg4cmOY+UjUdsnnzZri7u8Pe3h5Dhw5FdHS0OI3q7slly5bBzc1NvGNPW1Myzs7OCA4OBvDf3ZKHDh2Cl5cX7O3t0apVK7WE/NOmfFTxhIaGwsPDA9WqVcPw4cPV7jSMiYnByJEj4eDgADc3N6xZs+aLd+fmlu1MzcPDA0uXLsXo0aPh6OiI+vXr4+jRo3j37h369+8PR0dHtGzZEjdu3FCb7+LFi+jSpQvs7Ozg7u6OGTNmIC4uDkDK3ZnPnz+Hn5+feNdkaqdOnULTpk3h6OgIHx8fvH79WhynVCoREBCAunXromrVqmjdujVOnjypNv/169fh6ekJW1tbtG3b9pubYkrPZ6NSsGBBFC5cGNWrV8fAgQPx4MEDhIWFiXe2njhxAm3btoWtrS0uXbqk9a7imTNnwsvLS3zv5eWFGTNmYM6cOahRowZq164Nf39/tXlSl6/0xrt161axHA8cOBBBQUFfbBLr3r178Pb2hp2dHVxcXDBx4kTExsZqTBcQEICaNWvCyckJkyZNQlJSkjju021WKpUIDAwUj3mtWrVSO7EGgH/++Qd9+/aFk5MTHB0d0aVLF4SHh8Pf3x8hISE4evSoWJbOnz+vNfaTJ0+ic+fOcHZ2houLC/r27Yvw8HBxfIMGDQAAnp6ekMlkap9BaqdPn8aDBw8wd+5cVKpUCe7u7hg6dCg2btyIpKQkCIKAHj16wMfHB4KQcsLy4cMH1K1bF4sWLQKQcrHVyspKfOnr6+P8+fNo166dxvo+d7wBgG3btqFp06awtbVFkyZNsHHjRnFccnIyZs6ciV9//RWdO3dGuXLlUKFCBTRr1kycJm/evOjSpQtsbW1RokQJ1KpVC126dMHFixc1YilYsKBa3Hp66qcn+vr6auMLFCigsYxPtz31BZ9Hjx7hw4cPGDJkCMqXL4+KFSti4MCBePv2rfi7qrJw4UKUL18eTZs21fo5SSQStfUUKlRIHCcIAtatW4d+/fqhXr16kMlkmDNnDl6/fq12nO7WrRs6dOiATZs24ePHj1rXQzlHbssrVTkd88q08y1vb2+4urqidevWzCuzcDtTY16Z8tnY2Njg6NGj8Pb2Zl7JvDLb88p69erB1dUVw4YNY175r8zIK/v374+GDRvCxsaGeSXRFyR+SHnCN9FEvRJdYpJSea+frMz2mHSNFX9ElKMJgoA4eVy6XjFJMVh5fSWWX1+OqKQoAEBUUhSWXV+GlddXIiYp5ovLUCXGX+vevXuYNWsW3N3dMWbMGBQoUADr1q2DjY2NxrRJSUkYOnQo7t69i02bNqF48eK4ePEixowZA29vb+zbtw/Tpk1DcHCweAFEJSAgAD/99BN27dqF1q1bQ6lUomjRoli0aBH27t2LgQMHYsGCBdi3bx+AlIR74MCBqF69Onbt2oUtW7agY8eOGnedqaxfvx7Hjh3DwoULceDAAcydOxclSpTQGsPu3bvRrl07HD9+HIMGDYK7uztCQ0Oxdu1a2NnZfXZ/hYeHY//+/Vi+fDlWrlyJO3fuYMqUKWrTnD17Fo8fP0ZQUJDaRaL0WLBgAXx8fBAaGoqyZcti5MiRn20CJTw8HEePHsXy5csRGBiICxcuYMWKFeL4WbNm4cqVK1i2bBlWr16Nixcv4tatW1+MI7dsZ2pr166Fk5MTQkJC4O7ujtGjR2P06NFo1aoVgoODUbp0aYwZM0b8zoWHh+OXX35Bo0aNsGvXLixYsACXLl3C9OnTAaRcWCpatCiGDBmC06dP4/Tp0+K6EhISsHr1asyZMwcbNmzAy5cvMXv2bHH8unXrEBQUhDFjxmDXrl1wc3PDgAEDxKZUYmNj0bdvX1hbWyM4OBiDBw9Wm/9bZPSzUd1Fn/pJ3vnz52PkyJHYt2+fxoWpzwkJCYGZmRm2bt2KX3/9FUuWLMFff/311fFeunQJkydPhre3N0JDQ+Hq6qpxbPpUXFwcfHx8kDdvXmzfvh0LFy7EmTNnxM9V5ezZs3j48CHWr1+P33//HYcPH8aSJUvSXO7q1auxc+dOTJ06FXv37kWPHj3w66+/4u+//wYAREREoFu3bjAyMsLatWsRHByMdu3aITk5Gb169ULTpk1Rp04dsSw5OjpqXU98fDx69uyJHTt2YM2aNZBIJBg4cCCUypSTim3btgEA1qxZg9OnT2tcBFO5evUqpFKp2gm/m5sbYmJi8ODBA0gkEsyePRs3btzAunXrAACTJ09GkSJF0rzQHRoaChMTE40mjL50vNm1axcWLVqE4cOHY9++fRgxYgQWL16MkJAQAMDt27cREREBPT09eHp6ws3NDb1791a7M/tTEREROHz4MKpXr64xTrWMnj174tKlSxrjw8LC4ObmhgYNGmDkyJEaF1UAYPfu3XBxcUGLFi0wf/58xMfHi+PKlSuHfPnyYfv27UhKSkJCQgK2b98Oa2trtd+ts2fP4sCBA5g8eXKa2xEXF4f69evD3d0d/fv3xz///COOe/bsGd68eQNXV1dxWJ48eWBvb692IXPjxo3w8PDA+vXr4ebmhqFDh+L48eNQKHJf0zPfo+zOK781t8zuvFKV0zGv1G7BggXo1asX/vzzT+aV2bCdqTGvTLFkyRL06tWLeWUm55WBgYEIDQ1lXvmZvHLZsmXw9/dnXvkv5pVE2S/5w3sAQJLxJxX+xqqKv9z3xB+b+iSiHEsQBHjv98bVN1e/OG1+4/w40O4ANt3dpHX8prub0LNqTzTZ0QTvE9+nuRzHwo5Y22RtmhcutHn//j127dqF0NBQ/PPPP3B3d8fkyZNRr169NJvViY2NRZ8+fZCUlIR169aJzY0EBASgT58+aNOmDQCgVKlSGDp0KObOnYtBgwaJ87do0QLt2rVLuYAVFwdDQ0O15ipKlSqFq1ev4sCBA2jWrBliYmIQHR2N+vXro3Tp0gAAa2vrNLfp5cuXKFOmDKpVqwaJRKJxcSZ1DCojRoxAs2bN1OLQdmEqtcTERMyZMwdFihQBAEyYMAF9+/aFr6+v2FyMmZkZZsyY8VVNFPXq1Qv16tUDAAwZMgTNmzdHWFhYmtsuCAL8/PzEu/5atWqFs2fPincuh4aGYt68eahVqxYAwM/PT6OJlNy8nanVrVsXnTp1AgAMHDgQf/75J2xtbcU7In/55Rd07NgRb9++hZWVFQIDA9GyZUv06NEDAFC2bFmMHz8eXl5emDJlCvLlywd9fX2Ym5trNCUkl8sxdepUsWx37doVS5cuFcevWrUKv/zyC5o3bw4A+PXXX3H+/HmsXbsWkydPxp49e6BUKvHbb7/B2NgYFStWxKtXrzQuon2NjHw2r1+/xqpVq1CkSBGUK1dOPOkbMmQIateuneF1y2Qy8bhRtmxZbNiwAWfPnv3ssj4X74YNG1C3bl34+PgAgBjj8ePH01zenj17kJSUhNmzZ8PMzAwAMGnSJPTr1w+jRo0SL1gYGRnht99+g6mpKSpWrIghQ4Zgzpw5GDp0qMadvElJSVi9ejWCgoLg5OQEIOWYd+nSJWzZsgU1atTAxo0bYWFhgd9//11skqdcuXLiMkxMTJCUlPTFZqkaN26s9v63335DrVq18ODBA0ilUvEu4nz58n12WW/fvlW7OANAfK/qm6RIkSKYOnUqxowZg7dv3+LkyZMICQlJswmu7du3o0WLFhpNrn3peOPv7w9fX180atRI3HcPHjzAli1b0KZNG/FJooCAAPj6+qJEiRIICgqCl5cXDh48iHz58onrGjFiBI4ePYqEhATUr19f7ekNKysrTJ06FVWrVkVSUhK2bdsGb29vbN26FVWqVAEA2NnZwc/PD+XKlcObN2+wZMkSdO3aFbt37xaPTy1atEDx4sVRuHBh3Lt3D/PmzcPjx48REBAAIKUZpfXr12PgwIHi975MmTJYtWqVuO/ev3+PsWPHYu7cuWp3dadWrlw5/Pbbb5DJZIiOjsbq1avRqVMn7N27F0WLFhU/p0+blCpYsCDevn0rvi9WrBj69euHfv364fr16wgNDYWvry8MDAzQsmVLtGnTBlKpVGsMlLV0kVcCGc8tdZlXpsa8UpPqNzIuLg6DBw9GixYtmFdm4Xamxrwyhbe3N+rVqweJRMK8MhPzysDAQAQFBYkVdswr/6M63hQuXBhxcXHMK8G8kkhXhJiUp43lpurHMX3TlN8Dg/R3c/rDYMUfEeVo6b1IUsi0EN4lvBPvyP5UVFIU3ie+RyHTQl+8QJNRGzZsQEBAAJydnXH48GEUK1bsi/OMHDkSRYsWxdq1a9WS67t37+Ly5ctqdzsqFAokJiYiPj5ebH++atWqGsvcuHEjduzYgRcvXiAxMRFyuVy8QJIvXz60bdsWPj4+qF27NmrVqoWmTZuicOHCWuNr06YNevXqhSZNmqBOnTqoV68e3Nzc1Kb5NIY7d+6gffv2X9z21IoVKyZetAAAR0dHKJVKPH78WDzZkUqlX90vSeq7WFXLe/fuXZoXLkqUKKGWvBcuXFjs3PzZs2eQy+Vqd5vnyZNH7aQvLbllO9OKSXUimvqkRHWSExkZCSsrK9y9exf37t3D7t27xWkEQYBSqcSzZ88+e0HR1NRUvDjz6fbExMTg9evXYgWRipOTk9hJ+8OHDyGTycS+FACkeaduRqXns3F3d4cgCIiPj4eNjQ38/f3VysKnHcF/zbpV61ftl6+J9/Hjx2jYsKHa9HZ2dp+9QKPat6qLM0DKvleVf1XZkMlkav1rODo6Ii4uDi9fvtS4QBwWFoaEhATxQpGKXC5HpUqVAKQcj5ydnb+5j6gnT55g8eLFuHbtGt6/fy8+SfDy5cssOclu2rQpjhw5gj/++ANTpkxB2bJltU535coVPHz4EHPmzNEY97njjbm5OcLDwzF+/HhMnDhRnCY5OVmsKFDddd6vXz/xApWfnx/q1q2LAwcOiBdeAWDs2LEYOHAgnjx5gt9//x1+fn7ihc3y5cujfPny4rROTk54+vQp1qxZg7lz5wJIKfsqNjY2sLe3R/369bF//37x96Rjx47iNDKZDFZWVujRowfCw8NRunRpJCQkYPz48XBycsL8+fOhVCqxevVq9O3bF9u3b4eJiQkmTpyIFi1aaL1zPPV+Sv29d3R0RLNmzbB582YMGzYszfk+x87ODnZ2dvD19cXvv/+OoKAgnDlzBjt37vyq5dG3Y17JvPJHyLdyy3amFVNuzisrVqwo/s+8MkVm5JXx8fHo1auX2nDmlSlUxxtVrA4ODswrmVcS6UZcytO5ClP1Y7GBWcrxxoBP/BER5RwSiQRrm6xFfHL8lycGYKhnCEsjS60XaSyNLFHYtDA2NNvw2WWYGphm6Gk/IKVzaH19fezcuRPNmzdH48aN0apVK7i4uGjcUaji7u6OXbt24cqVK+LdrwDEO4hVd8yllvrkMfXJDgDs3bsXs2fPxpgxY+Do6Ahzc3OsWrUK165dE6fx8/ODl5cXTp06hf3792PhwoUICgqCg4ODxrqqVKmCo0eP4uTJkzhz5gyGDRsGV1dXLF68OM0YPr07MLOkPmlTkUgkGk1naWvmJvXJmepzVZ18aKPtDshvbf41vX607Uy9DlVM2uJUrTcuLg6dOnXS2pfFly56fro92vabrqTns1HdRVygQAGtd41+WjbSWy6+Zr9ktCzpgqp/nuXLl6No0aJq41QXtjLreNSvXz+UKFECM2bMQOHChaFUKtGiRQu1JrPSo1ChQrh+/braMNXdvKnv6I6Pj8fNmzehr6+PsLCwNJe3bds2VKpUSevF+s9R7bvp06fD3t5ebZzq90oVT+qLiEZGRihVqhRevnypNo+qzxJra2vkzZsXXbt2xYABA9K8+G9ra4vLly+nGZ+lpSXKli2r1t/Np1Rxh4WFoXTp0ti9ezeeP3+OLVu2iNswb9481KhRA0ePHkXz5s1x7tw5HDt2DKtXrwbw38XfypUrY9q0afj555811mNoaIhKlSqJsaj2S2RkpNr3NDIyMs2nkB49eoSdO3di9+7diI6ORocOHbSui7KHLvJKIOO5JfPKFMwrv82Ptp3MK1No2w/MK7+NKjcKDAxUq1AHmFd+CfPKzMkrU28f80qitOnHpfSxrTRVPyYbmVsC4BN/REQ5jkQigZmh2ZcnBBAvj0dXm65Ydn2ZxriuNl2RrExO97IyokiRIhgwYAAGDBiAy5cvIzQ0FIMHD4a5uTlatmyJ1q1bq92dCQCdO3dGxYoVMWDAAAQGBqJGjRoAgMqVK+Px48coU6ZMhmK4fPkyHB0d0bVrV3GYtgS3cuXKqFy5Mvr27YuOHTtiz549Wi/QACnNXDRr1gzNmjVD48aN0bt3b3z48EGtOY7UpFIpzp49q7VD8LS8fPkSERER4knW1atXoaen98W7gAsUKIDXr1+L7588eaLWNn9WKFmyJAwNDXHjxg0UL14cABAdHY0nT558sRP63LKd36Jy5cp48ODBZ8u+oaFhhi8WWFhYoHDhwrh8+bL4PQNSvjOqu8+tra2xc+dOJCYmihdCr169mvGN+EolS5aEpaVluqcvUKCAWh8RQMqdyN96F/KXlCtXDjdv3lQbduPGjc/OY21tjZCQEMTFxYkXdS9fvqxR/u/du4eEhATxwsrVq1dhZmam9eKctbU1jIyM8PLlS7i4uGhdr0wmQ0hICORyudb9kp6y9P79ezx+/BgzZswQy/7Fixc1lgPgi/1sODg4YPny5YiMjBSfSjhz5gwsLCxQoUIFcbpZs2ZBT08PK1asQJ8+feDu7q52ER9IadJv//79GDlypNZ1fe54U6hQIRQuXBhPnz5Fq1attM5ftWpVGBkZ4fHjx+J2y+VyPH/+XDwmaKO6+JeUlJTmNHfv3v1s01WxsbF4+vTpZ6e5c+cOgP8umCQkJEBPT0+tYkX1XvUZb9myRe0zOnr0KFasWIHNmzdrXORTUSgUuH//vnj3eMmSJWFlZYWzZ8+Kx6mYmBhcu3YNnTt3Fud79+4d9u3bh507d+LWrVtwdXXFyJEj0bBhQ7WKFtIN5pXpw7zy+863cst2fgvmlcwrM5pXvnjxQu0zTY15ZYRYOcW8MvPzStWTpcwriT7PMOHfmyQszNWGm1jkTxmfsXsofgis+COiXMPU0BQ+dilNv228uxFRSVGwNLJEV5uu8LHzgbF+1idGTk5OcHJywvjx43HkyBEEBwdj9erVCAkJ0WgexcvLCwqFAn379sWKFSvg7OyMgQMHol+/fihevDgaN24MPT093L17F/fv38fw4cPTXG+ZMmWwc+dOnDp1CiVLlsTOnTtx48YNlCxZEgDw9OlTbN26FR4eHihcuDAeP36MJ0+eoHXr1gCA69evY/To0Vi7di2KFCmCoKAgWFlZoVKlStDT08OBAwdgZWX12ZPIQYMGoUePHihdujSaN2+O5ORknDhxAn369AGQ0pF8RESEWhMixsbG8PX1xZgxYxATE4MZM2agadOmX+wfoWbNmti4cSMcHR2hUCgwb968LD85tbCwgKenJ+bMmYO8efOiYMGC8Pf3h0QiUTsxyC3bmdlUfbNMmzYN7du3h6mpKR48eIAzZ85g0qRJAFKakrpw4QKaN28OQ0NDsR+ML/Hx8YG/vz9Kly4NGxsbBAcH4+7du5g3bx6AlL4eFixYIPZZ8fz5c/EOztSaNGmCkSNH4qeffhKHvXv3TjxhVPnS5/qtatasiVWrViE0NBQODg7YtWsX/vnnH1SuXDlL19utWzd069YNQUFBqF+/Ps6dO4eTJ09+tly0bNkSixcvhq+vLwYNGoR3795h+vTpaN26tVrfJElJSRg/fjz69++P58+fw9/fH926ddP6dIuFhQW8vLzg5+cHQRBQrVo1REdH4/Lly7CwsECbNm3QtWtXrF+/HiNGjECfPn2QJ08eXL16FXZ2dihfvjxKlCiB06dP49GjR8iXLx/y5Mmj8d3Kmzcv8uXLhy1btsDKygovXrzA/Pnz1aYpWLAgTExMcOrUKRQtWhTGxsZi00apubm5oUKFChg9ejR+/fVXvHnzBgsXLkTXrl3Fu8mPHz+OHTt2YMuWLahSpQp8fHzg6+uLXbt2IW/evOKy9u3bB4VCkeYFli8db4YMGYIZM2YgT548qFOnDpKSknDz5k1ERUWhZ8+esLCwQKdOneDv749ixYqhePHiWLVqFYCU7wAAnDhxAm/fvoWtrS3MzMzw4MEDzJkzB05OTuLvzpo1a1CyZElUrFgRiYmJ2LZtG86dO6f23Zo9ezbq16+P4sWL4/Xr1/D394eenh5atGgBIKWiYffu3XB3d0e+fPlw7949+Pn5oXr16uLd0K6urpgzZw6mTp0KLy8vKJVK/PHHH9DX1xcrhj9t0u3mzZvQ09NTa1YrICAADg4OKFOmDKKiorBq1Sq8ePFCbBpKIpHA29sby5cvR7FixVC+fHksXrwYhQsXVmuqrEOHDjA2NoanpyeWLFmS5l3q9P3L7XllaGgo88osxLySeSXzyu8nr+zVqxfzyi/klaNHj8bbt28xc+ZM5pWZmFcuW7YMZcqUQcmSJbFo0SLmlUSfYZiYUvmulyef2nATy5T3hnzij4jox2asb4yeVXviF7tfEC2PRh7DPEhWJmfLxRm1OIyN0bx5czRv3hwREREwNzfXOl2PHj0gCAL69OmDlStXok6dOli+fDmWLFmCFStWwMDAAOXLl/9iHyedOnXC3bt3MXz4cEgkEjRv3hxdunTByZMnAaQ06fLo0SOEhITgw4cPKFy4MLp27Sq2qR8fH4/Hjx+LzYyYm5tj5cqVCAsLg56eHmxtbfHHH3+k2cQUALi4uGDRokVYunQp/vjjD1hYWKi1e//mzRuN5jxKly6Nn376Cb/88gs+fvyIevXqYfLkyV/cv2PGjMG4cePQtWtXFC5cGOPGjcOtW7e+ON+38vX1xeTJk9GvXz9YWFigd+/eePnypdrddrllOzObjY0N1q9fj4ULF6JLly4AUjqHb9asmTjNkCFDMGnSJDRs2BBJSUm4d+9eupbt7e2NmJgYzJo1S+xXZOnSpWI/F+bm5li+fDkmT54MT09PVKhQAaNGjcLgwYPVlvP48WNER0erDduzZw/27NmjNmzo0KFpnjxnhjp16mDAgAGYO3cuEhMT0a5dO3h6euL+/ftZtk4AqFatGqZOnYqAgAAsXLgQbm5u6NGjBzZu3JjmPKampli1ahVmzpyJn3/+GaampmjUqBF8fX3VpqtVqxbKlCmDrl27IikpCS1atNDY/6kNGDAARYoUQWBgIJ49e4Y8efKgcuXK6NevHwAgf/78WLt2LebOnQsvLy/o6emhUqVKqFatGoCUk+i///4b7dq1Q1xcHNatW6fx9KCenh4WLFiAGTNmoEWLFihXrhwmTJig1myYgYEBJkyYgCVLlmDx4sVwdnbG+vXrNeLV19fH8uXLMWXKFHTs2BGmpqZo06YNhgwZAiDlQt/48eMxePBgVKlSBQAwePBgnD59GpMnT8bChQvFZe3YsQM//fRTmhfMv3S8ad++PUxMTLBq1SrMmTMHZmZmkEql6N69uzjN6NGjYWBggNGjRyMhIQH29vZYu3ateKHI2NgY27Ztg5+fH5KSklCsWDH89NNP4gV5IOVu7tmzZyMiIgKmpqaQSqUICgpCzZo1xWlevXqFESNG4MOHDyhQoACqVauGrVu3ihdfDQ0NcfbsWaxbtw5xcXEoVqwYGjVqhAEDBojLsLa2xvLlyxEQEICOHTuKn/XKlSszdHEkKioKEydOxJs3b5A3b15UqVIFmzdvVrtz/pdffkF8fDxmzJiB6OhoVKtWDStXrlQ7NgYGBn627yjKWXJzXnnnzh3mlVmMeSXzSuaV30deOWzYMBQoUIB5pRaq402fPn2YV2ZRXjlp0iRERUUxryT6AuOElIo/43wF1Yab5035bhrlwoo/ifC9NEaeTRQKBa5evQoHBwfo6+vrOhzKBVjmMkdCQgIeP36McuXKZVmfHj8iQRDEpk6y8q7ZrODv748jR47k6M6o4+LiULduXYwZMybNi2g/6nbm5LJHmWfChAl49OgRNm3alKXrGTFiBPT09DBv3jyWPdKprCp/n8uDmGum7XP7hrll7pJd+VZW/gbl5rySviw35D+6yCspfXJD+fvR/Ch5EPNgUjldoxIKRgFPfbuiUY8J4vDwe5cR2zqlefrSF87BPE/etBaRYboofxlZJ5/4IyIiyiS3b9/Go0ePYGdnh+joaCxZsgQA0KBBAx1Hlrlyy3ZSxq1atQq1a9eGqakpTp48idDQ0HQ9ZfC1kpOT8eTJE1y9ehUdO3bMsvUQERFlt9ySb+WW7aSMY15JRETpZZqQ8jdf8bJqwy0LFUPsv/9HvXuZqRV/3ztW/BEREWWi1atX4/HjxzA0NESVKlWwcePGdPcJkpOktZ0XL17EL7/8AiDlzs9P7/i8cuWKLsKlbHL9+nWsXLkSsbGxKFWqFMaPH5+ld+v/888/6NSpE1xcXMQm5IiIiH4UzCv/yyu1YV75Y2NeSURE6REb/RFmSSn/FyopVRuXJ68VngPQAxD1/jWKlbHJ9vh0hU19EmUxlrnM8aM0Q5Dd2OQGZbeEhARERERAEATEx8fD1NRUreyVKVNGh9FRbsDjHukSm/r8vrCpT8pu/A3KXKq8Mi3MK//Dske6xPKX8/woeRDzYAKAhzfOIKm9DwCg7KXzMDVX75f0atVKME4GEpbNgGP9dpm2Xjb1SURERLmGiYkJypQpw5M/IqJc7MKFC1i1ahVu3ryJN2/eYMmSJWjYsCEAQC6XY+HChTh58iSePn0KCwsLuLq6YuTIkShSpIi4jA8fPmD69On43//+Bz09PTRq1Ajjx4+Hubm5rjaLiLKZKq8kIiIiSsvb5w9gCSDOGBqVfgCQZAgYJwPx0e+yPzgd0tN1AERERERERPTjiIuLg0wm09oXU0JCAm7fvo3+/fsjODgYAQEBePz4Mfr376823ahRo/DgwQMEBQVh+fLluHjxIiZNmpRdm0BERERERDnAx1dhAIB4Y+3jk/999C0h5mM2RfR94BN/RERERERElGnc3d3h7u6udVyePHkQFBSkNmzixIlo3749Xrx4geLFi+Phw4c4deoUtm/fDltbWwDAhAkT0KdPH4wePVrtyUAiIiIiIsq94t6+BADEp9FqrfzfGrDEmPfZFNH3gU/8ERERERERkc7ExMRAIpHA0jKlaZ4rV67A0tJSrPQDAFdXV+jp6eH69eu6CpOIiIiIiL4z8g+RAIAkE+1VXaon/uRxMdkV0neBT/wRERERERGRTiQmJmLevHlo3rw5LCwsAABv375FgQIF1KYzMDBA3rx58ebNmwyvQ6FQaB0mCIL4IsoMqrLEMkXZjWWPdInlL+dR5T8KhUJrnpRTqGLPydtA3y456gMAIMlYX2tZSDaQABAgj43O1LKii/KXkXWx4o+IiIiIiIiynVwux9ChQyEIAqZOnZpl67lx44bW4QYGBoiPj4dSqcyydVPuFB8fr+sQKJdi2SNdYvnLORITEyGXy3H37l1dh5Ip0sr1KHdQRkUBAOTG+rh69arGeNUTf7Ef3mkd/62+1/Kn84q/jRs3YtWqVXjz5g1sbGwwceJE2NnZpTn9mjVr8Oeff+Lly5fInz8/GjdujJEjR8LYOI3eG4mIcpBnz56hQYMGCA0NRaVKlXQdTq4WHByM3377DRcvXtR1KFnqR9/OrNq+3PBdlclkWLJkCRo2bKjTOHx9fREVFYWlS5d+0zQE+Pv748iRI9i5c6euQ/khfC/fkZxKLpdj2LBhePHiBdauXSs+7QcAhQoVwrt379SmT05OxsePH2FlZZXhddna2kJfX19tWEJCAsLCwmBqagoTkzQ6BMnhnj17hoYNGyIkJOSH/a363giCgPj4eJiamkIikYjDg4OD4efnhwsXLugwuqz3o29nVm1fZnxX0yp73wsbGxsEBATo/DfT19cX0dHRWLJkyTdNQyl55dGjRxEaGvrdl7+cILu/I3p6ejA0NESFChVydB6kUChw48YNrbke5R4Pk5IBAEpzEzg4OGiM32ugB0ABY0Dr+K+li/KnWmd66LTib9++ffDz88PUqVNhb2+PtWvXwsfHBwcOHEDBggU1pt+9ezfmz5+P3377DY6Ojnjy5Al8fX0hkUgwduxYHWwBERH9qJo1awZ3d3ddh5HlfqTt9PDwgLe3N3r06KHrUAAAXl5esLGxwfjx479q/vPnz8Pb21t8X7BgQVSrVg2jR49GqVKlMivMNJ0+fRp58+bN8vVkhvHjx2dq00KZVZGYkQriFy9eYMqUKTh//jzMzMzg6emJkSNHwsAgfel6cHBwmvnwmTNntObW3+L48eNYsmQJ7t27B2NjY1SvXj1D+8vf3x8BAQFqw8qVK4cDBw5kKI5du3Zh5cqVCAsLQ548eVCnTh2MHj0a+fPn15h27969GDFiBBo0aKAR68OHDzF37lxcuHABCoUC1tbW8Pf3R/HixdMdy4cPHzB9+nT873//g56eHho1aoTx48fD3Nw8Q9uUG6gq/cLCwrBu3TqNz8vR0RFRUVG4efMmqlatCgA4d+4clErlZ28STYu+vr7Gybi+vj4kEon4+hGptutH3sbv1af7vHnz5qhXr94P/zn8SNupLa9M/Z3KTOn5rqY3r0xrGd9LXqnrspGez3DChAkQBCHTYv1R80pt+/Jrf2+YV6ZIvf+yOq9UrUtbjpQT/SjbQV9HPz4p5R9zU63lQPFvxZ8gT8iScvK9lj+dVvwFBQWhQ4cOaNeuHQBg6tSpOH78OHbs2IE+ffpoTH/lyhU4OTmhZcuWAICSJUuiRYsWuHbtWrbGTUQ5mzI+HhIDAyiioqBvaQkhORl6pqY6i+fjx48wMDDIsguDcrk8S5ab0RgMDQ2zfD1JSUkwMjLKlGWZmJh8t3e+5ZbtpBQHDhyAubk5wsLCMHHiRPTr1w+7du3SSCxVfTSk94T+S77mqRpdyZMnj65D+CYKhQJ9+/ZFoUKFsHnzZrx+/RpjxoyBoaEhRowYka5lNGvWDHXq1FEb5uvri6SkpEyv9Dt48CAmTpyI4cOHo2bNmlAoFLh//36Gl1OxYkUEBQWJ7zN6snTp0iWMGTMGY8eORf369REREYEpU6Zg4sSJGhd/nj17htmzZ8PZ2VljOeHh4ejSpQvatWuHIUOGwMLCAv/880+GWxQZNWoU3rx5g6VLl8LAwADjxo3DpEmTMH/+fAApTSrFxsZq9F33I4qNjUV4eLj4/tmzZ7hz5w7y5s0LKysrDBkyBLdv30ZgYCAUCoXYb1/evHlhZGQEa2tr1KlTBxMnTsTUqVMhl8sxffp0NG/eHEWKFNHVZmmVG/PK7MjpvocYcku+lVu2k1Iwr/wy5pXMK7+nvDIoKAhyuTxX55VEX2KYkNLvncTCUut4paFeyj+JSdkV0ndBT1crTkpKwq1bt+Dq6vpfMHp6cHV1xZUrV7TO4+joiFu3buH69esAgKdPn+LEiRNf9aSCqvNSvvjKjhfLXOa8VJ0Pf8tLmZiIyJUrcb+2G/6p7Yb7td0QuXIVlImJmbL89L7kcjn+97//YciQIXBzc0N4eLg4Dvivo+Xk5GSMHTsWTZo0wfPnzyEIAo4cOYI2bdrA1tYWDRo0gL+/P+RyuTiPTCbDpk2b0K9fPzg6OmLVqlVITk7GuHHj4OHhATs7OzRu3Bhr165Vi+ncuXP4+eef4eDgAGdnZ3Tq1AnPnj3TGn9iYiKmTp0KNzc32Nraon79+li+fLnWGBwcHLBs2TIIgoCjR4+iXbt2sLW1hYuLCwYOHJjmPlq8eDFat26NP//8E+7u7rC3t8fQoUMRFRUlTjNmzBgMGDAAy5Ytg5ubG5o0aSKu//Dhw2rLc3Z2xo4dOyAIAp4+fQqZTIaDBw/Cy8sL9vb2aNWqFS5fvixOv2PHDjg7O2vEExoaivr166NatWoYPnw4oqOjxWmio6MxcuRIODg4wM3NDUFBQejWrRtmzpyZK7czdVlO/apfvz6WLl2K0aNHw9HREfXr18fRo0cRGRmJ/v37w9HRES1btsT169fV5rt48SK6dOkCOzs7uLu7Y/r06YiNjYUgCOjWrRueP38OPz8/yGQyyGQytThOnjyJpk2bwtHRET4+PoiIiFDr0DwgIAB169ZF1apV0bp1a5w8eVJt3deuXYOnpydsbW3Rtm1b3L59O83tS88+SM9no5q/QIECsLKygrOzMwYMGIAHDx7gyZMnOHfuHGQyGU6cOCEeEy5evCiWl9TLmTlzJrp16ya+79atG6ZPn445c+agRo0aqF27NhYvXqw2T+ryld54t2zZIpbjgQMHYvXq1WrlS9vr7t278Pb2hp2dHWrUqIEJEyYgJiZGYz/4+/ujZs2acHJywqRJk5CY6rj96TYrlUoEBgaKx7xWrVph//79asu8f/8++vTpAycnJzg6OqJLly4ICwvD4sWLERISgqNHj4pl6dy5c1pjP3nyJDp37gxnZ2fUqFEDffr0QVhYmDi+QYMGAABPT0/IZDK1zyD16/Tp03jw4AHmzJkDGxsb1K1bF0OHDsXGjRuRmJgIpVKJHj16oFevXlAqlRAEAe/fv0fdunWxaNEiCIIAY2NjFCpUSHzp6enh/PnzaNeunca+/NzxRhAEbN26FU2bNoWtrS2aNGmCjRs3iuPkcjlmzpyJX3/9FZ06dULZsmVhbW2Npk2bqi3j3r176N27NxwdHeHq6opff/0V7969U5tGX19fLeb8+fOrjf/48SPGjx8vfu7e3t64c+eOOP7KlSsoUaIEvLy8ULJkSVSrVg0dOnTQOHYkJydj1KhRGDx4MEqWLKnxvVywYAHq1q2LX3/9FZUqVUKpUqXg4eGBAgUKiNM8fvwYXbt2ha2tLZo1a4bTp0+rLefBgwc4deoUpk+fDltbWzg5OWHChAnYu3cvXr16BUEQ8ObNG9StWxcDBgzAoUOHkJSU9MVjyKevz+Wa35ObN2/C09MTnp6eAAA/Pz94enpi8eLFiIiIwLFjx/Dq1Su0bt0abm5u4iv1eeC8efNQvnx5dO/eXfyuTps2TUdbpN3n8srslJycjOPHj4t55dOnT7VOp1AoxLzyxYsXAKCRVwYEBCA5OVmc59Ocbvny5VAoFFrzytTOnz+vkVc+f/5ca1xJSUmYNm2aWl4ZGBj42RgA4NixYxp5ZVr8/f3RunVrbN68We34Fx0dLU7j6+urkW+p1n/kyBG15Tk7OyM4OBhAygVgmUyGQ4cOwdvbG66urmjdurVaeQ4ODla7QKyKJzQ0FB4eHmK+FRMTI04TExOjlm+tWbMGXl5emDlzps63M3UuoIvtTM3Dw0NrXvnu3Tu1vPLTZrE+zStnzJiBuLg4AClP132aV6Z26tQptbzy9evX4jilUqk1r0zt+vXrannlnTt30rWtaXn27BlsbGxw9OhReHt7a/1sVAoWLIjChQujevXqGDhwIB48eICwsDCcP39ezCvbtm0LW1tbXLp0SSwvqc2cORNeXl7iey8vL8yYMUMtr/T391ebJ3X5Sk9ZAoCtW7eq5ZVBQUFaK1pSu3fvnphXuri4YOLEiYiNjdWYLiAgQC2vTEr672Lwp9usLa/89Emyf/75B3379lXLK8PDw+Hv76+RV54/f15r7KnzShcXF/Tt21ftJppP88rUn0Fqqrxy7ty5qFSpEtzd3cW8UpV79OjRAz4+PmJu+OHDBzGvBFIq8a2srMSXvr6+mFd+avPmzahXrx5cXV0xbNgwteMNAGzbtk0jr1RJTk4W88rOnTujXLlyqFChApo1a6a2jPv372vNK1PT19dXi/nTCrGoqCiNvDJ1/3ZXr15FiRIl4O3tjVKlSsHZ2RkdO3YUr0GrKBQKMa/U9rSsKq8cPXo0KleujNKlS6NBgwZqFaZPnjxRyyv/+usvtWU8fPgQp06dwowZM2Bvbw9nZ2cxr4yIiAAAvH37VswrDx8+/F3c8E2kK0aJKf11G+TVXhEuVvwl5a7vic6e+Hv//j0UCoXGnSIFCxbEo0ePtM7TsmVLvH//Hl26dBFP4jt16oR+/fpleP3fa6eL9ONimft2BgYGiI+Ph1KpFIcJggAhISFd8xsbGuLjunV4u+S/ZhiUUVF4u2QJAAF5vb2R+IVkSWJi8k1Nfvzzzz/YvXs39u/fj+TkZDRq1AiBgYEoXbo04uLikPDvtiQkJODDhw8YO3YsXr58iZUrVyJfvnw4ffo0xowZg19//RWOjo549uwZZsyYAblcjr59+4rr8ff3x+DBgzF8+HDo6+uLd4LNnj0befPmxbVr1zBjxgxYWlqiUaNGSE5OxsCBA9GmTRtxebdu3UJCQoJ4ApzaunXrcOzYMfj5+aFo0aKIiIhARESE2rSfxnDw4EGMGDECvXr1wpQpU5CcnIzTp09rXT6Qcjd3WFgY9u7diwULFiAmJgbTpk3DpEmTxAsBCoUCZ8+ehYmJidi8hmp5iYmJassWBAFJSUlq+/n333/H8OHDMXbsWCxZsgQjRozAzp07YWBgIJ4UqZYhl8sRHh6OgwcPYuHChYiKioKvry+WLl2KQYMGAUg5Cb506RJ+//13FCxYEMuWLcPt27dRoUKFXLud2jp4FwQBa9aswcCBA9GzZ09s3LgRo0ePhp2dHVq3bo3Bgwdj8eLFGD16NLZv3w6JRIKnT5+id+/eGDBgACZOnIj3799j9uzZmDx5MqZOnYo5c+agU6dOaNu2Ldq0aSPuo6SkJMTHx2PlypWYNm0aJBIJJkyYAD8/P3H/btiwAUFBQRg/fjxkMhl27tyJ/v37Y/v27eJ3s2/fvqhZsyamTZuG58+fY/bs2QCQ5ncESLlIIJfLtY5Pz2eT+O+F4/j4ePFua9XxJzo6Whw/d+5cDB8+HCVKlIClpaVYEZB6vXK5HEqlUhymVCoRGhqKrl27Yu3atbh+/TomT56MKlWqoGbNmuJ8qvKVnnivXr2KKVOmYMiQIXB3d8f58+fFSv+09lF8fDx8fHxgZ2eH9evX4927d5g+fTqmTJmCqVOnAviv/Ovr6+OPP/4Qmy4yNzcXy+Sn27x69Wrs27cPY8eORenSpXH58mWMHj0a5ubmqFatGl6/fo1u3bqhWrVqCAwMhLm5Oa5evYqYmBh07twZ9+/fR2xsLKZMmQIg5Skkbdvw4cMHdO7cGRUrVkR8fDyWLVuGAQMGYPPmzdDT08P69evh5eWFZcuWwdraGoaGhlqXc+HCBVSoUAFmZmbi+GrVqiEmJgY3b96EjY0NJk+ejA4dOmDVqlXo0qULJk6cCCsrK/To0UPrMrdt2wZjY2PUqVNH7fv9pePNvn37sGjRIowZMwY2Nja4e/cuZsyYAX19fbRs2RI3b95EREQE5HI5WrdujcjISEilUgwbNgwVKlQQy2f37t3h6emJYcOGITExEYsXL8bgwYPxxx9/iLE8efIEbm5uMDY2hp2dHQYNGoRixYqJ2zB48GAYGxvD398fFhYW2LFjB3r06IGQkBDkzZsXlSpVwsuXL3Ho0CHUrl0b7969w/79++Hq6qq2T5YtW4Z8+fKhWbNmOH/+vFpZUSqVOH78OLp3744ePXrg3r17KFGiBHr27In69euL0wwaNAgFChTAunXrEB0djblz56p9R86fP488efLA2tpaLNv29vbQ09PDhQsX4OHhgfz58yMoKAh79+7FpEmTIAgCmjRpghYtWqBy5cpavyMqiYmJkMvlaheovmcuLi64d+9emuM/N04lX7584l3t2UUQBAhafre0TqtU4l1QUJp5ZYGePSHR+/J9tpJv6A/p3r17CAkJwe7du5GcnIymTZti3bp1sLGx0Zg2KSkJI0aMwPPnz7Fp0yYUKFBAvFlkwoQJcHZ2Rnh4OCZOnAgA4vEVSLlAPnLkSIwfPx76+vpQKpUoWrQoFi1ahHz58uHKlSuYNGkSrKys0KxZMzGvbN++PX7//XfI5XJcv349ze1cv349jh07hoULF6JYsWJ4+fIlXr16pTbNpzEcP34cgwYNQr9+/TBnzhzI5XKcOHHis/srPDwc+/fvx/LlyxETE4Px48djypQpauXs7NmzsLCwUHtqJL0WLFiA0aNHo0iRIli+fDlGjhyJQ4cOpfnEVHh4OI4ePYrly5cjKioKw4YNw4oVKzB8+HAAwKxZs3DlyhUsW7YMBQsWxOLFi3Hr1i2tn292b+eYMWNQpkwZLFiwQGfbmdratWsxfPhwDBgwAGvWrBErAdu1a4fRo0dj3rx5GDNmDPbu3QuJRILw8HD88ssvGDp0KH777Tcx/5g+fTr8/PzECssOHTqgQ4cOautKSEjA6tWrMWfOHOjp6eHXX3/F7Nmzxf27bt06BAUFYdq0aahUqRJ27NiBAQMGYM+ePShbtixiY2PRt29fuLq6Yu7cuXj27Fm6Kzm/ZMmSJfD19UXZsmXT9dmons5MXWEwf/58jBkzBqVKlYKlpfYnJ7QJCQlBz549sXXrVly9ehW+vr5wcnJC7dq105znc2Xp0qVLmDx5MkaNGgUPDw+cOXMGixcv/mwMcXFx8PHxgaOjI7Zv347IyEhMmDAB06dPx6xZs8Tpzp49C2NjY6xfvx7Pnz/H2LFjkT9/frFMfiowMBC7du3C1KlTUbZsWVy4cAG//vorChQogBo1aiAiIgLdunVDjRo1xD5rL1++jOTkZPTq1QsPHz5ETEwM/Pz8ACDNpvTj4+PRs2dPyGQyxMXFYdGiRRg4cCB27twJPT09bNu2De3bt8eaNWtQoUKFNJ98vnr1KqRSKQoVKiQOc3Nzw5QpU/DgwQNUrlwZs2fPRsuWLbFu3Tp0794dkydPRpEiRdK8gSI0NBQmJibijQIqquPNsmXL8PbtW8yYMUPteLNr1y4sWrQIkyZNQqVKlXDnzh1MnDgRZmZmaNOmDW7fvo2IiAjo6enB09MTb9++hY2NDUaPHg2pVAogpcKue/fuaN++PcaOHYvExETMmzcPw4YNw7p168RYwsLCxLzSwcEBI0eOVGtac+jQoTA2NsaKFSuQJ08ebNmyBd27d8fBgweRL18+ODg4YMGCBThx4gTq1q2LyMhIHDx4UONhkyVLlqBgwYJo3749Ll26pDZOlVf27t0bPj4+uH37NkqWLIm+ffuKffcplUoMHjwYBQsWxLZt2xAdHY3ffvtNbTlXrlyBpaUlbG1txWGurq7Q09PD9evX8dNPP6FEiRLYvHkzQkNDMWnSJCiVSrRs2RKenp5iM+lEuYVJQspNDCb5tT9drjRIeQJYIs9dT/xB0JFXr14JUqlUuHz5strw2bNnCz///LPWec6dOye4uroKW7duFe7evSscOnRIcHd3FwICAtK93uTkZOHixYtCYmKikJyczBdfWf5KTExkmcuEV0xMjHDr1i0hLi5OUCqVglKpFBQKhfC4U2fhtszmi697NWsJithY4W71GlrH361eQ1DExgr3atb67HIed+4iKBQKMYb0vCIjI4U1a9YInp6eQpUqVYQBAwYIBw4cEBISEjSmDQ8PF6RSqXDhwgWhe/fuQufOnYWPHz+K47t37y4sW7ZMbZ7Q0FChdu3a4nupVCrMnDlT3EcxMTFaY54yZYowaNAgQalUCu/evROkUqlw7ty5dG3TtGnTBG9v7zT3ReoYVK8OHToII0eOTPd+W7RokVCpUiXh5cuX4rATJ04INjY2QkREhKBUKoXRo0cLrq6uGvtSKpUKhw4dUhtWrVo1Yfv27Wr7eevWreL4+/fvC1KpVHjw4IGgVCqF7du3C9WqVVOLx97eXoiKihKHzZ49W2jfvr2gVCqFqKgooUqVKsK+ffvE8R8/fhTs7e2FGTNm5Lrt/FzZq1evnjBq1CjxfUREhCCVSoWFCxeKwy5fvixIpVJxH4wbN06YMGGC2nIuXLgg2NjYCPHx8eJyg4KC1KbZvn27IJVKhSdPnojDNmzYILi6uorv3dzchKVLl6rN165dO2HKlCmCUqkU/vzzT6FGjRriepRKpbBp0yZBKpUKt27dSvOz7dq1a5qffXo+m7NnzwpSqVT48OGDoFQqhVevXgkdO3YU6tSpIyQkJIjjDx8+rLbs0aNHC/3791cbNmPGDKFr165qsXXu3Fljm+fOnau1fKUn3mHDhgm//PKL2jJHjhypVr4+fW3evFmoXr26EBMTIw773//+J9jY2AivX78Wt6d69epCbGys2v53cHAQkpOTNbY5Pj5esLOzEy5evKi2rnHjxgnDhw8XlEqlMG/ePMHDw0NITEzUGpe2fZieV2RkpCCVSoW7d++q7bfPlROlUimMHz9e6Nmzp9qw2NhYQSqVCsePHxeH7du3T7C1tRXmzp0rODg4CI8ePUpzmU2bNhUmT56sNiw9x5uGDRsKu3btUptvyZIlQocOHQSlUins3r1bkEqlQr169YT9+/cL169fF4YPHy7UqFFDePfunTj9p9vz4sULQSqVCg8fPhSUSqVw/PhxYd++fcKdO3eEEydOCB06dBDq1asnHnsuXLggODk5aRz3GjZsKPz5559q+8TBwUGoXLmyIJVKhb59+6p9rhcuXBDq1KkjREZGav1sVccfe3t7YfXq1cKtW7eE5cuXCzKZTPxNPHnypFC5cmWN/Zb6O7J06VKhUaNGGse+mjVrChs3btT4fJKSkoQjR44IgwcPFqpWrSo0b95cWLFihVjuP33FxcUJt27dEmJiYtLMNZOTk7/p/OxHlJycnOa+iY+PF27fvi3Ex8cLgiAISqUy2/NKVW6pVCrTvU3v3r3TyCsPHjwoJCYmakz79OlTjbwyKipKHN+9e3dh+fLlavOo8koVVU73JVOnThUGDx4sCIIgvH//XpBKpcL58+fTtU3Tp08XvL2909wP2mLo2LGjMHLkyHQtXxAEYfHixUKlSpWEV69eicNUx7/Xr18LgiAIY8aMEVxdXTX2peq3NrVq1aoJO3bsEAThv/2s+o2MiYlR+40UBEHYsWOHUK1aNbV47O3thejoaHGYKt8SBEGIjo4WqlSpIuzfv18cHxUVJeZbut5OlX/++Ucn25la/fr1hVGjRonvX79+LeaVKleuXBGkUqm4D8aNGydMnDhRbTmqvDIhIUFcblBQkNo0O3bsEKRSqRAWFiYOU+WVKm5ubsKyZcvU5lPllYIgCJs3bxZq1KghrkcQBDGvvH37dprb2a1btzT3ieqz2bBhg/g9+vSzOXfunCCVSoWPHz8KgiAIERERYl6ZmJgojv+0DIwZM0bo37+/2rAZM2YI3bp1U4utc+fOGts8d+5c8X3qZaenLA0bNkzo06eP2jJVeWVatmzZIuaMKsePHxdsbGyEN2/eiNtTo0YNIS4uTpxGlVcqFAqNbU5MTBTs7e01rl2OGzdOGDFihCAIgjB//nzBw8NDSEpK0hqXtn2YHqq88t69e4Ig/LffPldOBEEQJkyYIPTq1UttWFxcnJhXqqjyynnz5gkODg7C48eP01ymKq9MLfXxRnXs+/R407BhQ2H37t1q8y1ZskTo2LGjIAiCsGfPHjGvPHDggHDjxg1hxIgRQo0aNYT379+L03+6PS9fvhSkUqnw6NEjQRAEtbzy5MmTQseOHYV69eqJxx5VXvnpca9hw4bC5s2b1fbJp3ll6s81dV4pCJqfrer4Y29vLwQFBQm3b98WAgMDBZlMJv4mnjp1SqhcubLGcTr1d2TZsmVCo0aNND4HVV75KblcLhw9elTMK1u0aCGsXLlSLPfafJoH5VSfy/Uo9zjnmJJXn9jmr3X8nz1ScvItndL+Dfkauih/GVmnzp74y58/P/T19REZGak2PDIyUu2ulNQWLVqEVq1aoX379gAg3gUzadIk9O/fH3rpuKNS5XvtdJF+XCxz30ZfX1/sfFjtbuF03iFtYFUIye/eQRkVpXW8MioKye/fw8CqEBTv3392WRntsHrjxo0ICAiAs7MzDh8+rPY0g7ZlA8DIkSNRtGhRrF27Vq2fjLt37+Ly5ctqzR8pFAokJiYiISEBpv/2KVO1alWNTrY3bdqEHTt24MWLF+KTAzY2NpBIJMifPz/atm2L3r17o3bt2qhVqxaaNm2KwoULa42zbdu26NWrF5o2bYo6deqgXr16cHNzU5vm0xju3r2LDh06pHvfSSQSFCtWDEWLFhWHOTo6QqlU4smTJyhcuDAkEgmkUqnW9vK1fU6fliGZTCb+r9rWd+/ewdraWqOzcolEghIlSqj1+VC4cGFERkZCIpHg+fPnkMvlsLe3F+extLREuXLl1JaT27YzrfhSx6Tq80MqlYrDVLnAu3fvULhwYdy9exf37t3Dnj17xOUI/zbp+Pz5c63bovrf1NQUZcqU0bo9MTExeP36NapVq6Y2n5OTE+7evQuJRIJHjx5BJpOpfRcdHR3T3L7U69a2T1IPS89nU69ePQiCgPj4eNjY2MDf3x/GxsbieFtbW41t1rbe1LF++hkAKZ+Dar98Ok964n38+DEaNmyoNr+dnR2OHz+e5j5S7dvUfVFVq1ZNLP9WVlaQSCSwsbGBmZmZOI2joyPi4uLw6tUrlChRQm27wsPDkZCQgN69e6utSy6Xo1KlSpBIJLh79y6cnZ3T7NPoc59dak+ePMHixYtx7do1vH//Xmwu6dWrV2r76ku/G9p+37T937RpUxw5cgQrVqzAlClTxO/dp65cuYKHDx9izpw5Gsv53PHGwsIC4eHhmDBhAiZNmiROk5ycjDx58kAikYjb2K9fP/Gu71mzZqFu3bo4ePAgOnXqhLt37+Lvv/+Gk5OTRmxPnz5F+fLl1e6etrGxgYODA+rXr48DBw6gffv2uHfvHuLi4tSeQAVSnrR4+vQpJBIJHjx4gJkzZ2LgwIFwc3PDmzdvMGfOHEyZMgW//fYbYmJiMHr0aEyfPl1s7unTz1a1PQ0aNEDPnj0BAJUrV8aVK1ewZcsWuLi44NGjRyhatKjGftP22aX1mX/6+RsaGqJBgwZo0KCB2PfO3LlzERERgfHjx2vsN9XymE9mMR3klRm1YcOGdOeVKl/KK1VNZwL/5ZXx8fFqeeWnNm7cqDWvBFKe1mzbti18fHzSlVe2adMGvXr1QpMmTT6bV6Z2584d8bpAehUrVkytj0jV8e/x48dqucjX9neXujlI1fJUv5HalChRAhYWFuJ7VX4CpDSFKJfLYWdnJ47PkydPmsf91HLLdqYVkyqHVD0tBEBsaSoyMhJWVlZiXrl7925xGlVe+ezZszS3BQBMTU1RunRprdujyis//f1T5ZVAShN+MplMLa9X/aZ8q4oVK4r/p/XZuLu7a+SVqctC6ieMMuLT5lBVeWV65/k0XlVemZoqr0yLat+mzhmdnJzE8q8qGzKZTDy+Af/llS9fvkSJEiXUlhkWFob4+Hj06tVLbbgqrwRSjkfOzs7f3PdoWnnly5cv1cpzZlHllX/88QemTJmCsmXLap0udV75KdXxRhWrg4ODuL/Nzc0RHh6O8ePHi0+TA//llQDE1pz69euHxo0bA0hpHrxu3bo4cOCAmFeeP39e6/ckPDwc5cqV08gr7e3tUb9+fezfv18tr3RxcVGbPyEhQWxONa28cvLkyWnmlZ9SbU+DBg3Qo0cPAEClSpVw+fJlbN68GTVq1MDDhw9RtGhRjeP0tzAwMICHhwc8PDzEvHLOnDl49eqV1ryS6EeiSE6G2b8NwRUoqf33WzBMqQLTk39/XSRkJZ1V/BkZGaFKlSo4e/as2uPOZ8+eRbdu3bTOk5CQoFG5pzrxVf3IEFHuIZFIUGbjhnQ3ySQxMICepaXWizR6lpYwtLJC2c2bP7+Mr2iOqUOHDtDX18fOnTvRvHlzNG7cGK1atYKLi0uaNyy4u7tj165duHLlCmrVqiUOj4uLw+DBg9GoUSONeVKfPKY+2QGAvXv3Yvbs2RgzZgwcHR1hbm6OVatW4dq1a+I0fn5+8PLywqlTp7B//34sXLgQQUFBcHBw0FhXlSpVcPToUZw8eRJnzpzBsGHD4Orqqtb8yqcxpL7QlJlSn7SppL44rZKcqr8aldQnZ6rPVZmqKdlPaWsmJ7t+f3607Uy9DlVM2uJUrTcuLg6dOnXS2pfFly56fro92vabrqTns9m4cSMsLCxQoEABtQtnKp+WjfSWi6/ZLxktS7qgasJx+fLlahU1AMQLW5l1POrXrx9KlCiBGTNmoHDhwlAqlWjRokWG+9goVKiQRv8hb9++BfDfhTAgpQmomzdvQl9fH2FhYWkub9u2bahUqVKGm/lR7bvp06fD3t5ebZzq90oVT+qLiEZGRihVqhRevnwpLqd+/foYNWqUxjpSb09qlpaWKFu2rHjxJTY2FlZWVli/fr3GtKqLRYGBgXBychIreW1sbGBqaoquXbti2LBhiIyMxPPnz9G/f39xXlV5rVy5Mg4cOICiRYvCwMBA4yKvtbW1RvNNn1OoUCGNvmaSk5Px8eNHrdssCCn9lu7cuRMHDhyApaUlBg4ciJ9//jnd66TMpYu8Esh4bsm8MgXzym/zo20n88oU2vYD88pvo8qNAgMD1SpqAOaVX8K88j/MK4myTmREGAz+/ekoWkZ7M+ESo5S8Vi/5+/qNyWo6q/gDgJ49e2LMmDGoWrUq7OzssHbtWsTHx6Nt27YAILaPP3LkSABA/fr1ERQUhMqVK8POzg7h4eFYtGgR6tevzztfiXIpiUQCyScXAtKijI9HAS+vf/teUVfAywuCQgG9dC4rI4oUKYIBAwZgwIABuHz5MkJDQzF48GCYm5ujZcuWaN26tdrdmQDEPqMGDBiAwMBA1KhRA0BKQvn48WO1p5fS4/Lly3B0dETXrl3FYak7ClepXLkyKleujL59+6Jjx47Ys2eP1gs0/2fvvqOjqvY2jn+nZdJ7ARJ678UO2FEUsCByRQFFKdeGvYJiw4b6igXlUkQF1MsVsIENKyJeVLoGRHoN6ZM+9f1jQjSXAAlMclKez1qsmZw5Z59nwibsnN/sfQDCw8MZMGAAAwYMoH///owZM4acnByio6Mr3L9du3asXLmywhuCH8n+/ftJS0sr+yVr7dq1mM3mY34KODY2loMHD5Z9vWPHjgrvNRdIKSkp2Gw2NmzYUHYvgby8PHbs2HHMm9A3lPd5Ijp16sSff/551L5vs9mqfLEgPDycxMREVq9eXfbvDPz/Zg59+rx169Z8+OGHlJSUlF0IXbt2bdXfxHFKSUmp0j1WYmNj2bJlS7ltqampJ/wp5GNp2bIlGzduLLftWPe3bd26NYsXL6awsLDsou7q1asP6/+bN2+muLi47MLK2rVrCQ0NrfDiXOvWrQkKCmL//v2Hfar3kPbt27N48WJcLleF35fK9KXs7Gy2b9/O5MmTy/r+L7/8clg74J9BczQ9evRg+vTpZGZmls1K+PHHHwkPDy+7bx74Z9aZzWZmzpzJuHHjOPvss8tdxAf/hY1PP/20bPz8v4728yY+Pp7ExER2797NpZdeWuHxXbp0ISgoiO3bt5e9b5fLxd69e8t+JnTu3JnPP/+c5OTkI95X6H8VFBSwe/fusosZnTt3JiMjA4vFQkpKSoXHFBcXH/Y7wN8/FNiqVatyszkApk6dSkFBARMnTqRRo0YEBQXRtWtXtm/fXm6/HTt2lH3qv3Xr1hw4cICDBw+WzVj6358BPXv2xOFwsHHjRlq1agXATz/9hNfrLTeTZfv27Xz44Yd89NFHZGdnc9FFFzFt2jROPfXUKn+wSAJP48rK0biydo+3Gsr7PBEaV2pcWdVx5b59+8r9nf6dxpVp5cZHGlcGdlx5qOCqcaVIxQ7s+B0b4LJATELTincq/aBGQyv8VX5tzGowYMAA7r//fl5++WUuu+wyUlNTmTVrVtn0+/3795Oenl62/0033cQNN9zA1KlTGThwIBMnTqRv3748/vjjRr0FEalDzCEhxI0bS/wtt2Au/WXHHBlJ/C23EDduLOYKPvkaaL169eLxxx9nxYoV3HfffaSmpnLZZZexefPmw/YdOXIkt99+O//85z/LBv2Hbu796quvsmXLFrZu3cqSJUt48cUXj3re5s2bs3HjRpYvX8727duZOnVquV+cdu/ezQsvvMCaNWvYu3cvP/zwAzt27Ci7eLl+/Xouuugi0tLSAJgzZw6ffPIJW7duZfv27Xz22WckJCQc9ZfIW2+9lSVLlvDyyy+zdetWNm/ezIwZM8pef+GFF7jvvvvKHWO323nggQfYtGkTv/zyC5MnT+biiy8+4if7Djn99NOZP38+v//+Oxs2bOCRRx6p9l9Ow8PDufzyy5kyZQo//fQTW7ZsYeLEiYct99ZQ3megjR07ljVr1vD444+TmprKjh07WLZsWbkxQHJyMj///DNpaWmHfULyaEaPHs3MmTNZunQp27Zt4/nnn2fTpk1ce+21AAwaNAiTycRDDz3En3/+yXfffccbb7xxWDsXXXQRX375ZbltWVlZpKamlvtz6BO31eX0009n48aNfPDBB2VLBv3vBZvqMGLECL777jvmzJnDjh07eO+99/j++++P2i8uueQSgoKCeOCBB/jjjz/46aefeOKJJ7jsssvKLf3udDqZOHFi2ff/lVdeYcSIERXObgkPD2fkyJE8/fTTLF68mF27dvHbb78xd+5cFi9eDMDw4cPJz8/nrrvuYsOGDezYsYMPPviAbdu2Af6+tHnzZrZt20ZWVlaFn7SOiooiOjqaf//73+zcuZOVK1fyzDPPlNsnLi6O4OBgli9fTkZGBnl5eRV+H/r27UubNm2477772LRpE8uXL2fq1KkMHz687NPk3377LQsXLuT555+nT58+jB49mgceeIDc3NxybS1duhSPx3PECyzH+nlz2223MWPGDN5++222b9/O5s2bWbhwIXPmzCn7/g4bNoxXXnmFH374gW3btvHoo48ClC39ec0115Cbm8tdd93F+vXr2bVrF8uXL+fBBx8su1j17LPPsmrVKvbs2cPq1au59dZbMZvNDBo0CIDevXvTo0cPbrnlFn744Yey/V588cWy/7/OPfdcvvzyS9555x12797Nr7/+yuTJk+nWrRtJSUnY7XbatWtX7k9kZCRhYWHllrkbPXo0n376KQsWLGDnzp3MmzePb775hquvvrosS4sWLcp93/73/93WrVtz5plnMmnSJDZu3Mjq1at54oknGDhwYNnF93379jFgwADWrFnDbbfdxooVK3j66ac57bTTdHGmDtK4UuPK6qRxpcaVGlfWnnHlDTfcoHFlJcaVq1ev5sknn9S4MoDjyocffpj169fz66+/alwpcgRZ+3YAUGgHyxE+HGAO9o/Lra6GVfgzdMYf+P8zP9LSnv87BdtqtXLrrbdy66231kQ0EamHzHY7cWNGE3/jP/Hk5WGJiMDndmOu4N5p1clutzNw4EAGDhxIWlpauftb/d2oUaPw+XyMGzeOWbNmceaZZzJ9+nSmTZvGzJkzsVqttGrV6pj3ODm0Nv6dd96JyWRi4MCBXHPNNXz//feAf0mXbdu2sXjxYnJyckhMTGT48OEMGzYM8C8Dsn379rJfUsLCwpg1axY7d+7EbDbTtWtXZsyYcdR7rZ522mm89NJLvPbaa8yYMYPw8HBOOeWUstfT09PLlvM4pFmzZlxwwQWMHTuW3NxczjnnHB555JFjfn/vv/9+JkyYwPDhw0lMTGTChAn89ttvxzzuRD3wwAM88sgj3HjjjYSHhzNmzBj2799fbrmshvI+A61Dhw7MnTuXqVOncs011wDQtGlTBgwYULbPbbfdxqRJk+jXrx9Op7PCC58Vufbaa8nPz+eZZ54pu6/Ia6+9Vnafi7CwMKZPn84jjzzC5ZdfTps2bbjnnnsYP358uXa2b99+2C/gn3zySbn7EgLcfvvtR/zlORDOPPNMbr75Zp577jlKSkoYMmQIl19+OX/88Ue1nRP89+Z77LHHePXVV5k6dSp9+/Zl1KhRzJ8//4jHhISEMHv2bJ588kmuvPJKQkJCuPDCC3nggQfK7XfGGWfQvHlzhg8fjtPpZNCgQYd9///u5ptvJikpiX/961/s2bOHiIgIOnXqxI033gj47zX91ltv8dxzzzFy5EjMZjMdO3bkpJNOAvxL6a1atYohQ4ZQWFjI22+/fdjsQbPZzIsvvsjkyZMZNGgQLVu25KGHHiq3bJjVauWhhx5i2rRpvPzyy5x88skVLjFksViYPn06jz76KFdddRUhISEMHjyY2267DfBf6Js4cSLjx4+nc+fOAIwfP54ffviBRx55hKlTp5a1tXDhQi644IIjXjA/1s+boUOHEhwczOzZs5kyZQqhoaG0a9eO6667rmyf++67D6vVyn333UdxcTHdu3fnrbfeIioqCvDPSHr33Xd5/vnnGT16NE6nkyZNmnDmmWeW/T9x4MAB7rrrLnJycoiNjeWkk05iwYIF5e7FN2PGDKZOncqDDz5IdnY28fHxnHzyyWUX76644goKCgqYP38+zz77LBEREZx++unce++9R+wbFbngggt49NFHmTFjBpMnT6Zly5Zlf1+H/q5fffVVJk6cyJVXXklycjIPPfTQYfeRfP7553n88ce58cYbMZvNXHjhhTz00ENlr8fExPDVV1+VfYJd6r6GPK5MTU3VuLKaaVypcaXGlbVjXHnHHXcQGxurcWUFDv28GTdunMaVpQI5rnziiSe47rrrNK4UOYr8g7tIAoqOsuqyOdg/C9zirh1Lc9cUk6+2LEZeQzweD2vXrqVHjx5aHlRqhPpcYBQXF7N9+3ZatmxZbff0qI98Pl/ZUid17VNfr7zyCsuWLePDDz80OspxKyws5KyzzuL+++8/4kW0+vo+63Lfk8B56KGH2LZtG++88061nueuu+7CbDbz/PPPq++Joaqr/x1tHKSx5pEd7XujsWXDUlPjrer8P6ghjyvl2BrC+MeIcaVUTkPof/VNfRkHaRwsC5+4jk7zV7GziYmLvv69wn0+eO5G2s/+jj1JJi74ruJ9jocR/a8q5zR8xp+IiEh98fvvv7Nt2za6detGXl4e00rv+3P++ecbnCywGsr7lKqbPXs2ffr0ISQkhO+//54PPvigUrMMjpfb7WbHjh2sXbuWq666qtrOIyIiUtMaynirobxPqTqNK0VE5FjcjhwAnPYjrxRhDQn3PzawGX8q/ImIiATQG2+8wfbt27HZbHTu3Jn58+eXLTFSnxzpff7yyy+MHTsW8H/y838/8blmzRoj4koNWb9+PbNmzaKgoICmTZsyceLEav20/pYtWxg2bBinnXZa2RJyIiIi9YXGlX+NKyuicWX9pnGliIgciy/fvyy2K+TIZS5bqH+5Yqu7RiLVGlrqU6Saqc8FRn1ZhqCmackNqWnFxcWkpaXh8/koKioiJCSkXN9r3ry5gemkIdDPPTGSlvqsXbTUp9Q0/R8UWIfGlUeiceVf1PfESOp/dU99GQdpHCz/vvZ0uq3KZcNJ4fxj/s8V7vPtf14h6eHXyA2D039NDdi5tdSniIiINBjBwcE0b95cv/yJiIiIyAk5NK4UERERqYi5yAmAN+TIBeyQiCgAbA1sxt+RFz8VERERERERERERERERqWVsxS7/k/CwI+4TFp0AQJCrJhLVHir8iYiIiIiIiIiIiIiISJ1hK/YCYI6IPuI+4VH+wp/VC0UFjpqIVSuo8CciIiIiIiIiIiIiIiJ1hr3EX/izR8cdcZ+o+MZlz3Mzj3zv4PpGhT8RERERERERERERERGpM4KL/Y8hcY2PuE9kTFLZ87zsg9UdqdZQ4U9ERERERERERERERETqjNDSwl90kxZH3MditVJi9T/Pz1HhT0REaoH27duzbNkyAPbs2UP79u1JTU01OJWIiIiI1EUaW4qIiIhIfVCQl0uI0/88PqXdUfd1lRb+ivOyqjlV7WE1OoCIiFRO48aN+eGHH4iJiTE6ioiIiIjUcRpbioiIiEhddWDHb2XPGzfvcNR9XTagGIoc2dWcqvZQ4U9EGhxXiQezxYSzyE1QiBWvx4fNbjE61jFZLBYSEhKMjiEiIiIiperquBI0thQRERGRuit9zxaigEI7hIRFHnXfQzP+nAW51R+sltBSnyLSoLhdHlZ/sZM59/3AG/f+wJz7fmDNFztxuzzVds7PPvuMSy65hG7dunHaaacxatQoCgsLWb9+Pddffz2nnXYaJ510EiNGjOC33347YjsVLcf0xx9/MGbMGHr27Env3r259957ycr6a9r6yJEjmTJlCs899xynnnoqffr04ZVXXinXrsPhYNKkSfTu3ZuuXbsyaNAgvvnmGwoLC+nVqxefffZZuf2XLVtGjx49yM/PD9B3SERERKTuMWJcCcaPLSdPnsyUKVM0thQRERERwzjSdgFQZD/2vu6ywp+jGhPVLir8iUid5vP5cJV4KvXHWeTm18928suSHZQUugEoKXTz85Id/PrZTpxF7mO24fP5qpTv4MGD3H333QwZMoSlS5fy9ttvc8EFF+Dz+SgoKODyyy/nnXfeYcGCBTRv3pxx48ZV+qKHw+Hguuuuo1OnTrz//vvMmjWLzMxM7rjjjnL7ffLJJ4SEhLBgwQLuvfdepk2bxooVKwDwer2MHTuW1atX89xzz7F06VLuvvtuzGYzoaGhDBw4kEWLFpVrb+HChfTv35/w8PAqfS9EREREarOaHlfW1bHl4sWLCQ0N1dhSRERERAxTmLEfgKLgY+/rtpr8j0UF1RmpVtFSnyJSZ/l8PhY9t5oD2449TTs43Ma1T/Zmwzd7Knx9wzd76HVhc96e+CPF+a4jttO4dRSD7+mFyWSqVMb09HTcbjcXXHABycnJALRv3x6AM844o9y+TzzxBCeffDI///wz55577jHbnjdvHp06deKuu+4q2/bUU09x9tlns337dlq2bAlAmzZtuPXWWzGZTLRo0YJ58+axcuVK+vTpw48//sj69etZunRp2f5NmzYta2/o0KEMGzaMgwcPkpiYSGZmJt9//z1z5syp1PsXERERqQuMGFdC3Rxbtm/fnltvvRVAY0sRERERMYQzO8P/GHzsuW0eqwnw4SkurOZUtYcKfyJSp1XyGgmhkUEU5TnLPpH9v0oK3RTlOwmNDDrmBZqq6NChA2eccQaXXHIJffv2pW/fvvTv35+oqCgyMjKYOnUqq1atIjMzE6/XS1FREfv27atU25s2beK///0vPXv2POy1Xbt2lV1sadu2bbnXEhISyMzMBCA1NZVGjRqV7fu/unXrRps2bfjggw8YN24cH330EU2aNOGUU06pyrdBREREpNar7eNKqB1jy0OFxkM0thQRERGRmubJ9y/b6azE/bUPzfjzFRdVa6baRIU/EamzTCYTg+/phdvprdT+ZosJe6i1wos09lArYVF2rrz/5KO2YQ0yV/oT2QAWi4U5c+awevVqVqxYwdy5c3nxxRdZsGABjz76KDk5OUycOJEmTZoQFBTEVVddhctVuQtEhYWFnHvuudxzzz2HvZaQkPBXZmv5H/Umk6lsWang4GPPhx86dCjz589n3LhxLFq0iCuuuKJK3wMRERGR2s6IcSVobKmxpYiIiIgcD1O+f9lOd8ixS1ze0sKft6S4WjPVJrrHXy3gKsjD63LhyszwPxbkGR1JpM4wmUzY7JZK/fF6fHQ7N6XCdrqdm4LX4ztmG8dzUcJkMnHSSSdx22238cEHH2Cz2Vi2bBmrV69m5MiRnH322bRt25agoCCys7Mr3W7nzp3ZsmULycnJNG/evNyf0NDQSrXRvn17Dhw4wPbt24+4z6WXXsq+fft4++23+fPPPxk8eHClM4qIiIjUFTU9rtTYUmNLERERETk+5kL/7D1PiP2Y+3pspbMCnSXVGalWUeHPYN6SErJnz2FLn7782edMtvTpS/Ybb+ItqT2dUIVJqS9sdgu9LmrOKQNbYA/1fxrEHmrllIEt6HVRc2yVmBpeVevWrWP69Ols2LCBffv28cUXX5CVlUWrVq1o0aIFH330EVu3bmXdunXcc889lfqU9CHXXHMNubm53HXXXaxfv55du3axfPlyHnzwQTweT6XaOPXUUzn55JO57bbbWLFiBbt37+a7777j+++/L9snKiqKCy64gClTptCnTx8aNWpU5e+DiIiISH1ixLgSNLYUEREREQGwFJeuahEWcsx9vdbSMpgzsMvw12Za6tNAroI8smfPIfO118u2eR0OMqe9BkDMDaOwhUUYFc+fp7QwmT1vPl6HA3NkJDEjRxA/bhxm+7Gr6SK1jdVmoeeFzTnp4hY4i9wEhVjxenxYbdVzcSY8PJyff/6Zt956i/z8fJo0acIDDzzA2WefTUJCAg8//DCDBw+mcePG3HnnnUyZMqXSbSclJfHuu+/y/PPPM3r0aJxOJ02aNOHMM8/EbK785zpeeeUVnn32We666y6Kiopo3rw5d999d7l9rrzySj755BOGDBlS6XZFRERE6rOaHleCxpYiIiIiIgC2Yv8H00zhkcfc11s6Pjep8Cc1wRIUTPa8+RW+lj13HvFjx7J0/CXkZR0ATGDir8fS575DS8OYKL0bfQVfH1o9xmTCd6gNSu/FUPq1yVS+fZ/JxKDbXqHosy8rLkz6fESNHI43yEZI2LH/cVUXV0EelqBgPI5cLJFReJzFhhdLpfY79AnskIggACzV+JOwdevWzJ49u8LXOnXqxMKFC8ttu+iii8p9vXnz5rLnKSkp5b4GaNGiBa+++uoRzz937lwKCwvLbXvttdfKfR0dHc3TTz995DcBpKWlER0dzfnnn3/U/UREREQakpocV0LtGFv+L40tRURERKSm2Yv99+a2RsUec1+fzT9IN7kOvz93faXCn4E8jly8DkeFr3kdDtyZmXTYaaLkj/waTgaWmBjCUpqz70iFyXnziR8zhj/P70deQTaFIVAcbKLEbsIVYsEdbMUTGowvLBRLRBS26HhC4xoR2agZsSltSW7ZhfBK/KM8Gs1GFKl+RUVFpKenM3PmTIYNG0ZQUJDRkURERESkjtLYUkREREQCwV7iAyA4JuHYO9tsAJhdlVu+vj5Q4c9AlsgozJGRFRb/zJGRWOPi2NTKQl50JPjw/8H31yOlk/R8gM//9aHXTH/b59CxprJj/9rf9Lfd8PnKvk5q244WuccoTGZnY02IJzQ7m1AnkHsopBdwAUVANrC3/LHAbqAoCAqDoTgYSoLNOO0W3CFW/w05w0MxRURii4ojJC6JiKTmxKe0oUmrLkTGJNaJZVL/rkmTJkZHEDkus2bNYvr06Zx88smMGzfO6DgiIiIiUodpbCkiIiIigRBS7H+MSGx6zH19dv+Hzcxub3VGqlVU+DOQx1lMzMgRZcWqv4sZOQKPx8WAqYsNSObndbmOXphMTMD63GMc3Lae3P07KcrchzMnE68jFwoKMBcWYy1yEVTsIajYR3CJj9Bi/z9KMxDi9P/BAf5y4N8LhjnAvnLn9OEvIe5MjKHz518dfZnUceNY/s7zEBxCfNP2NG7Vmei4xgH73lTWoaVI44NsmLxeXMWFtaogKXIs48ePZ/z48UbHEBEREZF6QGNLERERETlRHreb0NLCX2xK62Pubwryrw6owp/UCFtYBPGln3LMnjuv1i1XeczCpLOEZu170qx9zyq1W1JUyP4dv3Nw9yZy9u2gKHM/zuwM3Hm5mPILMBcWYS1yYSt2lxUMQ4ohrBjMPgiLjsedmXn02YgZGSS/9z0lf2wBYD+ww/rXDMPiYBPO4ENLkgbhDQ3FHB6BNSoGe2wi4YnNiG/ahiYtuxAV1xiLter/VLQUqYiIiIiIiIiIiIhI4GSm7cRaWsNr1LzDMfc324MBsKjwJzXFbLcTc8Mo4m+8EU+eA0tEJB5nca0oDFVXYdIeEkqLjifTouPJVTrO5SzhwO4tONJ2Yk1MPPpsxNhYDnqyMYVCWBFYfGB3gz0fyAf//EF36Z9i/NMODxzWVhqw23J4wdAVbMETYscXFoIpPBxLVCzBMQmEJzYlNqU1HbqeSc6bb9eZpUhFRERERERERERERGq7Azt+xwa4LBCTcOylPs32EAAsbt8x9qw/VPirBQ4VgMyxcf7H0ptN1ga1qTBpC7LTtHUXaN0FV0He0Wcjet2cvmQ54J/6e3DvnxzYkUr23q0UpO+hJDsdtyMHX34e5oJirMVObEVugoq92Iv/mmFo9UKQB4IKgAIoXzAswV8wTAO2lp3fEhOD9avzj74U6T//Sb4jh/DI6EB+i0RERERERERERERE6q2sfTtIwj9ZpzIr9VlCwwGwuqo5WC2iwp8cU20sTFZlNqLFaqVx8w40rsS037/zuN1kpu3kwI7fydy9hYL0vRRnp+PJzcKXn4+poAhLUUnpkqRegot9BBdDXFIC7qysoy9Fmp7OgZtuYt/BLeTGWCiKDcGblEh4yw40O+k8Op7cD1uQ8bM+RURERERERERERERqi/yDu0gCiip5+dwWEgaA1aMZfyK1XnXPRrRYrSQmtyYx+dg3CP07j9uNyes9+lKkMTF40zNIyIGEHA9sP7QG6TZgKb/ZIDMaHNE2SuLDMTdJJrptdzr0uYRmbbsH4N2JiIiIiIiIiIiIiNQtJVkH/Y/BpkrtbwuL8j9qxp9I3VAbZyNarNZjL0Xq8+B9dTLbVn1J/tbf4MABQrIKiczxEJcDdhc0SYcm6S7Ykg1kAxspYD6rQiArxkR+TBCuhGiCmrYgqUtvuvS9hOi4xlXK6irIwxIUjMeRiyUyCo+zWPceFBEREREREREREZFaye3IAcAZbK7U/vawSACs7upKVPuo8CdSDSqzFGmHk86jw0nnHXZsUYGDDSs+Yt+6HyjeuRVLeiZhWSVEZ3uJyYeIIogo8sG+Evz3F0wD/steXmRjJOTEmCmMCcHbKJ7QFu1p2vMcOp3aH3tIaLnzeEtKyJ49h+x584+6VKrUTosWLeKpp57il19+MTqKiIiIiNRxGluKiIiISF3hy88DwBVcufJWSFQsAEEq/InIiTrepUhDwiI59cIRcOGIw147uHcrv//wMRmpP+Pes4ugTAfhWS7isn2EOiHOAXEOL+wsAAqAncAXbLJMIDMGHNFWiuPCGfTI2xR+8imZr71e1rbX4SiboRhzwyjN/AugkSNH0qFDByZOnGh0FBERERGp4zS2FBEREZGGzFRQCIAnpHKr/4VE+lcLtKnwJyKBYAuLwOPxkFHiJDHKfMLFtMTk1iRedcdh2z1uN9t++y9bV32G48+N+PbvIzizkMgcN3E5EOSBxhnQOMONJdNEaKNk9s6bX+E5sufOI/6f/zyhnLWdq6QYs8VCSUEB9rAwvB4PNnuw0bFqnMvlwlYLlscVERERqas0rvyLxpYiIiIiUhPMRU4AvCGVG3eHRydQAtg8UFJUeNjKePWRCn8iNWDfvn0kJiZWW/sWq5W23fvQtnufw14rKSpk449L2LP2e4p2/kFydHNa5OTgdTgqbMvrcOBOTyf1gdvYnv0HxY2isbfpSMs+A+l6xkAs1rr9Y8PtdLLqw4Ws+eyjsgs0vS6+lFMvG4o1KCjg53vggQdYtWoVq1at4u233wbgyy+/ZPr06fz0009kZGTQuHFjrrnmGq677joAfv75Z0aNGsW3335LQkJCWVtPPvkkv/32G++8806F53rnnXd44403OHDgAMnJydx0001cdtllZa+3b9+eRx55hO+//56ffvqJ0aNHM378+IC/ZxEREZGGoKbHlWD82PLyyy8ve11jSxERERExgq3Y5X8SHlap/aPjGpNW+jw3az+Jya2rJ1gtUrev4IvIMdlDQjnp/KGcdP7Qsm1elwtzZGSFxT9zZCTWmBhC/txP+2w3/JkBPyyHN5ezxv4A6fEm8hJD8TVNIb57b3pdOJyYhOSafEvl+Hw+3CUlldrX6/Pyy8eL+Wnhu2XbSgoKWPm+/+uTBg7GbD76TWGtdjsmk6nS+SZOnMiOHTto27Ytt912GwBRUVE0atSIl156iejoaNasWcOkSZNISEhgwIABnHLKKaSkpPDhhx8yZswYwP8J6o8//ph77723wvN8+eWXPPXUUzz44IP07t2bb7/9lgkTJpCUlES3bt3K9nv11Ve5++67mThxIhaLpdLvQ0RERKS+q+lxJdS9sWWjRo04/fTTy/bT2FJEREREapqt2AuAOSK6UvtHxCSWFf7ystJU+BOR+snjLCZm5Iiye/r9XczIEbhKith89ekUbF6HbV8G0RlOEjMhrATC9vpgbwGs2QwfbWbf5DmsjYGcBBvFjWMJbduZdmcNpl3Pc6p9dqDP5+O9Sfex74/UY+4bEhHJ2FffYM1nH1X4+upPP+KUS4Yw89YbKMqreDYkQJP2nRj22LOVvkATERGBzWYjODi43CesD12oAWjatClr167ls88+Y8CAAQBceeWVLFq0qOzizDfffENJSQkXX3xxheeZPXs2gwcPZvjw4QC0bNmStWvXMmfOHF588cWy/QYNGsSQIUMqlV1ERESkoTBiXAl1b2z5xhtvlCv8aWwpIiIiIjXNXuIv/NmjE46xp58tyI7TCkFuyMtJr85otYYKfyINkC0sgvhx4wD/Pf28DgfmyEhiRo4gftw4zHY7l9z2f+WOyc/N4pcv5nNwzXf4du4iPL2AhHQvEUXQKAsaZblgcxp8mwYzv+bXEEiPN5OfGIqpWTMSe5zJyRddS3hUbGDfTCUvkoRFx1DoyKGkoKDC10sKCih05BIWHXPMCzSBMH/+fBYuXMi+ffsoKSnB5XLRoUOHstevuOIKXnrpJdauXUuPHj1YtGgRF198MaGhFa9BvW3bNq666qpy23r16lW2BNQhXbp0CfybEREREakP6ui4EjS2FBEREZGGI7jY/xgSl1TpY1ylhb9CR1Y1papdVPgTaaDMdjsxN4wi/sYb8eQ5sERE4nEWY7bbK9w/PCqWc4aOh6F/3bfD43bzx5pv+eP7xRRu+Y3g/VlEp7tIzIaIIojY7YXd+fDr77D4d7Y/9i/SYyEnIQhn43jC2nelwzlDK7w3IYCrIA9LUDAeRy7eIDs+r7fc6yaTiWGPPVvpJZnMVgv2sLAKL9LYw8IIj43lmskvHLWNqi7HVJElS5bw7LPPcv/999OzZ0/CwsKYPXs269atK9snLi6Oc889l0WLFpGSksLy5csPu9ByPI50cUdERESkITNiXAkaW4qIiIiIVFVoaeEvukmLSh/jtEIYUKzCn4jUd7awCADMsXH+R5utSsdbrFY6ntKPjqf0K7c9O30vq7+YT8a6HzHt3kPEwUISM3yElkDjDGic4YTUffD1Ptyvf85PYZARb6YgKRxT85Z0v/ha2vY6j+zZc8ieN99/L8K2bfE+NLHC4p8tOLhSeV0lxfS6+NKye6/8Xa+LL8Xr8VS6raqw2Wx4/5Z79erV9OzZs2zpJIBdu3YddtyVV17J3XffTVJSEk2bNuWkk0464jlatWrF6tWrGTx4cLnztG5d/9esFhEREQmEujCuBGPHlm3atAnQuxARERERqbqCvFxCnP7niU07HH3nv3GXVsJcBTWzIofRVPgTkYCLSUjm/OH3wV/XHvC43WxYuYTtK5ZQ8mcqwQdyiM1wE5cDUQUQVeCFnQ5YtY4m59jJ+NcMMl9/vex4b34+3vx83FlZeO12zBZLlXPZ7MGcetlQwH/vlZKCAuxhYfS6+FJOvWwo1qCgE33rFUpOTmbdunXs2bOH0NBQmjdvzgcffMDy5ctJSUnhww8/ZMOGDaSkpJQ77swzzyQ8PJzXX3+93H1bKjJmzBjuuOMOOnbsSO/evfnmm2/48ssveeONN6rlPYmIiIg0ZEaNK8HYseWcOXOq7X2JiIiIiBzLgR2/lT1v3KJTpY9zlc53cRbkBjpSraTCn4jUCIvVSo8zL6PHmZeV235w71bWfD6f7I3/xbJ3P4nuCNqfcQb7HpxQYTve3FxMTZocdw5rUBCnXDqE0wb/g5LCQuyhoXg9nmq9OHPDDTfwwAMPMHDgQIqLi/n0009JTU3lzjvvxGQyMXDgQK655hq+//77cseZzWYGDx7Mv/71Ly6//PKjnqNfv35MmDCBN954g6eeeork5GSeeuopTjvtNAoLC6vtvYmIiIg0VEaMK8H4saWIiIiIiFHS92whCii0gz2k8svOuy0mwIe7qGFcJ1XhT0QMlZjcmv43TCq3zZWZ4V/eswI+jwefx4OzuAB7aMRxndNm9y+7FBoZBYDFWrUlTquqZcuW/Pvf/y637emnn+bpp58ut+3uu+8+7Ni0tDTOOussEhMTy22/4ooruOKKK8ptu+aaa7jmmmvKbfP5fGXPN2/efFz5RURERKRiNT2uBGPHln+nsaWIiIiI1DRH2i5/4a+Kq+p7rIcKf/nVEavWUeFPRGodS2QU5sjICot/JosFk9mMd+cesq1ezNExRCUc/wzA2iovL4/NmzfzySef8PrfljwVEREREakqjS1FREREpD4ozNgPQLG9asd5bCYAvCVFgY5UK6nwJyK1jsdZTMzIEWROe+2w18xRUXiLi8DjIdgDpGWRm5WFJyKM6KRmx3Xvv9ro5ptvZv369QwbNow+ffoYHUdERERE6jCNLUVERESkPnBmZ/gfg81VOs5j8Rf+fCUlAc9UG6nwJyK1ji0sgvhx4wDInjsPr8OBOTwcX3g41thYLKGhuFOScGWmE1zkJcgFZBVQkJuKKyyIiEbNsAVVcb53LTN37lyjI4iIiIhIPaGxpYiIiIjUB558/wpxzuCqTf7w2EoLhSr8iYgYx2y3E3PDKOJvvBFPngOXLYgdu3ZhMvt/SEdEJ0B0AsWFeRSm7cVe5MbqAavDiTPvT/JCLYQkNCEkPMrgdyIiIiIiIiIiIiIiJ8qUXwCAO7hqpS2vtbRQ6HQGOlKtpMKfiNRatrAIAMyxcXiKi8uKfn8XHBpBcMsOuF1OHAd2YssvweqBkAIPvoLdZAfvwRobT0RsUk3HFxEREREREREREZEAMRf679HnCanaTf58QaWFP5c70JFqpaothCoiYjCv11vhdqstiNimbQlr34niuHCcNjABwcU+rPvSyfljIzkHduLz+Wo2sIiIiMgJqmvjl59//pkbb7yRvn370r59e5YtW1budZ/Px0svvUTfvn3p1q0bo0aNYseOHeX2ycnJ4e6776ZXr16cfPLJTJgwgYKCgoBnPdLYUkRERKS+0vhH6jJLscv/JCy0Ssd5rf45cOYGUvjTjD8RqROCgoIwm83s27ePhIQEgoKCMJlMFe4bEtMIYiA/+yAeRy72Eh8UA8W5ZGZtwB1qJzy+MVZrUM2+CQP4fD5KSkowm81H/H6JVAf1PTGK+p4YqTr6n8/nIz09HZPJhM1mC0ib1a2wsJD27dszZMgQbr311sNenzlzJnPnzuWZZ54hJSWFl156idGjR7N06VLsdv8nd++55x7S09OZM2cOLpeLCRMmMGnSJF544YWAZKzK2FKksvR/kBhFfU+MpP5Xd/h8PpxOJ+np6ZjNZoKC6v91Mal/bMUeAEzhEVU7MMj/u5QKfyIitYjZbKZly5bs37+fffv2VeFIK05PIa78XGxOH4eGoAe27cEdZMYeGYPVVrWp4XWJz+fD5XJhs9k0AJcapb4nRlHfEyNVV/8zmUykpKRgsVTtBvZGOfvsszn77LMrfM3n8/H2229z00030a9fPwCmTJlC7969WbZsGQMHDmTr1q0sX76c999/n65duwLw0EMPMW7cOO677z6Skk58CffjH1uKHJn+DxKjqO+JkdT/6p7Q0FCaNWuGuYJb6ojUdvZi/4xVa1Rs1Q48VPhzN4wZryr8iUidERQURLNmzXC73Xg8niofn7bnT36ZPZkmGw4SWQA2wGmFrW2CaXzFtXQ/8/KAZzaax+Nh06ZNtGnTps5cLJT6QX1PjKK+J0aqrv5ns9nqTX/es2cP6enp9O7du2xbREQE3bt3Z82aNQwcOJA1a9YQGRlZVvQD6N27N2azmfXr13PBBRcEJMuJji1F/pf+DxKjqO+JkdT/6haLxYLValWRVuose4n/NgjBsVX8MKA9GACzS4U/EZFa59AyV8ez1FXzNl1o/vR7FOTlsmTKP4n5bh0p+6H9bvB+8xift5qM7dJL6T/mcSzW+vHj8dBFrODgYA3ApUap74lR1PfESOp/x5aeng5AXFxcue1xcXFkZGQAkJGRQWxs+U/wWq1WoqKiyo6vimMV9cxmsz7xLgFxqB/Vp2K91A3qe2Ik9b+6p77c4+/QGE8f4GpYQor9j2HxTar0d28K8q/4ZnF7A9JnjOh/VTlX/biyLSJSBWERUfzjiffwuN18NmMino+X0Ha7h3bbPDB1MV+/+wE55/Rg0H3TCQmLNDquiIiIiJyADRs2GB1BGhj1OTGK+p4YSf1PjKK+13B43W5CSwt/+R4ba9eurfSxJV7/TEGL21el446ltvY/Ff5EpMGyWK0MvPlZuPlZflwyh71vvkK71CJS0nyk/HsNqz89jV2nJnPOva/SuHkHo+OKiIiI1HkJCQkAZGZmkpiYWLY9MzOTDh384634+HiysrLKHed2u8nNzS07viq6du2qGQhSIzweDxs2bFCfkxqnvidGUv8To6jvNTwH924lp3TC6il9+xHfuGWlj931ZTwAVjf06NHjhLMY0f8OnbMyVPgTEQF6D7weBl7P1g0/8vPU+2m5JoNYB8Qu28u+7wfzfbcIOt78CN16Dyw7xlWQhyUoGI8jF0tkFB5nMbawCAPfhYiIiEjtlpKSQkJCAitXrqRjx44A5Ofns27dOq6++moAevbsicPhYOPGjXTp0gWAn376Ca/XS7du3ap8TovFootBUqPU58Qo6ntiJPU/MYr6XsORvnszNsBlgfhGLar0924N9V+ztbp9Ae0vtbX/6UYGIiJ/07prb4bNXk67z5exflBb0mIh1AndfsnDPPoeFl3WjY3fvI+3pITs2XPY0qcvf/Y5ky19+pL9xpt4S0qMfgsiIiIihiooKCA1NZXU1FQA9uzZQ2pqKvv27cNkMnHttdfy+uuv89VXX7F582buu+8+EhMT6devHwCtW7fmzDPP5OGHH2b9+vX8+uuvPPHEEwwcOJCkpCQj35qIiIiIiBgka89WAAqD/Su5VUVQ6e2crO6Ax6qVNONPRKQCMQnJXPX8R7icJSx56XaCvlhOy91eOm52keKNIWP6v8h8/fWy/b0OB5nTXvMfe8MozfwTERGRBmvjxo1ce+21ZV8//fTTAAwePJhnnnmGsWPHUlRUxKRJk3A4HJx00knMmjULu91edszzzz/PE088wXXXXYfZbObCCy/koYceqvH3IiIiIiIitUN+xl6SgCL7MXc9zKHCn02FPxERsQXZufze6XAvfP3uC7iWfU77M85g34MTKtw/e+484v/5T7Zt+JGkFp0Ji4iq4cSHa9KkidERREREpAE57bTT2Lx58xFfN5lM3H777dx+++1H3Cc6OpoXXnihOuKJiIiIiEgdVJJ10P8YbKrysSFRcQDYXAGNVGup8CciUknnXX03XH03rox0vA5Hhft4HQ7c6ekw8Rl2/bGFvBAoCIWiUDMloRZcYXa8EWGYo2MIik8iokkrElp2pkXHU4iMSQxo3kP3IIwPsmHyenEVF2omooiIiIiIiIiIiNQ57txsAJzBVb+DXWhkLKAZfyIicgSWqGjMkZEVFv/MkZFYY2NxZmQAEFHk/0OmF/ACLiAfSAM2Ad8BsBf4ww75YVAUYqI41IorLAhvRCimqGiC4hsR1qiZv0jY6VRiEpKPmvHQPQiz583H63BgjowkZuQI4seNw2w/jvnwIiIiIiIiIiIiIgbxFeQD4AquelkrPCaJEiDIAyVFhdhDQgOcrnZR4U9EpIo8zmJiRo4ou6ff38WMHIHH66blV8vY9ccaDmxdS86erRQf3Is7JxOzIw9rfglBhS5CCr2EFUJ4IVi9EFbi/wM+/AVCF1AApANbys5xANgeBPmhUBhqojjUgivMhic8DKIiueT2Vyj48BMyX9M9CEVERERERERERKTuMxUUAuAJsVX52MiYBNJLn+flpGEPaRnAZLWPCn8iIlVkC4sgftw4wH9PvyPNqGvbvQ9tu/c5ZnsuZwl7t25g35Y1ZO3+g6KDe/FkZYAjD2t+EUGFbkIKvYQW+ogoBJsHQpz+P+T4AHfpnyIsMR5CJjdiz7z5FZ4re+484m+8MTDfCBEREREREREREZEaYCkqAcAbElzlY6PimpQV/nIz9hPfWIU/ERH5H2a7nZgbRhF/44148hxYIiLxOIuPaxlNW5CdFh1PpkXHk4+5r8ftZt/OTez941eydm6m8OAe3FkHITcPa34hycmtaJGTc9R7EHryHJhj46qcU0RERERERERERMQI1uLSG/SFh1X5WFuQHafFv9RngSMzwMlqHxX+RESO06HlMg8V0cy2qk8zryqL1UrT1l1o2rrLEffxulxHvQehJSKyOiOKiIiIiIiIiIiIBJSt2AuAOSL6uI53Wf2Fv0JHVgBT1U5mowOIiEhgHboHYUViRg7H4yyu4UQiIiIiIiIiIiIix89eWvizRycc1/Gu0jkbRbma8SciInXMke5BGDt8OLHXjcJSOlNRREREREREREREpC4I9t/ij5C4pOM63lVaDXMWVHyLpPpEhT8RkXrosHsQhoeT//1ydgy7ml+7BvGPZxcbHVFERERERERERESkUkJLFzGLbtLiuI53N6DCn5b6FBGpp2xhEfjMZjJKnPgsVpYseALntm20X7KJb//zitHxRERERERERERERI6pIC+XEKf/eWLTDsfVhttq8j8WFQQqVq2lwp+ISD23b98+AK589Qv+bGEmyA2m/3uN/Ts3GZxMRERERERERERE5OgO7Pit7HnjFp2Oq41DhT9PUX5AMtVmKvyJiDQQtiA7XV98i6wISMyG/946FI/bbXQsERERERERERERkSNK37MFgEI72ENCj6sNT+lSn57iokDFqrVU+BMRaUBadDyZgluuwW2G9lvcLLh7kNGRRERERERERERERI4od/9OAAqDj78Nj9VfDvOVFAciUq2mwp+ISANz4aiH+e38pgB0+XInX7w52eBEIiIiIiIiIiIiIhUryjoAQLH9+Nvw2krLYc6SACSq3VT4ExFpgIa+uJTNbaxYvRD22nx2pP5idCQRERERERERERGRwzizM/yPwcdf0vKWzvijxBmISLWaCn8iIg2QxWrllFf+TXo0xDpgw12jcDWAT7uIiIiIiIiIiIhI3eLNywXAGWw5/jZspTf5c7kDEalWU+FPRKSBSm7ZCe/dN+O0QJvtHhaO7290JBEREREREREREZHyCgoBcAdbj7uJQ4U/s1uFPxERqcfOGTqeTQPaAdD1+zSWvP6gwYlERERERERERERE/mIuLALAE3ICN/krLfyZnJ5ARKrVVPgTEWngrnx6Ib93smP2QdysD9i85lujI4mIiIiIiIiIiIgAYC12+Z+EhR5/I0FBAJjdKvyJiEg9Z7FaOXvaYg7EQVQBbL3vFkqKCo2OJSIiIiIiIiIiIoK12F+sM4VHHH8jdv9sQYvLG4hItZoKfyIiQnzjltgn3EdRELTc7WXxzf2MjiQiIiIiIiIiIiKCvdhfrLNGxR53G6ZDhT+PLyCZajMV/kREBIDeA69n6+CeAHRfmc2HL9xicCIRERERERERERFp6Owl/mJdcGzScbdhCfYvE2pxqfAnIiINyNDH3mFjd/9/gk3mfs36FR8bnEhEREREREREREQaspBi/2NEQvJxt2EJDvM/ulX4ExGRBqb/9E/Zk2QivBj2P3Q/+blZRkcSERERERERERGRBsjjdhNaWviLTWl93O1YQ/yTHWwq/ImISEMTGZNI/KNPUGCHZvt9LL25v9GRREREREREREREpAFK37cdq/8WfzRp2eW42wkKiwLA6g5EqtpNhT8RETlMz3OHsPvqMwHo+ms+C5+4zuBEIiIiIiIiIiIi0tAc3L0JAJcFouIaH3c7trBIQIU/ERFpwAY/MIP1p/j/Q2zxn1X8/MW7BicSERERERERERGRhiRrz1YACoPBYrUedzvBkbEABKnwJyIiDdmg175gZ7KJUCc4nnic7PS9RkcSERERERERERGRBiI/fQ8ARcEn1k5oaeHPpsKfiIg0ZGERUTR9aiqOUGiSDl/dNMjoSCIiIiIiIiIiItJAlGSn+x/tphNqJyI6AfDP+HM5S044V22mwp+IiBxV59Mu5OCoi/ECnTcWs2DClUZHEhERERERERERkQbAnZsNgDP4xMpZEbFJZc/zsg+eUFu1nQp/IiJyTJfc9n9s6BMPQLuPf+OHD2cYnEhERERERERERETqO29+HgCu4OO/vx9AVGzjsuc5mftPqK3azvDC3/z58znvvPPo2rUrQ4cOZf369Ufd3+Fw8Nhjj9G3b1+6dOlC//79+e6772oorYhIwzX41c/Z2syM3QXuZ1/k4N6tRkcSERERERERERGResxcWASAJ8R2Qu3YQ0JxWfzP83PSTzRWrWZo4W/p0qU8/fTT3HLLLSxevJgOHTowevRoMjMzK9zf6XRy/fXXs3fvXl566SU+++wznnjiCZKSkircX0REAsceEkrHF2aSHQ5JWfDDzYPxuBvA3XBFRERERERERETEEJYi//34vCHBJ9yWq3TSYGFuxgm3VZsZWvibM2cO//jHPxgyZAht2rThscceIzg4mIULF1a4/8KFC8nNzWXatGmcdNJJpKSkcOqpp9KhQ4caTi4i0jC17tqbvBv/gccEHTe7+M99lxkdSUREREREREREROopa1HpxIPw8BNuy1la+Ct2ZJ9wW7XZiS2KegKcTie//fYb//znP8u2mc1mevfuzZo1ayo85uuvv6ZHjx48/vjjfPXVV8TGxjJo0CDGjh2LxWKp0vk9Hs8J5ReprEN9TX1OjFAd/a/f9ZNYsOYHeny1j06fb2PZ/CmcO+zugLUv9YN+9olR1PfESEb0P/V1ERERERGpz2wlXgDMEVEn3Ja7tCJWkp9zwm3VZoYV/rKzs/F4PMTFxZXbHhcXx7Zt2yo8Zvfu3fz0009ccsklzJgxg127dvHYY4/hdru59dZbq3T+DRs2HHd2keOhPidGCnT/a3PtU2zePpr22zzYX57D11EtiG3cOqDnkPpBP/vEKOp7YiT1PxERERERkcCwF/sLf/bohBNuy1V6m0BXYd4Jt1WbGVb4Ox4+n4+4uDieeOIJLBYLXbp0IS0tjdmzZ1e58Ne1a9cqzxIUOR4ej4cNGzaoz4khqrP/Jb00n50jhhGfC3+8/jhnL/oVi7VO/bci1Ug/+8Qo6ntiJCP636FzioiIiIiI1Ech/lv8EZbQ+ITbcltMgA9XUcEJt1WbGXaFNiYmBovFQmZmZrntmZmZxMfHV3hMQkICVqu13C/RrVq1Ij09HafTSVBQUKXPb7FYdDFIapT6nBipOvpfs7bd+fP20bienE27rR4W3TmAYa99FdBzSN2nn31iFPU9MZL6n4iIiIiISGCEFPsfIxs1O+G23KUz/jyF+SfcVm1mNurEQUFBdO7cmZUrV5Zt83q9rFy5kp49e1Z4TK9evdi1axder7ds244dO0hISKhS0U9ERALjvGvu4ff+rQDo8s0+Ppv5sMGJREREREREREREpD4oyMslxOl/nti0wwm357GaAPCWFJ1wW7VZlQt/L7/8Mnv37g3Iya+//noWLFjA4sWL2bp1K48++ihFRUVcccUVANx333288MILZftfffXV5OTk8OSTT7J9+3a+/fZb/vWvfzF8+PCA5BERkaobOuVDUjsEYfFB1L/eZ8u6FUZHEhERERERERERkTpu37aNZc8bt+h0wu15rP6SmLek+ITbqs2qvNTnV199xfTp0znllFO48sor6d+//3HPthswYABZWVm8/PLLpKen07FjR2bNmlW21Of+/fsxm/+qTTZu3JjZs2fz9NNPc+mll5KUlMS1117L2LFjj+v8IiJy4ixWK32nLeL3oYNIyoI/7h1Hsw9/xh4SanQ0ERERERERERERqaMy9/1JFFBoJyDXGr2lhT+fs+SE26rNqlz4+/DDD/n9999ZtGgRTz75JI8//jgDBgxgyJAhdOvWrcoBRowYwYgRIyp8be7cuYdt69mzJwsWLKjyeUREpPokJrfmj/vvpOShF2m1y8viW/szbPZyo2OJiIiIiIiIiIhIHZW7f6e/8BccmPa8Nn/hz1TiDEyDtdRx3eOvU6dOPPTQQyxfvpwnn3yStLQ0rrnmGi655BLeeust8vLyAp1TRERqub6XjeOPS7sA0HVFBh+/dIexgURERERERERERKTOKsrcB0BxsCkg7XmtFv8Tlysg7dVWx1X4O8Tn8+F2u3G5XPh8PqKiopg/fz5nn302S5cuDVRGERGpI/7x5H/4rWswZiDxrc/57b9fGB1JRERERERERERE6iBnTpb/0R6Ywp/P5l8E0+xyB6S92qrKS30CbNy4kUWLFrFkyRJsNhuXX345kyZNonnz5oB/ic7JkyczYMCAgIYVEZHar9/0pfxy+Xkkp8POB++gxYcrCYuIMjqWiIiIiIiIiIiI1CHevFwAnMGWgLTnC7IBYHJ5AtJebVXlGX+XXHIJV111FXv27OHJJ5/ku+++45577ikr+gEMHDiQrKysgAYVEZG6ITquMdGPPkJhEDTf5+OTmy80OpKIiIiIiIiIiIjUNQUFALhDjmsO2+FKC39mtzcw7dVSVS78XXTRRXz99dfMmDGDfv36YbEcXmmNjY1l06ZNAQkoIiJ1z8nnD2PHVacB0O1nB4ufGWdwIhEREREREREREalLzIXFAHiC7YFpMMjfjkUz/sq75ZZbSEpKqo4sIiJSjwyZ+CYbTgoHoOm7y1nzzUKDE4mIiIiIiIiIiEhdYS12+Z+EhQakPbM92P/o9gWkvdqqyoW/8ePHM2PGjMO2z5w5k9tuuy0goUREpH4Y8Nrn7GpsIqwEMh59mJzM/UZHEhERERERERERkTrAVuyfmWeOiAxIe6bgEACsKvyV9/PPP3P22Wcftv2ss87il19+CUgoERGpH8KjYmn85HPkhUBKmo8vbx5odCQRERERERERERGpA4KK/ffis0TGBKQ9S7B/5qBFhb/yCgsLsdlsh223Wq3k5+cHJJSIiNQf3XoPZP+IfniBLuuKWPDwMKMjiYiIiIiIiIiISC1nL/EX6IJjA3P7OVuo/7ZEmvH3P9q1a8fSpUsP27506VLatGkTkFAiIlK/XHb3K2w4IxaAth+s48clcwxOJCIiIiIiIiIiIrVZSLH/MTIpJSDt2UIjALC6A9JcrWWt6gE333wz48ePZ/fu3Zx++ukArFy5kiVLlvDSSy8FPKCIiNQPg1/7kq8uPYWWu7143/sI55mDsYaE4XHkYomMwuMsxhYWYXRMERERERERERERMZjH7Sa0tPAX06RVQNoMCosCwOYKSHO1VpULf+eddx7Tpk1j+vTpfP7559jtdtq3b8+cOXM49dRTqyOjiIjUA/aQUFpPmYbzqf+j+0uzyZrzNtnz5+N1ODBHRhIzcgTx48ZhttuNjioiIiIiIiIiIiIGSt+3Hav/Fn80adklIG2GlN4r0KYZf4c755xzOOeccwIcRURE6rv2Pc+h8KXWZM2dR+brr5dt9zocZE57DYCYG0Zp5p+IiEg95/F4eOWVV/joo4/IyMggMTGRwYMHc/PNN2MymQDw+Xy8/PLL/Oc//8HhcNCrVy8effRRWrRoYWx4ERERERGpdgd2/o4dcFkgKq5xQNoMjvDfiiionhf+qnyPPxERkRMRnNCI7PnzK3wte+48LEHBNZxIRERE/pfH4yE1NZXc3NxqaX/mzJm8++67TJo0iaVLl3LPPfcwa9Ys5s6dW26fuXPn8uijj7JgwQJCQkIYPXo0JSUl1ZJJRERERERqj5x92wEoDAaL9bjmsB0mPDoR8Bf+PO76W/2r8nfL4/Hw5ptv8umnn7J//35crvKLoa5atSpg4UREpP7xOHLxOhwVvuZ1OPDkOTDHxtVwKhERkYbtySefpF27dgwdOhSPx8OIESNYs2YNISEhTJ8+ndNOOy2g51uzZg3nn39+2UoyKSkpLFmyhPXr1wP+2X5vv/02N910E/369QNgypQp9O7dm2XLljFw4MCA5hERERERkdolP30PSUBRAOcIRMQkkl363JGdRkxCcuAar0WqPOPv1VdfZc6cOQwYMIC8vDxGjRrFBRdcgMlk4tZbb62OjCIiUo9YIqMwR0ZW+Jo5MhJLRMWviYiISPX5/PPP6dChAwDffPMNe/bs4dNPP+W6667jxRdfDPj5evbsyU8//cT27f5P8W7atIlff/2Vs846C4A9e/aQnp5O7969y46JiIige/furFmzJuB5RERERESkdinJOuh/tJsC1mZUXFLZ89yM/QFrt7ap8oy/jz/+mMmTJ3POOefwyiuvMGjQIJo1a0b79u1Zt25ddWQUEZF6xOMsJmbkiLJ7+v1dzIjheEqKMdtsBiQTERFpuLKzs0lISADgu+++46KLLqJly5YMGTKEt99+O+DnGzduHPn5+Vx88cVYLBY8Hg933nknl156KQDp6ekAxMWVXwUgLi6OjIyMKp3L4/EEJrTIMRzqa+pzUtPU98RI6n9iFPW9+s/lyAHAGWwO2N9zUHAYbjNYveDISjvudo3of1U5V5ULfxkZGbRr1w6AsLAw8vLyADj33HN56aWXqtqciIg0MLawCOLHjQP89/TzOhyYIyOJHT6cmBEj+GnSjXR7+EUiYxINTioiItJwxMfH8+eff5KQkMDy5ct59NFHASguLsZisQT8fJ9++ikff/wxL7zwAm3atCE1NZWnn36axMREBg8eHNBzbdiwIaDtiRyL+pwYRX1PjKT+J0ZR36u/XDn+RTmddgtr164NXMM2sJbAn5s24ApudEJN1db+V+XCX1JSEunp6TRp0oSmTZuyYsUKOnfuzIYNGwgKCqqOjCIiUs+Y7XZibhhF/I034slzYImIpHDXdrZfO5K4P7exYt05dH5tHs3a9zI6qoiISINwxRVXcMcdd5CQkIDJZCpbYnPdunW0atUq4OebMmUK48aNK7tXX/v27dm3bx//+te/GDx4cNnsw8zMTBIT//owUGZmZtmSpJXVtWvXaileivwvj8fDhg0b1OekxqnviZHU/8Qo6nv1358lTgC8oUH06NEjYO3+bAVKIDYi5LjbNaL/HTpnZVS58HfBBRewcuVKunfvzsiRI7n33nt5//332bdvH6NGjapqcyIi0kDZwiIAMMf6l/AKb92Ozf070HjXNlrs9bHt+uGkP/E4J50/1MiYIiIiDcL48eNp27YtBw4c4KKLLir7UKfFYmHs2LEBP19xcTEmU/l7dVgsFnw+HwApKSkkJCSwcuVKOnbsCEB+fj7r1q3j6quvrtK5LBaLLgZJjVKfE6Oo74mR1P/EKOp79ZeluLTwFxIc0L9jV2lVrCQ/94Tbra39r8qFv3vuuafs+YABA2jSpAlr1qyhefPmnHfeeQENJyIiDcug8S+wPLkVxU+/SlIWOO6dxLI7t9Jv5ANGRxMREan3LrroonJfOxyOgC+7eci5557L9OnTadKkSdlSn3PmzGHIkCEAmEwmrr32Wl5//XWaN29OSkoKL730EomJifTr169aMomIiIiISO1hLXL7n4SHB7Rdd2lVzFXoCGi7tUmVCn8ul4tJkyZx880307RpUwB69OgR0GmWIiLSsJ15xS2kNm3P3rvGk5wO9mffYtGeP7niwVlGRxMREam3ZsyYQUpKCgMGDADg9ttv54svviAhIYEZM2ZUeXnNY3nooYd46aWXeOyxx8qW87zqqqu45ZZbyvYZO3YsRUVFTJo0CYfDwUknncSsWbOw2+0BzSIiIiIiIrVPUIkXAHNEVEDbdVtNgA9XYX5A261NzFXZ2Waz8cUXX1RXFhEREQA6ntKPLvM/4s8WZuxuaP/WCt6746JjHygiIiLH5b333qNRI/+N7VesWMGPP/7IzJkzOfPMM5kyZUrAzxceHs7EiRP55ptvWL9+PcuWLePOO+8sd994k8nE7bffzooVK9iwYQNvvvkmLVu2DHgWERERERGpfYKK/YU/e3RCQNs9NOPPU1wY0HZrkyoV/gD69evHV199VR1ZREREyjRq1pZ+C1fyW9dgzED3z3by7+Gn4nKWGB1NRESk3snIyKBx48YAfPPNN1x88cX07duXMWPGVPoG8iIiIiIiIoESUnoJMCyhcUDb9Vj99xr3FhUEtN3apMr3+GvevDnTpk1j9erVdO7cmZCQkHKvX3vttQELJyIiDVtIWCSD3/2Z/9zSj+7fpdHt1zyWXHEyZ8/5jJiEZKPjiYiI1BuRkZHs37+fxo0bs3z5cu644w4AfD4fHo/H2HAiIiIiItLghBT7HyMbNQtou26bGfDiLSkOaLu1SZULf++//z4RERFs3LiRjRs3lnvt0A3YRUREAsVitTLsX9+y4OFhdFi0jvZ/uln1jwto8/IsWnftbXQ8ERGReuHCCy/knnvuoXnz5uTk5HDWWWcBkJqaSvPmzQ1OJyIiIiIiDUlBXi4hTv/zxKaBvd+411q6EKbTGdB2a5MqF/6+/vrr6sghIiJyVP944j2WJk8g8fXFNNvvY+/Y0WQ+PpFTLxxhdDQREZE678EHHyQ5OZn9+/dz7733EhYWBkB6ejrXXHONwelERERERKQh2bftr0lnjVt0Cmjbhwp/vnp8O6EqF/5ERESMMuDGp1iZ0prMx58nIQfy7n+Sz3f/Sf/RjxodTUREpE6z2WyMHj36sO2jRo2q+TAiIiIiItKgZe77kyig0A72kNCAtu21WQAwudwBbbc2qXLh78EHHzzq608//fRxhxERETmWMwaNZnNKW3bdfiMpaT7s//dvFu7dxpBJbxsdTUREpE7btWsXb731Flu3bgWgTZs2XHfddTRt2tTgZCIiIiIi0pDk7t/pL/wFB75tn81fFjM5XYFvvJYwV/UAh8NR7k9WVhb//e9/+fLLL8nLy6uOjCIiIuW073EWPd9bwpZWFoI80OGdn3n31n543PX3kzoiIiLVafny5QwYMID169fTvn172rdvz7p16xgwYAArVqwwOp6IiIiIiDQgRZn7ACgONgW8bZ/NBoDZ7Ql427VFlWf8TZs27bBtXq+XRx99VJ8EFRGRGhPfuCX9F67iw+vPpOvaQnos28vCEadz2ZzvA74EgIiISH33wgsvMGrUKO65555y259//nmef/55+vTpY1AyERERERFpaJw5Wf5He+ALfwSVFv5c3sC3XUtUecZfhY2YzYwaNYq33norEM2JiIhUij0klCHz/su685sA0HVtAZ8NOZWM/dsNTiYiIlK3bN26lSuvvPKw7UOGDOHPP/80IJGIiIiIiDRU3rxcAJzBlsA3bg8CVPirlN27d+PWEmsiIlLDLFYrw6Z9xe/XnILTAu22eVg9bCCb135vdDQREZE6IzY2ltTU1MO2p6amEhcXZ0AiERERERFpsAoKAHCHVHnRymMyBflvHGhx19/CX5W/a08//XS5r30+H+np6Xz77bcMHjw4YMFERESqYsikt/m8ySPETFtA0zQf6eP+SeYj99F74PVGRxMREan1hg4dyqRJk9i9eze9evUCYPXq1cycOZNRo0YZG05ERERERBoUc2ExAJ5ge+DbDg4BwOL2Bbzt2qLKhb/ff/+93Ndms5nY2FgeeOABhgwZErBgIiIiVdV/zGOsataW9ElPkpADBROnsHT3Fgbc+JTR0URERGq1W265hfDwcN544w3+7//+D4DExERuvfVWrrvuOoPTiYiIiIhIQ2ItdvmfhIUGvG1LsL9Nqwp/f5k7d2515BAREQmIUy8cwdbGrfjztjE02+8j5ZXF/GffdoY+/q7R0URERGotk8nEqFGjGDVqFPn5+QCEh4dTVFTE6tWry2YBioiIiIiIVDdbsQcAc0RkwNu2hob7H+tx4a/K9/jbvXs3O3bsOGz7jh072LNnTyAyiYiInJDWXXtz6oIv2dzGis0DXRas5d0bz8Gje9GKiIgcU3h4OOHh/l+Gd+7cyfDhww1OJCIiIiIiDUlQsf/+e5bImIC3bSsr/AW86VqjyoW/Bx98kDVr1hy2fd26dTz44IMBCSUiInKiYhKSGbjoF9afFAFAj2/TWHT1qRQVOAxOJiIiIiIiIiIiIkdiL/HPxguOTQp82+H+YqJNhb+//P777xUu89KjRw9SU1MDEkpERCQQbEF2rpq/irX9m+MFumwoYtmQMziwa4vR0URERERERERERKQCIcX+x8iklIC3HRweBWjGXzkmk4mCgoLDtufl5eHxeAISSkREJJCufukzNl/XhxIrtNnhZeOIS0n9eZnRsURERERERERERORvPG43oaWFv5gmrQLefkhUPABBroA3XWtYq3rAKaecwr/+9S/+7//+D4vFAoDH42HGjBmcdNJJAQ8oIiISCFc8OItlyU8RMXUuyQch6+bxLJ9wG2cOvsnoaCIiIob56quvjvq67uMuIiIiIiI1KX3fduxRMVgT4olt3SPg7YdGxeEF7G5/kdFirXKZrNar8ju65557GD58OBdddBEnn3wyAL/88gv5+fm89dZbAQ8oIiISKP2uncCvyW1Je3gSSVlQ+MjLfLLnTwaNf8HoaCIiIoa45ZZbjrmPyWSqgSQiIiIiIiIQHRZN4lfLcGdlYY2Ox1WQhy0sImDtR8YkklP63JGdRkxCcsDari2qXPhr06YNH330EfPnz2fTpk0EBwdz2WWXMWLECKKjo6shooiISOCcdP5QdqW05rebR9Bir4/mry9lwb4d/OPphUZHExERqXGbNm0yOoKIiIiIiAgA3pISHPPfJXvefLwOB+bISGJGjiB+3DjMdntAzhEZ27is8JeXpcJfmaSkJO66665AZxEREakRzdr3InLBV3xzQ386bHbRdfHvvJd+JkOnf1Mvp/eLiIiIiIiIiIjUZq6CPLJnzyHztdfLtnkdDjKnvQZAzA2jAjLzLywiCrcZrF7/jL/6yFzVAxYuXMinn3562PZPP/2UxYsXBySUiIhIdYuOa8yg//zMutOiAej+QwYfXHUy+blZxgYTERERERERERFpYCxBwWTPm1/ha9lz52EJCg7YuZyln/svdGQGrM3apMqFvxkzZhATE3PY9ri4OKZPnx6QUCIiIjXBFmRn2FsrWT+wNV4TdPqthA33/5OS3Gy8LheuzAz/Y0Ge0VFFRERERERERETqLY8jF6/DUeFrXocDT17Frx0PV2nhr9iRE7A2a5MqF/727dtHSkrKYdubNGnC/v37AxJKRESkJl31widsGX0utG/NKU/9i9w357KlT1/+7HMmW/r0JfuNN/GWlBgdU0REREREREREpF6yREZhjoys8DVzZCSWiIpfOx4um/+xOD87YG3WJlUu/MXFxbF58+bDtm/atIno6OhAZBIREalxl9/zGi3+NYOsufPIfP31sk8YHVpLPGPGDM38ExERERERERERqQYeZzExI0dU+FrMyBF4nMUBO5e7dMafsyBwswhrkyoX/gYOHMiTTz7JTz/9hMfjwePxsHLlSp566ikGDhxYHRlFRERqhD0ugez5NbOWuIiISG1y/vnnk519+KddHQ4H559/vgGJRERERESkIbGFRRA/dizxN91UNvPPHBlJ3C03Ez9uHLawiICdy201+R8L6+eH/K1VPeD2229n7969jBo1CqvVf7jX6+Wyyy7jzjvvDHhAERGRmlKZtcTNsXE1nEpERKT67d27F6/Xe9h2p9NJWlqaAYlERERERKSh+XXJG3To3Im2332Lp6gQS0QkHmcxZrs9oOc5NOPPU1QY0HZriyoX/oKCgpg6dSo7duwgNTWV4OBg2rVrR3JycnXkExERqTGH1hKvqPhnjozEEhaOx+3GYq3yf58iIiK10ldffVX2fPny5URE/PUpWq/Xy8qVK/W7noiIiIiI1Ig/P3+P8O/T2XVGCv3nfAmA2WYL+Hk8VhPgw1Oswl85LVq0oEWLFgDk5+fzzjvv8P7777No0aJAZRMREalRh9YSz5z22mGvxQ4fTsHy5Xz94h10mPwq7XueU+P5REREAu2WW24BwGQy8cADD5R7zWq1kpycfNh2ERERERGR6hC613/7gawwX7Wex2M1A158JYG7b2BtckJTFn766ScWLlzIl19+SXh4OBdccEGgcomIiNQ4W1gE8ePGAf57+nkdDsyRkcSMHEHsqFFsvWYY7bZ6cFx/E/+5tAdXTJqr2X8iIlKnbdq0CYDzzjuP999/n9jYWIMTiYiIiIhIQ5WQ5gYguttp1XoeT+k9/nwlJdV6HqNU+WplWloaixYtYtGiRTgcDhwOBy+88AIXX3wxJpOpOjKKiIjUGLPdTswNo4i/8UY8eY6ytcQtYREcuO5inC9NIzkduixYy8ere9Jx8jTa9zjL6NgiIiIn5Ouvvz5sm8PhIDIy0oA0IiIiIiLS0Gz69Wti8sFrgpMG3lCt5/LaLIALn9NZrecxirmyO37++eeMHTuWiy66iNTUVO6//36WL1+O2WymXbt2KvqJiEi9YQuLwGyzYYuN8z+G+e93dNaVt3LG0p9Y1zcetxna/+km9/p/8v6jIwxOLCIicmJmzJjB0qVLy76+7bbbOPXUUznzzDPLZgWKiIiIiIhUl9Rl/wbgYCwkJreu1nN5rRYATC53tZ7HKJUu/N1555106tSJ5cuX8/LLL9OvXz+CgoKqM5uIiEitExYRxbBZy8l85J/sS4CIIuj83q98cElXtm740eh4IiIix+W9996jUaNGAKxYsYKVK1cya9YszjrrLKZMmWJwOhERERERqe9KNm0EIKuRvdrP5QsqLfw5XdV+LiNUuvB35ZVXMn/+fMaMGcO7775Lbm5udeYSERGp1c656g5O+2QF63rH+mf/bXGTee1o3n/8WqOjiYiIVFlGRgaNGzcG4JtvvuHiiy+mb9++jBkzhg0bNhicTkRERERE6ruw/TkAuJo1qfZz+Ww2AMzuBj7j7/HHH+eHH37gqquuYsmSJfTt25ebbroJn8+H1+utzowiIiK1UnhULMPeWEH6w2PYH186+++dn1l8aTe2/b7K6HgiIiKVFhkZyf79+wFYvnw5Z5xxBgA+nw+Px2NkNBERERERqec8bjdJaf46U3zPvtV/wtLVLM2u+lnbqnThDyA4OJjBgwczb948Pv74Y9q2bUtcXBxXX301d999N1988UV15RQREam1zrv6bk5dsoJ1Z8TiMUGHP1ykj7iOhU+OMjqaiIhIpVx44YXcc889XH/99eTk5HDWWWcBkJqaSvPmzQ1OJyIiIiIi9dn6FR8TUQRuM5x6yZhqP5/vUOHPrcJfOS1atOCuu+7iu+++47nnnqOoqIi77rorkNlERETqjPCoWIbNWUHaQ9dzIA4iC6HT3P+y+LJu7Ej9xeh4IiIiR/Xggw8yfPhwWrduzZw5cwgLCwMgPT2da665xuB0IiIiIiJSn239bjEABxJMRMYkVvv5zPZgACz1dMaf9UQbMJvNnHfeeZx33nlkZmYGIpOIiEiddf7w+3AMGMXSOy6j66ocOmx2cWD4SFZf2ZsrJsw2Op6IiEiFbDYbo0ePPmz7qFGjaj6MiIiIiIg0KJ4tmwHIaRRcI+czHSr8eXw1cr6adtwz/ioSFxcXyOZERETqpMiYRIa9tZL9E67jQBxEFULHt39k0eXd2bV5tdHxREREKvTBBx9w9dVX07dvX/bu3QvAm2++ybJlywxOJiIiIiIi9VnE/nwAvM2b1cj5LMGhAFhdKvyJiIhIFfQb+QA9P/qa9adG4TVBx01O9l0znMXPjDM6moiISDnvvPMOzzzzDGeddRZ5eXl4vf4lbyIjI3nrrbcMTiciIiIiIvWVy1lCo4P+3z8an9qvRs5pCw0HwOJW4U9ERESqKDquMVe9/RN77x9OWixEFUCHN5ezcHB3dm1ZZ3Q8ERERAObNm8fkyZO56aabMJv/+jWxS5cu/PHHHwYmExERERGR+uznL98hxAklVjjl4mtr5JzWEH/hz+aukdPVOBX+REREasCFox6ix8dfs/6USLwm6JTqZO+wYSyecqPR0URERNizZw8dO3Y8bHtQUBBFRUUGJBIRERERkYZgz49LATiQaCIkLLJGzmmPiAbAqsKf3/nnn092dvZh2x0OB+eff35AQomIiNRH0XGNuWruf9lz79UcjIHoAujwxncsvKIHu7duNDqeiIg0YCkpKaSmph62ffny5bRu3dqARCIiIiIi0iBs2w6Ao1FYjZ3SHh4NaMZfmb1795bd7+HvnE4naWlpAQklIiJSn/W/YRLdPlrG+pMj8AKdfi9h91VD+fCFW4yOJiIiDcyrr75KUVER119/PY8//jhLl/o/bbt+/Xpef/11/u///o8xY8YYnFJEREREROqrqAOFAJhataqxc4ZFxgEQVE8Lf9bK7vjVV1+VPV++fDkRERFlX3u9XlauXElycnJg04mIiNRTMQnJXDVvFZ/PeoTg2QtIzIaYmV/z/o89OOP5d0hu2cnoiCIi0gBMmzaNq6++mqFDh2K325k6dSpFRUXcfffdJCYmMmHCBAYOHGh0TBERERERqYcK8nJplO4DoFnfmvu9IzQ6Di9gc4HH7cZirXSprE6o9Lu55Rb/LASTycQDDzxQvhGrleTk5MO2i4iIyNH1H/MYWZeM4cu7rqDLr/l0/q2Enf8Ywq/XXMCld75sdDwREannfD5f2fNLL72USy+9lKKiIgoLC4mLizMwmYiIiIiI1Hc/L51DkhsKg6DXeVfV2HkjYxuRg39JzLzcdKLjGtfYuWtCpQt/mzZtAuC8887j/fffJzY2ttpCiYiINCSxSU25av7PfDpjImFvLCIhB2L+9SXvr+hJnxfepXHzDkZHFBGResxkMpX7OiQkhJCQEIPSiIiIiIhIQ5H2yzckAQeSzJwUZK+x80bGNian9Lkj62DDLfwd8vXXXx+2zeFwEBkZGZBAIiIiDdXF454k45IxfH3XULquKaDzxmK2XTmYX0b055LbpxodT0RE6qn+/fsfVvz7X6tWraqhNCIiIiIi0lCYd+wGIL9xxDH2DKywiCg8JrD4IC8rrUbPXROqXPibMWMGKSkpDBgwAIDbbruNL774goSEBGbOnEmHDpqVICIicrziG7fkH+/+wpLXHyTyzQ+Iz4XY1z/nPz/04swX/k2jZm2NjigiIvXM+PHjy93DXUREREREpCbEpBUBYG3TrsbP7bRBiBMKczNr/NzVrcqFv/fee4/nn38egBUrVrBy5UpmzZrFp59+ypQpU3jjjTcCHlJERKShGXjT02RcPo6v7xxK17UFdNlQxJ9XXsqeB++k+4VXYwkKxuPIxRIZhcdZjC1MF2xFROT4DBw4UPfzExERERGRGpWTuZ+kDP/z1ucOqfHzu6z+wl+Ro/4V/sxVPSAjI4PGjf3rnX7zzTdcfPHF9O3blzFjxrBhw4aABxQREWmo4hu35B/v/cLW8YPIjITG8a3ocfaVZM+ew5Y+ffmzz5ls6dOX7DfexFtSYnRcERGpg461xKeIiIiIiEh1WPXxbKxecIRC1zMG1vj5XaXT4orzc2r83NWtyoW/yMhI9u/fD8Dy5cs544wzAPD5fHg8nsCmExEREQbd8hztFn9C4jOTyZ47j8zXXsfrcADgdTjInPYaGTNm4CrIMzipiIjUNT6fz+gIIiIiIiLSAGWt/QGAg4lmLNYqL055wg4V/lyF9e96WpW/mxdeeCH33HMPzZs3Jycnh7POOguA1NRUmjdvHvCAIiIiAonJrfEmutg35sYKX8+eO4/4Gyt+TURE5Eg2bdpkdAQREREREWmAbLsOAFDQJNqQ87ttJsCnwh/Agw8+SHJyMvv37+fee+8lLCwMgPT0dK655pqABxQRERE/jyO3bKbf//I6HHjyHJhjdY8mERERERERERGp3WLT/LetsXfoasj5PaXVMU9RgSHnr05VLvzZbDZGjx592PZRo0YFIo+IiIgcgSUyCnNkZIXFP3NkJJaISANSiYiIHJ+0tDSee+45li9fTlFREc2bN+epp56ia1f/L/4+n4+XX36Z//znPzgcDnr16sWjjz5KixYtjA0uIiIiIiIn5MCuLSRm+p93umCYIRncVv+MP09xoSHnr05VvscfwAcffMDVV19N37592bt3LwBvvvkmy5YtC2g4ERER+YvHWUzMyBEVvhYzfDgl2Rk1nEhEROT45ObmcvXVV2Oz2Zg5cyZLlizh/vvvJyoqqmyfmTNnMnfuXB599FEWLFhASEgIo0ePpqSkxMDkIiIiIiJyon5d8gZmIDsC2vc8x5AMHqsJAG9JsSHnr05VLvy98847PPPMM5x11lnk5eXh9XoBiIyM5K233gp4QBEREfGzhUUQP24ccbfcjDnSP7vPHBlJ3E03ETtyBFtu/SdrvltscEoREZFjmzlzJo0aNeLpp5+mW7duNG3alL59+9KsWTPAP9vv7bff5qabbqJfv3506NCBKVOmcPDgQX3gVERERESkjsvbuAqAg0lVXpQyYDzW0vJYPfxgYZULf/PmzWPy5MncdNNNmM1/Hd6lSxf++OOPgIYTERGR8sx2OzE3jKLtih9o8+MPtF3xA1HXjWDd7aOxrd9C3v0T2Lz2e6NjioiIHNXXX39Nly5duO222zjjjDO4/PLLWbBgQdnre/bsIT09nd69e5dti4iIoHv37qxZs8aIyCIiIiIiEiD2PekAFCfHGZbBe6jw53QalqG6VLmcumfPHjp27HjY9qCgIIqKigISSkRERI7MFhYBgDnWPziyR8cSfOtNZI6/nYQc2H3bPwl/ayHJLTsZmFJEROTIdu/ezbvvvsv111/PjTfeyIYNG5g8eTI2m43BgweTnu6/EBAXV/5CQFxcHBkZVVva2uPxBCy3yNEc6mvqc1LT1PfESOp/YhT1vbot/oALgNCOPQz7O/TaLP4nTmeVMxjR/6pyrioX/lJSUkhNTSU5Obnc9uXLl9O6deuqNiciIiIB0Pm0C1k1eSKOB54k+SCsG3slYf/+iui4xkZHExEROYzP56NLly7cddddAHTq1IktW7bw3nvvMXjw4ICea8OGDQFtT+RY1OfEKOp7YiT1PzGK+l7dk7lnM01z/c9D2vRm7dq1huTwWPwz/nwlzuPOUFv7X6ULf6+++iqjR4/m+uuv5/HHH8dZOv1x/fr1fPLJJ8yYMYPJkydXW1ARERE5ulMvHME3WQexPjWTlnt8fHftBVy44EdCwiKNjiYiIlJOQkLCYR8cbdWqFZ9//nnZ6wCZmZkkJiaW7ZOZmUmHDh2qdK6uXbtisVhOMLHIsXk8HjZs2KA+JzVOfU+MpP4nRlHfq7s++f4tANKj4byLhxiWY4s9CACrx0uPHj2qdKwR/e/QOSuj0oW/adOmcfXVVzN06FDsdjtTp06lqKiIu+++m8TERCZMmMDAgQOPO7SIiIicuHOH3cWS7HRSXv2Adls9LL32LC7/9y9YrMbdLFlEROR/9erVi+3bt5fbtmPHjrKVZVJSUkhISGDlypVlt5rIz89n3bp1XH311VU6l8Vi0cUgqVHqc2IU9T0xkvqfGEV9r+4pTF0LQEaSzdC/O1+Qv/BndnuPO0dt7X+Vvgro8/nKnl966aVceumlFBUVUVhYeNh9F0RERMQ4A296mkU56bR/ewWdfivh/VF9uGref42OJSIiUua6667j6quvZvr06Vx88cWsX7+eBQsW8PjjjwNgMpm49tpref3112nevDkpKSm89NJLJCYm0q9fP4PTi4iIiIjI8QrdmwVASdPEY+xZvUx/K/zVN+aq7Gwymcp9HRISoqKfiIhILXTFg7PYeJl/KbRuvzh476bzDE4kIiLyl27duvHqq6+yZMkSBg0axGuvvcaECRO49NJLy/YZO3YsI0aMYNKkSVx55ZUUFhYya9Ys7Ha7gclFREREROREJKR5AIjudpqhOUz2YAAsrvpX+KvSul/9+/c/rPj3v1atWnVCgURERCQwrnpmMe85zqf71/vo/s1+/n3vZVz13IdGxxIREQHg3HPP5dxzzz3i6yaTidtvv53bb7+9BlOJiIiIiEh12fTr18Tkg9cEJw28wdAs5kOFP7fvGHvWPVUq/I0fP56IiIjqyiIiIiIBNuy1r/j3tafTbVUuXT75g4XRoxgy8U2jY4mIiIiIiIiISAOT+uV7dAAOxkLn5NaGZjGHhAEq/DFw4MBqWdpz/vz5zJ49m/T0dDp06MDDDz9Mt27djnnckiVLuOuuuzj//PN57bXXAp5LRESkPrjyjR9YPOwUOm8spu07/+WTqHsYdOvzRscSEREREREREZEGpGTzbwBkNTJ++X5rsL/wZ62Hhb9K3+PvWEt8Hq+lS5fy9NNPc8stt7B48WI6dOjA6NGjyczMPOpxe/bs4dlnn+Xkk0+ullwiIiL1hcVqZeBb37G5jRWbB5rMWMJX86cYHUtERERERERERBqQsP05ALiaNTE2CGANDfc/ug0OUg0qXfjz+aqn6jlnzhz+8Y9/MGTIENq0acNjjz1GcHAwCxcuPOIxHo+He+65h/Hjx9O0adNqySUiIlKfhIRFcs5bX7C9qZkQJ0Q8P4efPn3L6FgiIiIiIiIiItIAeNxuktK8AMT3OsvgNGAPjwLA1pALf5s2bQr4Mp9Op5PffvuN3r17/xXIbKZ3796sWbPmiMdNmzaNuLg4hg4dGtA8IiIi9Vl0XGN6zlrIniQTEUXgmfQMG3/61OhYIiIiIiIiIiJSz61f/iERReA2w6mDbjA6zl+FP5fBQapBle7xF2jZ2dl4PJ7DCopxcXFs27atwmN++eUX3n//fT744IMTOrfH4zmh40Uq61BfU58TI6j/yf9KTGlL81dmsv+fY0jMhgN33cXWmXG06HBSQM+jvidGUd8TIxnR/9TXRURERESkLti6/EM6AwcSTHSNSTQ6DqGR8UD9nPFnaOGvqvLz87nvvvt44okniI2NPaG2NmzYEKBUIpWjPidGUv+T8oLJHX8rtv97lUZZsPWWUeya+CyRsYFfX119T4yividGUv8TEREREREpz7NlMwA5jYINTuIXFh2HDwhy+5chtVjrVLnsqAx9JzExMVgsFjIzM8ttz8zMJD4+/rD9d+/ezd69e7npppvKtnm9/jVhO3XqxGeffUazZs0qde6uXbtisVhOIL1I5Xg8HjZs2KA+J4ZQ/5Mj6dGjBz+GWcl/ZCpND/jY+twD9HjnKyJjEgLSvvqeGEV9T4xkRP87dE4REREREZHaLGJ/PgDe5pWr4VS3iNhGOACzDwrysoisBbMQA8XQwl9QUBCdO3dm5cqV9OvXD/AX8lauXMmIESMO279Vq1Z8/PHH5bZNnTqVgoICJk6cSKNGjSp9bovFootBUqPU58RI6n9SkTMv+ydfZKdjfWE+rXd5+fb6Cxmw4CfsIaEBO4f6nhhFfU+MpP4nIiIiIiLyF5ezhEYH/ZO4Gp/az+A0flGlhT+A3KyD9arwZzY6wPXXX8+CBQtYvHgxW7du5dFHH6WoqIgrrrgCgPvuu48XXngBALvdTrt27cr9iYyMJCwsjHbt2hEUFGTkWxEREalzLhz1EHvG9Mdthg5bXHx0XV887nq4uLmIiIiIiIiIiBji5y/fIcQJJVY45eJrjY4DQEhYJB6T/3l+1gFjwwSY4YuWDhgwgKysLF5++WXS09Pp2LEjs2bNKlvqc//+/ZjNhtcnRURE6q1Lbp/K+7nX0vGdn+myvoj/jD2bYXNWGB1LRERERERERETqgT0/LiUGOJBookdYpNFxALBYrThtEOKEAkeG0XECyvDCH8CIESMqXNoTYO7cuUc99plnnqmOSCIiIg3KlZPe5r2cgXRfuo3uK7N4b/yFDHvlC6NjiYiIiIiIiIhIXbdtOwCORmEGBynPZfUX/opyM42OElCaSiciIiIADPu/Jaw9KwGA7l/uZsHEfxicSERERERERERE6rqoA4UAmFq1MjhJea7SqXEl+bnGBgkwFf5ERESkzD9e+5oNvcIB6LRoAx88d6PBiUREREREREREpK4qyMulUboPgGZ9Bxqcpjy3Cn8iIiJS31msVga/+QOpHYOw+KDlW9/x6YyJRscSEREREREREZE66OelcwhyQ2EQ9DrvKqPjlHNoxp+7KN/YIAGmwp+IiIiUYwuy0//t79nS0kKQGxKnLeLb/7xidCwREREREREREalj0n7+GoADSWZsQXaD05TnsZoAcBeq8CciIiL1XFhEFH3f/JSdySZCSyD4mdf45av3jI4lIiIiIiIiIiJ1iHnnHgDyG0cYnORw7tLCn6e4yOAkgaXCn4iIiFQoNqkpnWcsYF8CRBVA0YTH2PTr10bHEhERERERERGROiImzV9Us7Ztb3CSwx2a8ecrKTY4SWCp8CciIiJH1LR1F5KnTiMjCuJzYe8dt7B760ajY4mIiIiIiIiISC2Xk7mfpAz/89bnXGFsmAp4bf4SmQp/IiIi0qB0OOk8Qp96nNwwaJIOv437B1lpu42OJSIiIiIiIiIitdiqj2dj9YIjFLqeMdDoOIfxWEsLfy6nwUkCS4U/EREROaaTzh+K88FbKLRD870+fhh1MQV5uUbHEhERERERERGRWipr7Q8AHEyyYLFaDU5zOK/NAoCpxGVwksBS4U9EREQq5awrbyX91itxWqHtdg+fX3sWLmeJ0bFERERERERERKQWsu06AEBB4yiDk1TMV1qMNLncBicJLBX+REREpNIuGvsE2687G48JOqY6WTyqLx53/RociYiIiIiIiIjIiYs94P/AuL1DV4OTVMxn8xf+zCr8iYiISEN2+b3T+f0K/4Ct6+p8Ftx8nsGJRERERERERESkNjmwawuJWf7nnS68xtgwR+Cz2wAwuT0GJwksFf5ERESkyv7x5ALWXdAUgB7fp/PeXbXvBs0iIiIiIiIiImKMX5e8gRnIioD2Pc4yOk6FTLYgACxur8FJAkuFPxERETkuw175gnVnxALQdek23n90hMGJRERERERERESkNsjbsAqA9CSrwUmOzGQPBsDsUuFPREREBIChM79jY/cQzED7Bb/y8Ut3lL3WpEkTw3KJiIiIiIiIiIhx7HvTAShOjjM4yZEdKvxZ3D6DkwSWCn8iIiJy3CxWK5e++QOb2tmweqHN139StH8PJq+X+CAbJq8XV0Ge0TFFRERERERERKQGxR9wARDWuZfBSY7MEhoKgFWFPxEREZG/2ENCuWDut2Sf05W2c94mb8EitvTpy599zmRLn75kv/Em3pISo2OKiIiIiIiIiEgN2JH6C/G5/ufdL77O2DBHYQ0OAzTjT0REROQw4VGxnDxlBllz55H5+ut4HQ4AvA4HmdNeI2PGDM38ExERERERERFpANZ/9jYA6dHQrG13Y8MchTU0EgCb2+AgAabCn4iIiASENSSM7PnzK3wte+48LEHBNZxIRERERERERERqWkHqWgAykmzGBjkGe1gEAFYV/kREREQO53Hkls30+19ehwNPXsWviYiIiIiIiIhI/RG6NwsAZ9Mkg5McnT0iBtCMPxEREZEKWSKjMEdGVviaOTISS0TFr4mIiIiIiIiISP2RmOYBIKrbqQYnObqQSBX+RERERI7I4ywmZuSICl+LGTkCj7O4hhOJiIiIiIiIiEhNSv15GdH54DXBKZeMMTrOUYVFxgNgd4HHXX+qfyr8iYiISEDYwiKIHzeOuFtuLpv5Z46MJO6mm4gbMwZb6brpIiIiIiIiIiJSP236agEAaXEQ37ilwWmOLjy2EQBmHxQV1J9b1KjwJyIiIgFjttuJuWEUbVf8QJsff6Dt998R0qkTP913vdHRRERERERERESkmpVs/g2A7CS7wUmOLSo2sex5dsY+A5MElgp/IiIiElC2sAh8ZjMZJU7W//gJO28bT+yX61jy2v1GRxMRERERERERkWoUvi8HAFezJsYGqYSwiFi8Jv/zgpyDxoYJIBX+REREpFrs27ePrmcPJrVbKABB//4Yl7PE4FQiIiIiIiIiIlIdPG43iQe9AMT3OsvgNMdmsVpxWv3PC3IyjQ0TQCr8iYiISLXqMeFlioIgJc3HB49fa3QcERERERERERGpBuuXf0hEEbjNcOqgG4yOUylOm/+x0JFhbJAAUuFPREREqlXb7n3444wkAJp8vp7s9L0GJxIRERERERERkUDb+t1iAA4kmIiMSTzG3rWDu3TGX0l+rrFBAkiFPxEREal2/SfPJzsCYvPgs4eHGx1HREREREREREQCzLN1CwA5jYINTlJ5LhX+RERERKouJiGZPRd2AaD9j2ls3fCjwYlERERERERERCSQIvbnA+Br0dzgJJV3aMafuzDf2CABpMKfiIiI1IjBj8xjTyKEOOHXJ8cbHUdERERERERERALE5Syh0UEvAI1OOd/gNJXntpr8j8V5BicJHBX+REREpEbYguyUXDUIgE7rCvnp07cMTiQiIiIiIiIiIoHwyxfzCHFCiRVOufhao+NUmqe08OctKjI4SeCo8CciIiI1ZtAtz7GllQWLD9Jefc7oOCIiIiIiIiIiEgC7V34GwIFEEyFhkQanqbyywl+xCn8iIiIixyXh5jvxmKDdVg9LXn/Q6DgiIiIiIiIiInKitm0HwNE4zOAgVeOx+ctkPmeJwUkCR4U/ERERqVFnDBpNarcQAILe+xBXPRpYiYiIiIiIiIg0RFEHCgEwtWxlcJKq8VoPFf6cBicJHBX+REREpMb1mPAKRUGQkubjg8frzrrvIiIiIiIiIiJSXkFeLo3SfQA06zvQ4DRV47VaADCp8CciIiJy/Np278Pm0xMBaPLFerLT9xqcSEREREREREREjsfPS+cQ5IZCO/Q67yqj41SJ11Za+HO5DU4SOCr8iYiIiCH6PzGP7AiIdcBnD48wOo6IiIiIiIiIiByHtJ+/BuBAohlbkN3gNFXjs9kAFf5ERERETlhsUlP2XNgFgHY/HmDrhh8NTiQiIiIiIiIiIlVl2bkbgPzGEQYnOQ5B/sKf2eUxOEjgqPAnIiIihhn8yDz2JkKoE3598jaj44iIiIiIiIiISBVFpxUDYG3b3uAkxyEoCACL22twkMBR4U9EREQMYwuyU/wP/02fO64v4KdP3zI4kYiIiIiIiIiIVFZO5n6SMvzP2553pbFhjofdvzSpWYU/ERERkcAYdOvzbGlpweqFtFefNzqOiIiIiIiIiIhU0qqPZ2H1giMUOp92sdFxqsxsDwbA4vIZnCRwVPgTERERwyXcciceE7Tb6mbp9AlGxxERkRo0Y8YM2rdvz5NPPlm2raSkhMcee4zTTjuNnj17Mn78eDIyMgxMKSIiIiIiFclcswKAg0kWLFarwWmqzhIcCoDVo8KfiIiISMCcMWg0qd1CALC9+wEuZ4nBiUREpCasX7+e9957j/bty98L5KmnnuKbb75h6tSpzJ07l4MHD3LrrbcalFJERERERI4kaPcBAAqaRBsb5DhZQsL8j5rxJyIiIhJY3R6cSlEQpKT5WPzEdUbHERGRalZQUMC9997L5MmTiYqKKtuel5fHwoULeeCBBzjjjDPo0qULTz31FGvWrGHt2rXGBRYRERERkcPEHvB/eNvevovBSY6PLTQCAKvb4CABpMKfiIiI1Arte5zF5tMTAEj+fB05mfsNTiQiItXp8ccf5+yzz6Z3797ltm/cuBGXy2mHQ1oAAFBFSURBVFVue+vWrWnSpIkKfyIiIiIitciBXVtIzPI/73ThNcaGOU6HCn+2elT4q3sLroqIiEi91f+J+WwadCGxDvjsoeEMe/1royOJiEg1WLJkCb///jvvv//+Ya9lZGRgs9mIjIwstz0uLo709PQqn8vj8Rx3TpGqONTX1OekpqnviZHU/8Qo6nu1w69LZtMKyIqA07v2qZN/H/aIaMBf+KtsfiP6X1XOpcKfiIiI1BqxSU3Zc0FnYhb9RtsV+9n2+ypadTrV6FgiIhJA+/fv58knn+SNN97AbrdX+/k2bNhQ7ecQ+Tv1OTGK+p4YSf1PjKK+Z6ys1T/SCkhPstTZ1TmyHIUk4i/8VfU91Nb+p8KfiIiI1CqDH53Pd8t7kJwOvzxxM63e/cXoSCIiEkC//fYbmZmZXHHFFWXbPB4PP//8M/Pnz2f27Nm4XC4cDke5WX+ZmZkkJCRU+Xxdu3bFYrEEJLvI0Xg8HjZs2KA+JzVOfU+MpP4nRlHfqx12peUAUJwcT48ePQzNcrzs7kwAglzQsZLvwYj+d+iclaHCn4iIiNQqtiA7xVcNhFeX0HFdAf/97G1Ou+hao2OJiEiAnH766Xz88cfltj344IO0atWKsWPH0rhxY2w2GytXrqR///4AbNu2jX379h3XxQSLxaKLQVKj1OfEKOp7YiT1PzGK+p6x4tNcAIR17lVn/x4iY5MoACw+KC7MJywiqtLH1tb+p8KfiIiI1DqDbn2ej5Z8RtvtHva/+hyo8CciUm+Eh4fTrl27cttCQ0OJjo4u2z5kyBCeeeYZoqKiCA8PZ/LkyfTs2bPOfopYRERERKS+2ZH6C/G5/ufdL77O2DAnIDI2kYLS5zkZe6tU+KutzEYHEBEREalI/E234zVB+z/dLJ0+weg4IiJSgyZMmMA555zDbbfdxogRI4iPj+eVV14xOpaIiIiIiJRa/9nbAKRHQ7O23Y0NcwIiohLwlj7PzzloaJZA0Yw/ERERqZV6XzqW/8x7nS7ri7C++wGuGx7BFmQ3OpaIiFSDuXPnlvvabrfzyCOP8MgjjxiUSEREREREjqbw97UAZCTZjA1ygixWKy4b2F2Qn5VudJyA0Iw/ERERqbW6TZhKURA0TfPxweRRRscREREREREREREgZF8WAM6mSQYnOXHO0ilyRfnZxgYJEBX+REREpNZq3+MsNp+eAEDjz9aSk7nf4EQiIiIiIiIiIpKY5gEguvvpBic5ca7Swl9Jfo6hOQJFhT8RERGp1fo/MZ/scIhzwGcPDzc6joj8f3v3HSZVdbhx/J2+fdkKC6IowoJ0bBEh1kiCHcQYAaUogjWWn5oYC6KiiRhFUVERpSixgVHQKBpbxI4K2BBUOmyD2Trl3vn9Mbsr6MICW87s7PfzPPPMzJ07M+8MR/Dsu+deAAAAAECr9vXHS9SmTLId0mGnjDUdp8FC1UcrDZRuM5qjsVD8AQCAmJbZtqPW/+4QSVKX/23Smq8+MpwIAAAAAACg9fp6yXxJ0pYsKTvvQMNpGi7sqr6uLDMbpJFQ/AEAgJh3+k1ztCFHSgpIn0y+2HQcAAAAAACAViv43deSpOK2PsNJGkfY45AkhSoo/gAAAJqFLzFJVWcPkSR1/6JcH70213AiAAAAAACA1ill4zZJUnj/9maDNBLLHS3+rKoKw0kaB8UfAABoEU65bKq+P9Alty1tnHaX6TgAAAAAAACtjhUOq+0WW5KU3f+3htM0jpriL1JVaThJ46D4AwAALUbGhMtlO6T878N65ZEbTMcBAAAAAABoVb5890WlVElhp3TEKWNNx2kUljtaldnBgOEkjYPiDwAAtBgDTx+vr3omSpJc8xYoFCf/QwYAAAAAANASrH57gSRpc45DaRm5htM0Dru6+FMgPn7ORPEHAABalN5/vUeVXqnjlogW3j7GdBwAAAAAAIBWw/p+lSRpW7sEw0kaj+2JVmWOUMhwksZB8QcAAFqU/H7H6rsjcyRJea8s07aiTWYDAQAAAAAAtBKpm0slSZFOBxhO0nhst1uS5AhS/AEAABhx0m3ztC1FyvJLr9440nQcAAAAAACAuBcKBtRua0SSlHfkiYbTNJ6I1yNJcoQsw0kaB8UfAABocTLbdtTaE7tLkrr8b6N+/PoTw4kAAAAAAADi2yevzVViUAq4pcMGjzIdp9FEPNEVf84wxR8AAIAxZ9w8VxtzpKSA9NHkiabjAAAAAAAAxLV1778iSdqc61BicprhNI3H4fNJkpys+AMAADDHl5ikiuF/kCR1/7xMH70213AiAAAAAACAOPbDj5Ikf16y2RyNzOH1SpJc4YjhJI2D4g8AALRYp15+j77v5JTbljZOu8t0HAAAAAAAgLiVvrkieuOgzmaDNDJHQqIkyRW2DSdpHBR/AACgRcuYeIVsh5T/fVivPnqj6TgAAAAAAABxp7x0u9oVRFfEHXD0EMNpGpfLV1P8seIPAADAuIGnj9dXPaP/g+ac97yscNhwIgAAAAAAgPjy0aKZ8oalCp/U//g/mo7TqFyJSZIkd5z8SIniDwAAtHi9/3qPqjxSx80RLbjtfNNxAAAAAAAA4srWT96SJG1u65TH6zMbppG5k1Kj16z4AwAAiA35/Y7Vt0dmS5LaLf5M/pKtZgMBAAAAAADEEddP6yRJZe1SDSdpfN7kNEmSJ2Q4SCOh+AMAAHHhxMmztS1FyvJLi//2J9NxAAAAAAAA4kabzVWSJHeXfMNJGl9CSoYkDvUJAAAQU7LzDtTaE7tJkg5+b6N+/PoTw4kAAAAAAABavpKCDWpbFL3d5fizzIZpAglpbSRJHoo/AACA2HLGzfO0MUdKDkgf3TbRdBwAAAAAAIAW7+NFj8ttS/4kqceRfzAdp9Elp+dKkrwUfwAAALHFl5ikirMGS5K6LyvTx689bTgRAAAAAABAy1a07H+SpK1tXXK53YbTNL7UNtmSJLctlZduN5ym4Sj+AABAXDn1inv1fSen3La0YdodpuMAAAAAAAC0aN61myRJ5e3bmA3SRNKy82pv+4s3GUzSOCj+AABA3Glz0aWyHVL+92H957GbTccBAAAAAABosTK3BCVJCd16Gk7SNFLTc2RX3/aXbDWapTFQ/AEAgLgz6MyJ+rpnQvTO3GdlhePkIO0AAAAAAADNaPPaVcotjt7u/rtzzYZpIi63WyFP9HZFSYHZMI2A4g8AAMSlntdPVZVH2n9zRAtuG206DgAAAAAAQIvz6cuPySmpOFXK7/tb03GaTLD61IWVpcVmgzQCij8AABCXuh16vL49Mnpy5ravfBoXh2oAAAAAAABoTqUrPpEkFbR1G07StMK1xV+J2SCNgOIPAADErRMnz9a2ZCl7u7Toxvg8HAUAAAAAAEBT8a2PHvqycr8sw0maVqi6+AuWbzcbpBFQ/AEAgLiVnXeg1p3YTZLU5d0NWvvtZ4YTAQAAAAAAtBzZW0KSpJRD+htO0rRqVvyFykvNBmkEFH8AACCunX7LPG3MkZID0ge3XmQ6DgAAAAAAQIuw5quPlF29AK7vyWPNhmliYbcjel1ZbjhJw1H8AQCAuOZLTFLFWYMlSd2Xlenj1542nAgAAAAAACD2rfjPXEnS1gypY+eehtM0rZriz6qqMJyk4Sj+AABA3Dv1inv1fSen3La04f47TMcBAAAAAACIeRVffS5JKsr1mA3SDKzq4s+uqjScpOEo/gAAQKvQ5qJL5e58kE74872yg0GFigplh0Jxcex2AAAAAACAxpa4oViSFOzY1nCSpme5o3VZJFhlOEnDUfwBAIBWYdCZE3XQ00+rasVKrRo4SN8fPUirjh6oksefkB0ImI4HAAAAAAAQU3K3WpKkNn1+YzhJ07M91XVZIGg2SCNwmw4AAADQHELlpSp54gkVPfRQ7Tbb71fR9AclSRljR8uTnGoqHgAAAAAAQMz4+uMlalMm2Q7psFPGmo7T5GqLv2DIbJBGEBMr/ubNm6fjjz9evXr10vDhw/Xll1/uct9nnnlG5557rg4//HAdfvjhGj169G73BwAAkCSXN0Elc+fV+VjJnLlyeROaOREAAAAAAEBs+nrJfEnSliwpO+9Aw2manu12SZIcoZa/4s948bd48WJNmTJFl1xyiRYsWKBu3bpp3LhxKioqqnP/Dz/8UCeffLJmz56t+fPnKy8vT2PHjtWWLVuaOTkAAGhJLP922X5/nY/Zfr+s0rofAwAAAAAAaG2C330lSSpu6zOcpHlEvB5JkiNkGU7ScMaLv1mzZunss8/WsGHDdPDBB2vSpElKSEjQ888/X+f+U6dO1YgRI9S9e3d17txZt912m2zb1tKlS5s5OQAAaElcaelypqXV+ZgzLU2u1LofAwAAAAAAaG1SNm6XJIUP6GA4SfOIeKJnxnNS/DVMMBjUypUrNWDAgNptTqdTAwYM0LJly/boNSorKxUOh5Went5UMQEAQBywglXKGDWyzscyRoxQqIwVfwAAAAAAAFY4rLZbbElSdr9BhtM0D4c3urLRGbYNJ2k4t8k3LykpkWVZysrK2ml7VlaW1qxZs0evcffddys3N3en8nBPWFbLb23RMtSMNcYcTGD8wZRYHHvOhCRljx8vKXpOP9vvlzMtTRkjRihz1Eh9fsU4ZVz3F3XqdqjhpGiIWBx7aD1MjD/GOgAAAIDG9vnbC5RSJYWd0hGnjDUdp3l4vZIo/ox75JFHtHjxYs2ePVs+394dZ3b58uVNlAqoG2MOJjH+YEqsjb127dopd+xoZU+YIKvUL1dqmgLbivTVRaOVvHyV1kw8Xz9cN1kZ7eL/pNXxLtbGHloXxh8AAACAluyHd19UD0mbcxzqlZFrOk6zcCYkSpLcIYq/BsnIyJDL5VJRUdFO24uKipSdnb3b586cOVOPPPKIZs2apW7duu31e/fq1Usul2uvnwfsLcuytHz5csYcjGD8wZRYH3sRSc70NopI8mbmqHTUqXLfco/22yqt+8dNavv4c8o7IN90TOyDWB97iG8mxl/NewIAAABAY7G+XyVJ2tYuwXCS5uNMSJIkucIRw0kazmjx5/V61aNHDy1dulQnnniiJMm2bS1dulQjR9Z9Dh5JevTRR/Xwww9r5syZ6tWr1z69t8vl4odBaFaMOZjE+IMpLWXsHX3ahXo3HFTp5AfUcXNEKy44S945Lym3Q2fT0bCPWsrYQ3xi/AEAAABoyVI3l0qSIp0OMJyk+bgTo8WfO2w4SCNwmg4wZswYPfPMM1qwYIFWr16tW265RZWVlRo6dKgk6dprr9XUqVNr93/kkUd033336Y477lCHDh1UUFCggoIClZeXm/oIAAAgDgwaeonKr7tQ5T7pgI0RfXL+aSress50LAAAAAAAgGYTqKxQu63RVW95R55oOE3zcSemRK9Z8ddwQ4YMUXFxsaZNm6aCggJ1795djz32WO2hPjdt2iSn8+d+cv78+QqFQrr88st3ep1LL71Ul112WbNmBwAA8eW4c67SkmCVnHfP0YHrbb0/6vca+PQStcnKMx0NAAAAAACgyX32xtNqE5QCHumwwaNMx2k23pR0SfGx4s948SdJI0eO3OWhPefMmbPT/TfffLM5IgEAgFbqxPP+qv+Egsq991/qvNbWuyN/p2OeelNpreRk1gAAAAAAoPVa9/4raiNpc45DfZPTTMdpNt7kNpIkTxwUf8YP9QkAABBrBo+7RRsvPkMBt3TwD5b+O/J4lZduNx0LAAAAAACgaa35UZLkz0s2m6OZJaZGV/x5QoaDNAKKPwAAgDqcPHGK1o3/g4IuqetqS6+dO0iV5X7TsQAAAAAAAJpM+uaK6I2DOpsN0syS2+RIkrys+AMAAIhfp15+j34Yc5zCTqnbqpBeGTFIgcoK07EAAAAAAAAaXXnpdrUriEiSOg06xXCa5pWSHi3+3LZa/C9+U/wBAADsxhnXPKhVo45W2Cl1/yaol0YOUCgYMB0LAAAAAACgUX20aKa8llThk/odO9x0nGaVnp1Xe3t70RaDSRqO4g8AAKAeQ//ymL4953DZDqnHyoAWjjpKVjgOjv0AAAAAAABQbcsn/5UkbW7rlMfrM5ymeaVltK29XVqy1WCShqP4AwAA2ANn3TRbK8/qI1tSzy8q9fyo31D+AQAAAACAuOH+ab0kqTQvzXCS5udyuxVwR2+XbaP4AwAAaBXOnjxfK87oLknqtaxcz405mvIPAPbBjBkzNGzYMPXr109HHXWULr74Yq1Zs2anfQKBgCZNmqQjjzxS/fr102WXXabCwkJDiQEAAID412ZzlSTJc3BXw0nMCFUXf1WlxWaDNBDFHwAAwF74450v6ItTDpYk9f7Yr2cvPMZwIgBoeT766CONGDFCzzzzjGbNmqVwOKxx48apoqKidp877rhD//3vf3Xvvfdqzpw52rp1qy699FKDqQEAAID4VVKwQe2qf8+uy/FnmQ1jSMgTva70l5gN0kAUfwAAAHvpnLtf0he/P0CS1GdpseZf+FvDiQCgZZk5c6aGDh2qLl26qFu3brrzzju1ceNGrVy5UpJUWlqq559/Xtdff72OOuoo9ezZU3fccYeWLVumzz//3Gx4AAAAIA599NJMuSKSP0nqceQfTMcxombFX7B8u9kgDUTxBwAAsA/OufdVfXFCe0lSn3cLNP/i4w0nAoCWq7S0VJKUnp4uSVqxYoVCoZAGDBhQu0/nzp3Vvn17ij8AAACgCRR/8b4kaUtbl1xut+E0ZoRriz+/2SAN1Dr/9AAAABrBOdPf0NPjj1Hfd7aqz5ubNP+yk3TO/a+ZjgUALYpt27rjjjvUv39/de0aPZdIYWGhPB6P0tLSdto3KytLBQUFe/X6lmU1WlZgd2rGGmMOzY2xB5MYfzCFsdf4vGs3SZIq8tJb7fcadjskRRSuKNvtd2Bi/O3Ne1H8AQAANMCfHnlb88cerT7vF6vP6+s0/6qTdc49i0zHAoAWY9KkSVq1apWeeuqpJnn95cuXN8nrArvCmIMpjD2YxPiDKYy9xpO1JShJCu13YKs9yoZV3ZiVlRTt0XcQq+OP4g8AAKCBhj/ytp4bO1C9P9quPovX6BnPmTr7rgWmYwFAzLv11lv11ltvae7cuWrXrl3t9uzsbIVCIfn9/p1W/RUVFSknJ2ev3qNXr15yuVyNlhnYFcuytHz5csYcmh1jDyYx/mAKY69xbfrpWyUUR28ffsYF6tKnr9E8pixyOyVZ8jkc6tu37y73MzH+at5zT1D8AQAANJDL7dZZj7+n588/Sr0+LVOPF7/RM+6zdfbtz5iOBgAxKRKJaPLkyXr99dc1Z84cdezYcafHe/bsKY/Ho6VLl2rw4MGSpDVr1mjjxo27nYDXxeVy8cMgNCvGHExh7MEkxh9MYew1ji9efVIHSSpOlY7uf6zpOMbYbockKRKs3KNxFavjz2k6AAAAQDxwud0a9uRSreiTJKekHi8s13O3jDQdCwBi0qRJk/Tvf/9bU6dOVXJysgoKClRQUKCqqipJUmpqqoYNG6Y777xTH3zwgVasWKG//vWv6tev314XfwAAAAB2r3TFx5Kkgrate62Y5aku8YJBs0EaqHX/KQIAADQil9utM+a8rxfP/Y16rKhSt399quc9YzXshsdNRwOAmPL0009LkkaNGrXT9ilTpmjo0KGSpL/+9a9yOp26/PLLFQwGNXDgQN18883NnhUAAACId751hZKkyv2yDCcxy3ZXr5ULhswGaSCKPwAAgEbk8fp0+lMf6N9/PEKHfB1U13lLtcAzQWde+7DpaAAQM7799tt69/H5fLr55psp+wAAAIAmlr01WnSl9jjUcBKz7OoVf44WXvxxqE8AAIBG5vH6NGTuu/om3yO3LXV+8m39+94rTMcCAAAAAADYyZqvPlL29ujtPkPGmA1jWMQTXSvnCIUNJ2kYij8AAIAmkJicppPmvqtvD3bLY0kHPPaaXp7+f6ZjAQAAAAAA1Fr+yhxJ0tYMqWPnnobTmBXxeiRJzpBlOEnDUPwBAAA0keTUdJ04722tOtAlb1ja7+GX9eqjN5qOBQAAAAAAIEmq/OYLSVJRW6/hJDHAG/0OnGHbcJCGofgDAABoQinpmfrt3Ne1+gCnfCEp9/7n9NoTt5mOBQAAAAAAoMQNxZKk4H65hpOY5/D6JFH8AQAAoB5tsvJ09NzXtKajU4lBKeuf8/TmU3ebjgUAAAAAAFq53C3Rw1q26fMbw0nMc/oSJEkuij8AAADUJyOng46c/bJ+7OBQUkBK/ftMvfXs/aZjAQAAAACAVmrlh6+pTblkO6TDThlrOo5xTl+iJMkdihhO0jAUfwAAAM0kO+9A9Zv1otbmOZRSJSXd8aDe//ejpmMBAAAAAIBW6Ns3n5EkbcmK/syitXMlpUSvw4aDNBDFHwAAQDNqt38XHfLYM1rf1qHUSsk56R59+Ops07EAAAAAAEArE/zuK0lScVuf4SSxwZOYLElyW6z4AwAAwF7o2Lmn8h+Zq405Unq5ZP1tij55Y77pWAAAAAAAoBVJ2bhdkhQ+oIPhJLHBk5wevQ4ZDtJAFH8AAAAG7J/fX50enKnNWVJGmRT4yyR9/u6LpmMBAAAAAIBWwAqH1XaLLUnKOfQYw2ligy85TZLk5lCfAAAA2Bedew1Q+wce0tYMKdMv6cHHVVVcIDsUUqioMHpdXmo6JgAAAAAAiDOfv71AKVVS2CkdPmS06TgxITE9U5LkpfgDAADAvsrvd6yy771PVf26quf0WfLPeVqrjh6o748epFVHD1TJ40/IDgRMxwQAAAAAAHHkh3cXSpI25ziUlpFrNkyMSEzLkiR5Wnjx5zYdAAAAoLXrceRJqnqgn4rnzFXRQw/Vbrf9fhVNf1CSlDF2tDzJqaYiAgAAAACAOGJ9v0qStK1dguEksSOlTY4CkjyWFKiskC8xyXSkfcKKPwAAgBjgTWujknnz6nysZM5cubz8jzgAAAAAAGgcqZvLJEn2gQcYThI72mTl1d7eXrzJYJKGofgDAACIAZZ/u2y/v87HbL9fVmndjwEAAAAAAOyNQGWF2m2NSJLaH3Gi4TSxI3WHQ55uL9xsMEnDUPwBAADEAFdaupxpaXU+5kxLkyu17scAAAAAAAD2xqevz1NiUAp4pMMGjzIdJ2Z4vD4Fq0+QV+4vNBumASj+AAAAYoAVrFLGqJF1PpYxcoSsYFUzJwIAAAAAAPHI/+0X8nXtom35bZWYzC8a7yhUXfxVbm+5xZ/bdAAAAABInuRUZY8fLyl6Tj/b75czLU2ZI0YoY+RILX/refU7ebTZkAAAAAAAoEULlZfqpMvuVvhPheqUna1Qeak8yammY8WMoFtKllRZus10lH1G8QcAABAjnD6fMsaOVvaECbJK/XKlpqnsi2X6aeQohTat0ScJCTrshHNMxwQAAAAAAC2QHQioZOYslcydV/sLxxmjRip7/Hg5fT7T8WJCuLo1C5X7zQZpAA71CQAAEEM8yalyejzyZGbJ6fEo3DFPa8p/UGqlVHrjJK1bvcJ0RAAAAAAA0MKEyktVOGOGih58SLY/WmrZfr+Kpj+owkceUai81HDC2BDyRK+D5dvNBmkAij8AAIAYltm2ozre84BKUqV2xdIXE/+oyhb8W2cAAAAAAKD5ubwJKpk7r87HSubMlcub0MyJYlPY5YheV1YYTrLvKP4AAABiXLdDj1fwmotU5ZE6r7X18thjZYXDpmMBAAAAAIAWYOWbzylcUFC70u+XbL9fVim/ZCxJlqem+CsznGTfUfwBAAC0AMf+8c/64dyjZUvq+UWlnrn6FNORAAAAAABADPvv/Hv07yE95bnhHrnbtJEzLa3O/ZxpaXKl1v1Ya2O5o8WfHag0nGTfUfwBAAC0EEP/8piWH58nSer92k96ceolhhMBAAAAAIBY885zD2jhKb3U7pZH1WWNpdC2EpV9940yRo6oc/+MUSNlBauaOWVsslwOuTIytF/HHqaj7DO36QAAAADYc8OnvaYFfzxMPVYG1PHJN/XuQQ9p0JkTTccCAAAAAACGvffiIyp47AF1WxVSjiRb0rfdvGp/0VVK69tfKd17SA6HSubMle33y5mWpoxRI5U9frycPp/p+DHh+MlPKCW/uw4sLZUdCskKVsmTnGo61l6h+AMAAGhBXG63fv/4m3pn2EAduD6iiinTtOqg3urS52jT0QAAAAAAgAEfvPKkNj48VfnfhpRVve2brh61veAyDT3twtr9nD6fMsaOVvaECbJK/XKlpskKVlH6VbMDAVW9+z9tuujiFl2MUvwBAAC0MCnpmTrk/tlaO2aUcrZJq6+8UDnPvqE2WXmmowEAAAAAgGby8WtPa+3Dd6nb1wF1j0S3fXuwW1ljJ+jMoXWfHqRm9ZozM1oROj2eZska60LlpSqZOUtFDz5Uu832+1U0/UFJUsbY0S1m5R/n+AMAAGiBOnU/TN6br1e5TzpgY0Rvjvu9rHDYdCwAAAAAANDElv33eT03rK8Sr7hVh3wVkDMifXeQS5tvuVBnvLxcg3ZR+mHXXN4ElcydV+djJXPmyuVNaOZE+47iDwAAoIX6zR/O1+YLhshySN2/CerZicebjgQAAAAAAJrIl/97Sc8O7y/3JX9Tj5UBuSLS9we6tPHGMTp98Qodd85VpiO2WJZ/u2y/v87HbL9fVmndj8Uiij8AAIAW7JTLpmrFyQdLkvq8W6DnbhlpOBEAAAAAAGhMKz98Tc/+8VA5LrxWPZdXym1Lqw9wat315+rUV1bohBHXmo7Y4rnS0uVMS6vzMWdamlypdT8Wiyj+AAAAWrhz7n5JXx6aIknq8tynWjL7DsOJAAAAAABAQ33z6Zt65k+HKTz2CvX8okJuW1rT0amfrj5Lp/xnpU4afaPpiHHDClYpY1Tdv0ydMWqkrGBVMyfad27TAQAAANBwZzz2tv4z9Ah1+cFSyn1z9OXBfdR7wMmmYwEAAAAAgL206ov/6bO/X6luX5SqVzi67ccODtnDTtXJF99lNlyc8iSnKnv8eEnRc/rZfr+caWnKGDVS2ePHy+nzGU645yj+AAAA4oAvMUlHzFigr849TXmF0sbrrtHW+d2U26Gz6WgAAAAAAGAPrPnqI31852XKX+ZX71B029o8h0JD/6A/TLxLLjeVTlNy+nzKGDta2RMmyCr1y5WaJitY1aJKP4lDfQIAAMSNdvt3UeaUO7U9SWpfIC298HQFKitMxwIAAAAAALux9tvPNH/0Udp2zvnq/ZFfvpC0rq1Dqy76nU58/UudctlUSr9m4klOldPjkSczK3qdnGo60l6j+AMAAIgjfQedLv/l5yrokrqusbTwwuNMRwIAAAAAAHVYt3qF5o89WgXDR6jPB9uUGJQ25ErfjD1GJ7zxpU67chqFH/YaxR8AAECcOWn0jfrurH6SpN6f+PWv/zvdcCIAAAAAAFBj00/faP6Fv9WWocPV5/1iJQWljTnS1+cN0HFvLteZ1z5M4Yd9RvEHAAAQh4ZPekpfHJ0tSTpk0Xd6efr/GU4EAAAAAEDrtnXDaj094VitP/1M9Xm3QMkBaXOW9NW5h+u3b3yuoX+dSeGHBmMEAQAAxKnhM/6rF4f1V/dvQ2r36Mv6qEsfHXHSSNOxAAAAAABosPbt25uOUKdQealc3gRZ/u1ypaXLClapKlilRTeco4OWblTfyuh+WzKlgt/102nXPyZfYpLZ0IgrrPgDAACIUy63W8c//h+tzXMouUqqvPl2rf32M9OxAAAAAADYZ6HyUjlsW9lejxy2rVB5qelItexAQCUzZ2nV0QP1/dGDtOrogSqZOUuJcuvwHxOUWikVtJGWD+ulo974VMMnPUXph0ZH8QcAABDH2mTlqdM/H1ZxmpRbIq24dJTKS7ebjgUAAAAAiFGh8lLZoZBCRYXR61gv1h5/QnYgYDqaQuWlKpwxQ0UPPiTb75ck2X6/ih58SCWz5yjn+uv05RnddfgbH+rs25+h8EOT4VCfAAAAcS6/72+15dpLVHnrdB24ztYrY47VmfM/5rwBAAAAAICd1BRrJXPnyfb75UxLU8aokcoeP15On89otlB5qUpmzlLRgw/VbrP9fhVNf1CSlDF2tDzJqQpUVqjcX6SybQUq9xerqmy7qsq3qapsu4IVZQpVlsqqqpIVqJAVqJQdqJJCQUWCQUVCITlCYSkcliMcljNsyRG25QzbcloRuayInOHotSsckduSXJaUlJql/v9+XSVz59WZvXjePHWZOEF//O0LzfJdoXXjpz0AAACtwG/PulQLf1ihLo+/rR4rqvTMn3+vPz2wxHQsAAAAAGh16joHnCc51XSs3RdrkYhSzx6md199XHYoqHCwSpFwSFYoqEg4LNuKlmaRcFh2OCRZliJWWBHLksOyqu9bcli2ZEcvDsuSw45Ub4vIYdvR+3ZETjsih63a20kZuTp21uJdFmslc+Yqe9w4fX30AKmoZKfHEqovTcmXm6lwUVHtSr9fsv1+WWWlcmZmNXESgOIPAACg1Tjj/x7W/LUnqc/r69R7yQYtuOsinXndDNOxAAAAAKDRxWq51hgr6spLt6t46zr5CzeqrGSzKrYVKlBaomDZdoUq/LIqymVXVkiBKikQlCMYlCMYlitkyRm05Q7bcoVseUKSJxSRJyQlp2aq98u7XrFWMneesi+4QJ1mvCKrpKTOfZqSz5dab7EWLimRLytbgR2KP9shBd1SyC1ZLinsksLu6LXldsh2OWS5JMvtlO1yKOJ2ynY7FXG7ZLvdktutiMclh8creTxyeH1yen1y+hLlTkiUKyFJnsRUZeTsp065uXKmpdWZ0ZmWJldqWpN9P8COKP4AAABakeH/XKwXzj1CPb+sVKe57+itg+7XscMvMx0LAAAAQAvVvn170xF+xfThKivL/dpWuFH+4s0qKy5Qhb9QlduLNPB356ns+YW7XFGXcMwgvT5pjFxBS+6QLXc4Ind1MecJSd6Q5AtJbjv6XKektOpLQ/nyshQuLt59sbZtmyo752nb6hLZDsl2RYs12+lQxCnZTinidOxw7VCkzotTckWvo7edkjNatjmcLskVvThcLjncHsnlUnrOfuqUk7PbYs2dkyN78rVK9CYopU2WktOylJjcfGVbqLxUGaNG1h56dEcZo0bKClbJ6fE0Wx60XhR/AAAArYjL7daQmf/VW8MG6KC1tir+/qC+PbiX8vsdazoaAAAAgBakZkVdttcjh20rVFUREyvq6j0P3JjztbVgvUqLN6ts21ZVbi9Spb9EwbJtClWUyaook1VVoUigqnrFXEiOUFCOoCVnKCxX9Wo5V3Up5w5XF3Ph6nIuLHmsn/MkVV9cGRlKOPsqrdvNirouF1ygXpuS9nhFneWQAh4p6JFCtReHwm6HLI+z+uKS7XUp4vUq4vNIvgS5EhLlTEySKylVnuRUJaRmKKtD53pXrLlzctRn7vN7/GfR2Oot1kIBHdxnoIFkUZ7kVGWPHy8peujRWDtHIloPij8AAIBWJjk1Xb2mP6UfzjtHuSXST1derNxnX1dGTgfT0QAAAAC0AM29om5b0SYVrF+jbVvWqrRwgypKtii4vVjhsu2yK8oUqaiQsyqgtKQsDb73hXrPA1d+7gWKlJQoWVJyo6f9ma1oKRf0SAmd8xQuKdn9OeBKS7Vl+NHasHaFHAmJciWlyJ2UIk9yuhLTs5SQnqWUjFy1ycpTm9z9lJyaKZe78X7EH+sr1lpCseb0+ZQxdrSyJ0yQVeqXKzUt+r3FQDa0HhR/AAAArdD+Xfpo86QbVHbt7eq4OaK3xv1Bpzz3sTxeJiMAAABALIjVc9TVu6Ju7OjanOWl21WwfpVKNv+k7YUbVVG0SQF/scL+ElkVZYpUlMtRGZAzEJQ7EJYrYMkTtOUNRuQNSgkBKSH48wq6lOrLrvi6tlW4sLDe88C5c7IV2lYSXS3njq6UC7ulkNuhsMchy+OQ5XZEV8t5nLLdbkW8HkV80XO8OXwJciQkRku5pBR5k9PlTYmWcykZuUrLaKu07DylpufsVMzZodDuzwGXmalBV/1j7/5AGhHFWuOoGf/OzKzoNYf3RDOj+AMAAGiljjhppBatXqmE+xeq23chPT/heJ3z+P9MxwIAAACaRawWa1Lzr6gLBQMqLdmqkq3rVLqtQBUlW1VZWqJA6TaFyrcrXFEmq7JcqUnpGvx/D+5+Rd0FF+jTEwfKtblIvnB0e0L1paEqvVKVTwp4paDXoZDXoZDXJcvnkuXzKKVDwh6dBy539uNKTGnTqKvl9oQVrIrpFXVSyynWLMtSYSCo3HRnzPx3C8QKij8AAIBW7OSJU/Svn75R74XfqM/7xXr2b3/U8Nv+ZToWAAAA4gDF2r7Z7Yq6SESp5/5RH77xtAJl2xQs9ytU7pdVWS67MnpeukgwIEcgJEcwJGcoLGfIqj4nnV19TjrJHYpEz0VXfU46X+jn9/dVXzLqyObr2kXhgoLdr6grLlZGUqYC4aLa7QH3z4VdwOtQyOdQyOuMFnZet+wEn5SYIEdSslwpqfKkZiqhTZaSMtsqve3+yso7ULkdOu/REUr25DxwKW2y632dptASVtRJLWfF2saNG5Wbm2s6BhBzKP4AAABauT/euUD/2niken/kV9cFX+o/B92qwWNvMh0LAAAAe6h9+/amI/xKiy7WzhmupW88pXCgUlawUlagSlYwKDsUkB2skh0OKRIKKhIKKWKFpfAOF8uSLFuOsCWHbcthRS9OOyKHFZHTishhR6+ddkROW3JZit62pMT0bB3xwmu7XlE3d56yL7hA7e99RlZJSZN8P0F39cXz8yEwwx6Hwm6HfOnl6pSdXe+KuuKJw+VKSFJW+wOV1b6zklPTmyRrXWK9XGsJK+oAtGwUfwAAANDQx97R4jMPVdfVlto88LSWde6lfsecaToWAAAtSiyWLy1BLK8Kk2I7X022bK9HDttWqKoiJrLVdw64Nuefp81bflRlWYkq/CUKVPhVVeZXuLJUwYoyWYEKhasqZQcqZQcDsoOB6qItKEcoLIVC0WItbMlhWXKGbTnCtpxWRC4rImc4eu2yJJcVXeHmsiS3JSWmZarn4tfrLdb2m/Z8kxVru+PLzlC4qKjec9QFD2in4khJ9Lx01aWc5XbI8jpluZ2yPW5FvC5FPF7J55V8PjkTEuVMTK4+L12qfCnpSkjPVnJallIy2yk9u53SM/PkS0zabcY9WVF32B9GNcr3sa9ivVxrKSvqALRMFH8AAACQx+vTUY+8qC/POUUdCqRNN/xVm+Z1V94B3UxHA4C4EMvFgUS+horV8qVGLH9/sbwqLNbzNUW28tLt2la4Qf7CjSot2ayK7SUK+IvqPJykAgEpGJIjGKw+lKQlZ8hWamqOjnt80e7PATdunKrOmyi7pKT23G/NtR7Mt1+WwsXF9RZrgU7ttM0uke2ULJdkOSXb6ZDtkmyXQ3b1/YjLIdvlVGSH23I5FXE5FXG5o7fdLsntllxuOd1uyeORw+WR0+OV0+uT0+ORy5sgpzdRGdl56pSbu/sVdbm56jn/hab+qnYp1lfU1aBcA9BaUfwBAABAkpTbobPa/eMebbvkKuUVSp9cNEwnPr9UiclppqMBQIsWy8UB+chnUn2rwjLGjm7UgtIKh1VVWa5QsELhYJVCwYBCVRUKBasUDgcVDgZlhQMKVVXJsgLq1e9ElT79zK4PB3n2MP3v9TnRByJ29HHbrt4zUr05stPjilRvr75fe23XbN95/8gvnldzffyZl6vshRd3mS1x8IladPcEKRiSMxiSM2TVnufNHY5Un+ctIk9I8oQlbyh6cdu1L6fU6sve8nVN2aMVa+6cbIVKShRyS2FX9eEkq6+jRZtDljtaslkuR7RscztluxyKuF2y3U5F3G7J7VbE45bD44kWal6vnN4EOX0+uXxJcvsS5UlMkTsxWd6kVGXktJd7D4q1Xk+bK9bqXVEXrDJeYsX6ijoAaM0o/gAAAFCr52/+oDeuWq7EKbN08I+2XrrwBJ391MemYwFAi1XfeaRShp2h/744fbevUfuD/71RUxLU4/gzL1fZ8wt3nW/o6Xrj+X/u8LI7vO4Ot2vLjegr7GL/miJjFzl3fL3qfQePuEHlC1/aZb7k00/Rq3NvlexdvM6OH9be8Xvc8b12leHn245dbB9ywe2qeOmVXeZLOvn3WvTw/0mRiCKKyBGJ7FDsRKrfL6JIpPo9diiFovers0Zq3jcixy9u/1wI1ewbqf14J//fw6pc8t9d5ks47hi9cucFctQ8/xev5YjU3ql+XHLo59ev2d+xw26KRHZ6PceOX/kOz0/OzNUJj9SzKuyCC/TWWccqULRVDltyRqKZnLbkiKj2tjOi6sd3fd9pS8463+lnzuqLR5IrI0O+N4ZqbT2Hg9z/wX83++EgXRkZShg3WevqydZ/RVhWybZ9eg/bIQU81ed4c0fP8xZyOxT27OJwkl6v5PVKCQlq066jOuXk1HsOuLZPzVVCQpJc7ub/8WSsF2staUWdZVkqDASVm+6MmZXEANDaUfwBAABgJyeMuFbPr16hQ576WL0+K9P8q07WOfcsMh0LQCs0b948zZw5UwUFBerWrZtuvPFG9e7d23SsveLyJtR7Hqkuc941ch4pV0aGEsbeWm95kD//I2P5Eq9oq/X15Ov18nfG8iXd0EEb6snX770CY/mSO3XWxrkX1Pl4ydx56nLBBeq3xmPmPGZdkxUuLNz9qrDiYu0fTFNg05Ymz2NL1YdujJZeCe1z6z8c5LZtqjooT/5VO39/EYcadr/mxi72a9Olg8IlJbvNZvn92nr8Idr44xfV5ZxXDp9PDl+CnAmJciWlyJ2UIl9KG3lT0pXUJlvJbXKUltVebbLzlJya2aBCbk/OAZecYu6oEi2hWGtJK+o2btyo3Nxc0zEAANUo/gAAAPArw26arafXH6O+72xVz1fW6KUD/qxTr7jXdCwArcjixYs1ZcoUTZo0SX369NGTTz6pcePG6dVXX1VWVpbpeHvM8m+vtzgoy2+v7asav3j5ZZnwS3tSHoS3bVNptw7a9v3P+XZaRbfDe+y8ve43r6vQ2NXrZXbpqPC2bbsvN7Zv17Y+HVW8atvOr7Pje+4UxVHnzV1lqDN79T7ZXTrtUb6iIw7S1lWfV7+uY+c8DikiR/QxR01exw6P6efv0rHj9+fY5fNr3iOva091qmf8WaWl2nJyP21Y9dnPn92xw4vteL/m9R01n8Ehxy9u13xGOaKfw+Fw7vAcR3Vsh+RwKC0zp/5VYdnZWj24u0qPypXD5ZbT5ZHT7ZLD5ZbD5ZHb45XDXbPdI7fHFz1nmtsjt8cjjydRLq9XLrdPbq9XHl+S3B6fPN7oxe1NkC8hWV5f3avO7FCo3lVrvec9X+f329Tqy+bKytLA2x8zkCyKYq1xcI46AMC+oPgDAABAnc5+8A0tHH6oDvk6qPaP/0fvHzxLA04eYzoWgFZi1qxZOvvsszVs2DBJ0qRJk/TWW2/p+eef1/jqHya3BK609HqLg/5PPGcgWdSeFBuHznrWQLKoesuN7Gwd8fC/DCSL2pN8R90310CyqHrzZWZq0N92f6jZplTvqrBwUL+75C4DyaKsYFXMHg4ylrPVoFgDAMAMij8AAADUyeV263ePv673hx2rAzZG5Jy7UIGjT4+ey8O/Xa60dFnBKs7lAaDRBYNBrVy5UhdddFHtNqfTqQEDBmjZsmV79VqWZTV2vL1i78EP5yPO+s481nTI1zDkaxhnQtJuV4VF3G6j/w3Hcr5YzrYjZ0KSIpKc6W2i1wlJMZELjafmz5M/VzQ3xh5MMjH+9ua9KP4AAACwS2kZuepy32Mqvf0u9Zo+S8VPzlXJvHkxebgmAPGjpKRElmX96pCeWVlZWrNmzV691vLlyxsz2l5r166d8nbzw/lNhYXavPk78pGvVearyZj7y1VhgarqbJuNZov1fLGcDa2P6X9v0Xox9mBSrI4/ij8AAADsVudeA1Q57WEVz5mrooceqt1u+/21Kwgyxo5m5R+AmNSrVy+5XC6jGSLSrw93F6hSxO1Wu3bt1K5dO/KRr9Xmq7HTqrDEJLVLTIqZbFJs5wtblooCQWWlROSKsWyIf5Zlafny5THx7y1aF8YeTDIx/mrec09Q/AEAAKBevoxsrZ03r87HSubMVfaECc2cCEA8y8jIkMvlUlFR0U7bi4qKlJ2dvVev5XK5YuKHQa4YP48U+RrGlZwqy7JUGAgqN90pT0ps/TJMrH9/aLiNGzcqNzc3Jv6+Q+sUK//eovVh7MGkWB1/5g7kDgAAgBbD8m+X7ffX+Zjt98sqLNT/Hvir3l80S1Y43MzpAMQbr9erHj16aOnSpbXbbNvW0qVL1a9fP4PJgN3buHGj6QgAAABo5VjxBwAAgHq50tLlTEurs/xzpqXJlZ6unHlvySpZoA9v/rs27e+TfUi+eg+bqPx+xzZ7XgAt35gxY3TdddepZ8+e6t27t5588klVVlZq6NChpqMBAAAAQMyi+AMAAEC9rGCVMkaNrD2n344yRo5Q+doftCp1u/bzSxllUsZXAemrL2U/N1FvZksFB6TI1+9wDfjT1crt0NnAJwDQ0gwZMkTFxcWaNm2aCgoK1L17dz322GN7fahPAAAAAGhNKP4AAABQL09yqrLHj5cUPaef7ffLmZamjFEjlT1+vJw+n05+baVKCjbovaenquKT95X543a13yrlFUp5hWXSp//Vlsf/q0/zHNp+YJYyB56kQcOvUGJymuFPByBWjRw5UiNHjjQdAwAAAABaDIo/AAAA7BGnz6eMsaOVPWGCrFK/XKlpsoJVcvp8tftk5HTQqZffU3t/zVcf6bPnHpCWL1e7n6qU5Zc6bYhIGwql957SV/98Suv3c6uyy37qNPgcHf67EXK5+V9UAAAAAACAfcFPVQAAALDHPMmpkiRnZlb02uPZ7f4HHXKEDrppdu39T96Yr9WL58r37U/ab31YyVVS19VhafWP0qt36oPUO6vPD9hNfYZdrPy+v22yzwIAAAAAABBvKP4AAADQbA474RwddsI5kqRAZYXee+EBFbyzWKlrCtRxo63MUilzZUBa+YXsZy/SGzlS4f6pSuh/pI4+9ypl5x1Y73u0b9++qT/GPgmVl8rlTZDl3y5XWrqsYFVtkQoAAAAAANAYKP4AAABghC8xSSeMuFYaca0kqXjLOr339N2q/PQDZf3oV4cCqX2B1L6gVPp0iTbNXKKP2ju0/aBsZQ04Scf88Sr5EpNqX6+mWMv2euSwbYWqKmKmWLMDAZXMnKWSufPqPD9iLKCYBAAAAACg5aP4AwAAQEzIbNtRp/35vtr7q5e/r2XPT5dWfKW8tVXK9EsHro9I6wukd+Zp+T/naX1HtzJPOllHX3hzzBZrofJSlcycpaIHH6rdZvv9Kpr+oCQpY+xo4wVbaygmTT8fAAAAAIDmQPEHAACAmNS51wB17jVAkmSFw/r0zfla8+rTSvhurfZbF1ZyQMr/Pqz9rvydih95VEUP1VGsRSJKGjJYLz94lSKWJYdlSbYlWbZkVV/bthx2zXVEDtuWw4rIEYlU34/scF9y2hE5IpLTqr62JYcdkTMiOS1Fr+3oJSE9S/1eel0lc+fV+RlL5sxV9gUX6LMTjlZge7Fsp2Q7VHsdqb121N6POCTb6VCk9r5DkZr7LsdO9+Wsue2Uqh+T06mIyylV3z75modV+dobdReTkYiSzzhFS56dKpcvUZ6EZLkTk+VNTJYnMUW+pDQlpqQrKaWNktIylJyWJa8vSS53404zGlpMmn4+AAAAAADNheIPAAAAMc/lduuIk0bqiJNGSoqeH/DdZ+9V4KsvlT9ggDb+5a91Pq9k7jxlX3CB+i8tkVVS0pyRJUm+vEyFi4pk+/11Pm77/QoXF6tNcpYCG4p380qReu7vG1dGhpL3P1Abd1VMVn9/3Z9btsvvLyKpvPpSIMmWFHZLYdfPF2uni0OWK1pe2q7qi9sp2+VQxBUtJSNulyJut+R2RYvJ/yzZZTGZdMoftHhm3X/+kjTkgjtU8dIrTfN8xcaKTQAAAAAAalD8AQAAoMXxJSbpxPOiZU2oqHC3xZq1fbuKjuqszd9/EV399ouLnM7obZdLcjqrV8Q5JKdLcrvkcLokV/TicLvlcFVf3B45XG45PV45XR45PR453R65vQlyVF+nZbRVp9xcOdPS6szoTEuTOydHJZf+Uf7iLbKtsKxQQHYoKCscViQclG2FZYdDilhW9XU4egmHJdtSxLIUsS3JshSx7dpVjdHbtmRHJLuulY0R5XXpoU7bt+/++9u2Tf5D9lPpNyVyWZLLktzVF5cleapXOdZ+JknecPRSt8gvrnfNlZGh5I6d6i0m+765oc5i0pWRoaQbOmhDUz1/zlxlT5hQ7+cAAAAAAKC5xETxN2/ePM2cOVMFBQXq1q2bbrzxRvXu3XuX+7/yyiu67777tGHDBnXq1EnXXHONjjnmmGZMDAAAgFjhSkvfbbHmys7WUffMMZAsKlReqoxRI2tXiO0oY9RIWaGADj1phIFkUXYotPvvLydHh818ZrevUVnuV7m/SBX+ElVW+FVZtl2B8u0KVpQqWFmucFWFQlXlsqqqZAUrZQeqZIeCilRfFAopEg7JEbakcFiOsCVH2Fb7zofUX0xu367iwzqp8Lvtv3o8u2snhbdta9rnl/rlzMza7fcDAAAAAEBzMV78LV68WFOmTNGkSZPUp08fPfnkkxo3bpxeffVVZWX9egL92Wef6eqrr9ZVV12l4447Ti+99JIuueQSvfDCC+ratauBTwAAAACTrGDV7ou1YJWcHo+BZFGe5FRljx8vKbpCLNbOEdcY319icpoSk9OkvAMbPV+9xWR2tn5z/1Pmnp+atoefBAAAAACApuc0HWDWrFk6++yzNWzYMB188MGaNGmSEhIS9Pzzz9e5/+zZszVo0CBdcMEF6ty5s/785z/rkEMO0dy5c5s5OQAAAGJBTbGWdcnFcqZFSxhnWpqyLrlY2ePHx8T515w+nzLGjlaX/72ng99/T13+954yxo42XvpJsf/91RSTdakpJmP5+QAAAAAANCejK/6CwaBWrlypiy66qHab0+nUgAEDtGzZsjqf8/nnn2v06NE7bRs4cKCWLFnSlFEBAAAQw2qKtewJE2SV+uVKTYuuVIuBYq1GTYFWc1hIk6sQfymWv7+Grpg0/XwAAAAAAJqT0eKvpKRElmX96pCeWVlZWrNmTZ3PKSwsVHZ29q/2Lyws3KP3jEQikqKlo8vl2ofUwN6xLEsSYw5mMP5gCmMPRnh8CoTDKq6sUmZiklwen4LBoOlULYfHp3AkIqWkRq9j6ftzOJR2/ihlXHihrLJSuVJSZQWqFHY4pD3J2EzPN/F3X8171sxz8LOa76TmOwKaWs1YY8yhuTH2YBLjD6Yw9mCSifG3N3M/4+f4a262bUuSvvrqK8NJ0Now5mAS4w+mMPZgyubNm01HQCtm4u++mnkOflbznSxfvtxwErQ2jDmYwtiDSYw/mMLYg0kmxt+ezP2MFn8ZGRlyuVwqKiraaXtRUdGvVvXVyM7O/tXqvt3t/0tut1u9evWS0+mUw+HYt+AAAAAAEAMikYhs25bb3ep+p7NezP0AAAAAxIu9mfsZnR16vV716NFDS5cu1Yknnigp2lYuXbpUI0eOrPM5ffv21QcffLDTef7ef/999e3bd4/e0+l0yuv1NjQ6AAAAACCGMfcDAAAA0Bo5TQcYM2aMnnnmGS1YsECrV6/WLbfcosrKSg0dOlSSdO2112rq1Km1+5933nl699139fjjj2v16tW6//77tWLFil0WhQAAAAAAAAAAAEBrYPx4MEOGDFFxcbGmTZumgoICde/eXY899ljtoTs3bdokp/PnfrJ///66++67de+99+qee+5Rp06dNH36dHXt2tXURwAAAAAAAAAAAACMc0QikYjpEAAAAAAAAAAAAAAaxvihPgEAAAAAAAAAAAA0HMUfAAAAAAAAAAAAEAco/gAAAAAAAAAAAIA4QPEHAAAAAAAAAAAAxAGKP6ARzJs3T8cff7x69eql4cOH68svv9zlvs8884zOPfdcHX744Tr88MM1evTo3e4P1Gdvxt+OFi1apPz8fF188cVNnBDxam/Hnt/v16RJkzRw4ED17NlTgwcP1ttvv91MaRFP9nbsPfHEExo8eLB69+6tY445RnfccYcCgUAzpUW8+PjjjzVhwgQNHDhQ+fn5WrJkSb3P+fDDD3XmmWeqZ8+e+t3vfqcXXnihGZICaErM/WAK8z6YxNwPpjD3gwnxMPej+AMaaPHixZoyZYouueQSLViwQN26ddO4ceNUVFRU5/4ffvihTj75ZM2ePVvz589XXl6exo4dqy1btjRzcsSDvR1/NdavX6+77rpLhx12WDMlRbzZ27EXDAY1ZswYbdiwQffdd59effVVTZ48WW3btm3m5Gjp9nbsvfTSS5o6daouvfRSLV68WLfffrsWL16se+65p5mTo6WrqKhQfn6+br755j3af926dbrooot05JFH6sUXX9T555+vv/3tb3r33XebOCmApsLcD6Yw74NJzP1gCnM/mBIPcz9HJBKJGHt3IA4MHz5cvXr10k033SRJsm1bxxxzjEaNGqXx48fX+3zLsnT44Yfrpptu0hlnnNHEaRFv9mX8WZalESNGaNiwYfr000/l9/v14IMPNmdsxIG9HXtPP/20Zs6cqVdeeUUej6e54yKO7O3Yu/XWW7V69Wo9+eSTtdvuvPNOffHFF3r66aebLTfiS35+vqZPn64TTzxxl/v84x//0Ntvv62XX365dtuVV14pv9+vmTNnNkdMAI2MuR9MYd4Hk5j7wRTmfogFLXXux4o/oAGCwaBWrlypAQMG1G5zOp0aMGCAli1btkevUVlZqXA4rPT09KaKiTi1r+Nv+vTpysrK0vDhw5sjJuLQvoy9N998U3379tWtt96qAQMG6JRTTtHDDz8sy7KaKzbiwL6MvX79+mnlypW1h4RZt26d3n77bR1zzDHNkhmt1+eff66jjjpqp20DBw7U559/biYQgAZh7gdTmPfBJOZ+MIW5H1qSWJz7uY29MxAHSkpKZFmWsrKydtqelZWlNWvW7NFr3H333crNzd3pHzJgT+zL+Pvkk0/03HPPaeHChc2QEPFqX8beunXr9MEHH+jUU0/VI488orVr12rSpEkKh8O69NJLmyM24sC+jL1TTz1VJSUlOvfccxWJRBQOh3XOOedowoQJzREZrVhhYaGys7N32padna2ysjJVVVUpISHBUDIA+4K5H0xh3geTmPvBFOZ+aElice7Hij/AoEceeUSLFy/WAw88IJ/PZzoO4lxZWZmuvfZaTZ48WZmZmabjoJWJRCLKysrS5MmT1bNnTw0ZMkQTJkzQ/PnzTUdDnPvwww81Y8YM3XzzzXrhhRf0wAMP6O2339b06dNNRwMAtCLM/dBcmPfBNOZ+MIW5H/AzVvwBDZCRkSGXy/Wrk8oWFRX9quX/pZkzZ+qRRx7RrFmz1K1bt6aMiTi1t+Nv3bp12rBhgyZOnFi7zbZtSdIhhxyiV199Vfvvv3/ThkZc2Je/+3JycuR2u+VyuWq3HXTQQSooKFAwGJTX623SzIgP+zL27rvvPp122mm1h7nKz89XRUWFbrrpJk2cOFFOJ78Hh6aRnZ2twsLCnbYVFhYqJSWF1X5AC8TcD6Yw74NJzP1gCnM/tCSxOPdjtAMN4PV61aNHDy1durR2m23bWrp0qfr167fL5z366KN68MEH9dhjj6lXr17NERVxaG/H30EHHaSXXnpJCxcurL0cf/zxOvLII7Vw4UK1a9euOeOjBduXv/v69++vtWvX1v7QQZJ+/PFH5eTkMPHDHtuXsVdVVfWrCV7NDyEikUjThUWr17dvX33wwQc7bXv//ffVt29fM4EANAhzP5jCvA8mMfeDKcz90JLE4tyPFX9AA40ZM0bXXXedevbsqd69e+vJJ59UZWWlhg4dKkm69tpr1bZtW1199dWSood4mTZtmqZOnaoOHTqooKBAkpSUlKTk5GRjnwMt096MP5/Pp65du+70/LS0NEn61XagPnv7d9+f/vQnzZ07V7fffrtGjhypn376STNmzNCoUaNMfgy0QHs79o477jjNmjVLhxxyiHr37q21a9fqvvvu03HHHbfTbyED9SkvL9fatWtr769fv15ff/210tPT1b59e02dOlVbtmzR3//+d0nSOeeco3nz5unvf/+7hg0bpg8++ECvvPKKZsyYYeojAGgg5n4whXkfTGLuB1OY+8GUeJj7UfwBDTRkyBAVFxdr2rRpKigoUPfu3fXYY4/VLjvftGnTTr9tMn/+fIVCIV1++eU7vc6ll16qyy67rFmzo+Xb2/EHNJa9HXt5eXmaOXOmpkyZotNOO01t27bVeeedpwsvvNDUR0ALtbdjb+LEiXI4HLr33nu1ZcsWZWZm6rjjjtOVV15p6iOghVqxYoXOO++82vtTpkyRJJ155pm68847VVBQoE2bNtU+3rFjR82YMUNTpkzR7Nmz1a5dO912220aNGhQs2cH0DiY+8EU5n0wibkfTGHuB1PiYe7niLDOFQAAAAAAAAAAAGjx+HUgAAAAAAAAAAAAIA5Q/AEAAAAAAAAAAABxgOIPAAAAAAAAAAAAiAMUfwAAAAAAAAAAAEAcoPgDAAAAAAAAAAAA4gDFHwAAAAAAAAAAABAHKP4AAAAAAAAAAACAOEDxBwAAAAAAAAAAAMQBij8AAOrxwgsv6LDDDjMdo8Hy8/O1ZMkS0zEAAAAAIOYw7wMAxAtHJBKJmA4BAEB9rr/+ei1YsECS5PF4lJeXp9NPP10TJkyQ2+1u0veuqqpSeXm5srKymvR9XnjhBf3lL3+RJDkcDmVnZ+uwww7Ttddeq/bt2+/x69x///1asmSJXnzxxZ22FxQUKD09XV6vt1FzAwAAAEBjYN7HvA8A0HCs+AMAtBiDBg3Se++9p//85z8aM2aMHnjgAc2cObPOfYPBYKO9b0JCQpNP/mqkpKTovffe0zvvvKNp06bphx9+0BVXXNEor52Tk8PkDwAAAEBMY97XMMz7AAAUfwCAFsPr9SonJ0cdOnTQueeeqwEDBujNN9+UFP3N0IsvvlgPPfSQBg4cqN///veS6j7MyWGHHaYXXnhBkrR+/Xrl5+frtdde06hRo9SnTx+ddtppWrZsWe3+vzzky/3336/TTz9dCxcu1PHHH69DDz1UV155pcrKymr3KSsr09VXX62+fftq4MCBeuKJJzRq1Cjdfvvtu/2MDodDOTk5ys3NVf/+/XXWWWfpyy+/3Om1//GPf2jw4MHq06ePTjjhBN17770KhUK1WR944AF98803ys/PV35+fu1n/eV38e233+q8885T7969deSRR+rGG29UeXn5nv+BAAAAAEAjY94XxbwPALCvKP4AAC2Wz+ernfhI0tKlS/XDDz9o1qxZmjFjxl691j//+U+NGzdOCxcuVKdOnXT11VcrHA7vcv+1a9fqjTfe0MMPP6wZM2bo448/1qOPPlr7+J133qlly5bpoYce0uOPP65PPvlEK1eu3KtMRUVFev311+VyueR0/vxPdnJysqZMmaJFixbphhtu0LPPPqsnnnhCkjRkyBCNHTtWXbp00Xvvvaf33ntPQ4YM+dVrV1RUaNy4cUpPT9dzzz2ne++9V++//74mT568VxkBAAAAoCkx72PeBwDYO017cGwAAJpAJBLR0qVL9d5772nkyJG125OSknTbbbft02FNxo4dq2OPPVaSdPnll+vkk0/WTz/9pM6dO+8yw5QpU5SSkiJJOu2007R06dLa3wBduHCh7r77bh111FGSpClTpmjQoEH15igtLVW/fv0UiURUWVkpSRo1apSSkpJq97n44otrb++333764YcftGjRIl144YVKSEhQUlKSXC6XcnJydvk+L7/8soLBoO66667a177ppps0YcIEXXPNNcrOzq43KwAAAAA0FeZ9zPsAAPuG4g8A0GK89dZb6tevn0KhkCKRiE455RRddtlltY937dp1n89lkJ+fX3u7ZuJUXFy8ywlghw4daid/kpSbm6uioiJJ0cPIhEIh9e7du/bx1NRUHXjggfXmSE5O1oIFCxQOh/XOO+/opZde0pVXXrnTPosXL9bs2bO1bt06VVRUKBwO75RlT6xevVr5+fk7TSz79+8v27b1ww8/MAEEAAAAYATzvijmfQCAfUXxBwBoMY488kjdcsst8ng8ys3Nldu98z9jiYmJv3qOw+FQJBLZaVtdh3LxeDw7PUeSbNveZZZfvrekX73PvnA6nTrggAMkSZ07d9batWt1yy236B//+IckadmyZbrmmmt02WWXaeDAgUpNTdWiRYs0a9asBr83AAAAAJjGvI95HwCgYTjHHwCgxUhMTNQBBxyg9u3b1zkBq0tmZqa2bt1ae//HH3+sPZRKU9lvv/3k8Xi0fPny2m2lpaX68ccf9/q1xo8fr1deeaX2PBHLli1T+/btNXHiRPXq1UudOnXSxo0bd3qOx+PZ7eRVik4uv/32W1VUVNRu++yzz+R0OvfoN1QBAAAAoCkw72PeBwBoGIo/AEBc+81vfqN58+bpq6++0vLly3XzzTfv9FueTSElJUVnnHGG/v73v+uDDz7QqlWrdMMNN8jhcNT+VumeysvL04knnqhp06ZJkg444ABt2rRJixYt0tq1azV79mwtWbJkp+d06NBB69ev19dff63i4mIFg8Ffve6pp54qr9er66+/Xt99950++OADTZ48WaeffjqHewEAAADQojDvY94HAPgZxR8AIK5dd911ysvL04gRI3TNNddo7NixSkhIaPL3vf7669W3b19NmDBBY8aMUf/+/dW5c2f5fL69fq3Ro0frrbfe0pdffqkTTjhB559/vm699VadfvrpWrZsmSZOnLjT/oMHD9agQYN03nnn6aijjtLLL7/8q9dMTEzUzJkztW3bNp111lm64oordNRRR+nGG2/c588MAAAAACYw72PeBwD4mSPSGAemBgAAu1VRUaHf/va3uu666zR8+HDTcQAAAAAAjYx5HwAgFuzZgbIBAMBe+eqrr7RmzRr17t1bpaWlmj59uiTphBNOMJwMAAAAANAYmPcBAGIRxR8AAE3k8ccf1w8//CCPx6MePXpo3rx5yszMNB0LAAAAANBImPcBAGINh/oEAAAAAAAAAAAA4oDTdAAAAAAAAAAAAAAADUfxBwAAAAAAAAAAAMQBij8AAAAAAAAAAAAgDlD8AQAAAAAAAAAAAHGA4g8AAAAAAAAAAACIAxR/AAAAAAAAAAAAQByg+AMAAAAAAAAAAADiAMUfAAAAAAAAAAAAEAco/gAAAAAAAAAAAIA48P/yZPLnjxAPwgAAAABJRU5ErkJggg==\n" + }, + "metadata": {} + } + ] + } + ] +} \ No newline at end of file From 417e4b199f05730ae1a402d4ac84b9e99485d76e Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 20 Aug 2025 10:31:18 +0530 Subject: [PATCH 022/115] Delete Model_Pruning.ipynb --- Model_Pruning.ipynb | 4273 ------------------------------------------- 1 file changed, 4273 deletions(-) delete mode 100644 Model_Pruning.ipynb diff --git a/Model_Pruning.ipynb b/Model_Pruning.ipynb deleted file mode 100644 index e4c7ade6eca3..000000000000 --- a/Model_Pruning.ipynb +++ /dev/null @@ -1,4273 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [], - "gpuType": "T4", - "mount_file_id": "1vgIMup-BMpDekluxQuvlxC2Eb7TgG0ZU", - "authorship_tag": "ABX9TyOtiH25/guWiSeu/gW4r4Wh", - "include_colab_link": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github", - "colab_type": "text" - }, - "source": [ - "\"Open" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ShWb1fdNpcdg", - "outputId": "98484742-d8bc-4b57-a283-9d98a783aafb" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Requirement already satisfied: pre-commit in /usr/local/lib/python3.11/dist-packages (4.3.0)\n", - "Requirement already satisfied: cfgv>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (3.4.0)\n", - "Requirement already satisfied: identify>=1.0.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (2.6.13)\n", - "Requirement already satisfied: nodeenv>=0.11.1 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (1.9.1)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (6.0.2)\n", - "Requirement already satisfied: virtualenv>=20.10.0 in /usr/local/lib/python3.11/dist-packages (from pre-commit) (20.34.0)\n", - "Requirement already satisfied: distlib<1,>=0.3.7 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (0.4.0)\n", - "Requirement already satisfied: filelock<4,>=3.12.2 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (3.18.0)\n", - "Requirement already satisfied: platformdirs<5,>=3.9.1 in /usr/local/lib/python3.11/dist-packages (from virtualenv>=20.10.0->pre-commit) (4.3.8)\n", - "/content\n", - "rm: cannot remove 'keras_hub_repo/': No such file or directory\n", - "Cloning into 'keras_repo'...\n", - "remote: Enumerating objects: 97462, done.\u001b[K\n", - "remote: Counting objects: 100% (354/354), done.\u001b[K\n", - "remote: Compressing objects: 100% (210/210), done.\u001b[K\n", - "remote: Total 97462 (delta 274), reused 145 (delta 144), pack-reused 97108 (from 3)\u001b[K\n", - "Receiving objects: 100% (97462/97462), 46.82 MiB | 15.16 MiB/s, done.\n", - "Resolving deltas: 100% (76881/76881), done.\n", - "/content/keras_repo\n", - "Branch 'model-pruning' set up to track remote branch 'model-pruning' from 'origin'.\n", - "Switched to a new branch 'model-pruning'\n", - "Generating api directory with public APIs...\n", - "2025-08-18 04:52:58.601821: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", - "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", - "E0000 00:00:1755492778.622187 4059 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", - "E0000 00:00:1755492778.628272 4059 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", - "W0000 00:00:1755492778.644040 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", - "W0000 00:00:1755492778.644069 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", - "W0000 00:00:1755492778.644073 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", - "W0000 00:00:1755492778.644081 4059 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", - "2025-08-18 04:52:58.648784: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", - "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", - "Formatting api directory...\n", - "Obtaining file:///content/keras_repo\n", - " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Checking if build backend supports build_editable ... \u001b[?25l\u001b[?25hdone\n", - " Getting requirements to build editable ... \u001b[?25l\u001b[?25hdone\n", - " Preparing editable metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: absl-py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (1.4.0)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (2.0.2)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (13.9.4)\n", - "Requirement already satisfied: namex in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.1.0)\n", - "Requirement already satisfied: h5py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (3.14.0)\n", - "Requirement already satisfied: optree in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.17.0)\n", - "Requirement already satisfied: ml-dtypes in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.5.3)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (25.0)\n", - "Requirement already satisfied: typing-extensions>=4.6.0 in /usr/local/lib/python3.11/dist-packages (from optree->keras==3.11.0) (4.14.1)\n", - "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (4.0.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (2.19.2)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich->keras==3.11.0) (0.1.2)\n", - "Building wheels for collected packages: keras\n", - " Building editable for keras (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for keras: filename=keras-3.11.0-0.editable-py3-none-any.whl size=9410 sha256=068d660e1dba2c4f1e4d66ff6ae58dc8965d0c57ba6f4748b8a4955fbd1f19b2\n", - " Stored in directory: /tmp/pip-ephem-wheel-cache-zet2_67e/wheels/09/7a/d4/6dbe98c57884e68eba731115af18ec3a7f493640582bacb80f\n", - "Successfully built keras\n", - "Installing collected packages: keras\n", - " Attempting uninstall: keras\n", - " Found existing installation: keras 3.11.0\n", - " Uninstalling keras-3.11.0:\n", - " Successfully uninstalled keras-3.11.0\n", - "Successfully installed keras-3.11.0\n" - ] - } - ], - "source": [ - "!pip install pre-commit\n", - "%cd /content/\n", - "%rm -r keras_repo/\n", - "%rm -r keras_hub_repo/\n", - "\n", - "!git clone https://github.com/pctablet505/keras.git keras_repo\n", - "%cd /content/keras_repo\n", - "!git checkout model-pruning\n", - "!bash /content/keras_repo/shell/api_gen.sh\n", - "%pip install -e .\n", - "\n", - "# %cd /content\n", - "# !git clone https://github.com/pctablet505/keras-hub.git keras_hub_repo\n", - "# %cd /content/keras_hub_repo\n", - "# !git checkout model-pruning\n", - "# %pip install -e .\n", - "# %cd /content" - ] - }, - { - "cell_type": "code", - "source": [ - "%cd /content" - ], - "metadata": { - "id": "hLHxr-Z8qI7X", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "a7e8816a-fda7-4afc-8335-6b494a537c6b" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "/content\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "B6MlZUaxvSY8" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f9996cef" - }, - "source": [ - "# Task\n", - "Clone the branch \"https://github.com/keras-team/keras/compare/master...pctablet505:keras:model-pruning\", install it with pip, and then write code to create a classifier on top of a ResNet50 backbone using Keras 3, and evaluate it on the CIFAR-100 dataset." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c0cea87b" - }, - "source": [ - "## Load and preprocess data\n", - "\n", - "### Subtask:\n", - "Load the CIFAR-100 dataset and preprocess it for use with the ResNet50 model.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aaeda69c" - }, - "source": [ - "**Reasoning**:\n", - "The subtask requires loading and preprocessing the CIFAR-100 dataset using Keras. This involves importing the dataset, loading the data, normalizing the images, and one-hot encoding the labels. These steps can be combined into a single code block.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "bea81d25", - "outputId": "29593519-defb-4ff6-fe65-0b3053ae9753" - }, - "source": [ - "from keras.datasets import cifar100\n", - "from keras.utils import to_categorical\n", - "\n", - "# Load the CIFAR-100 dataset\n", - "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", - "\n", - "# Normalize the image data\n", - "x_train = x_train.astype('float32') / 255.0\n", - "x_test = x_test.astype('float32') / 255.0\n", - "\n", - "# Convert labels to one-hot encoding\n", - "num_classes = 100\n", - "y_train = to_categorical(y_train, num_classes)\n", - "y_test = to_categorical(y_test, num_classes)\n", - "\n", - "print(\"CIFAR-100 dataset loaded and preprocessed.\")\n", - "print(f\"Training data shape: {x_train.shape}\")\n", - "print(f\"Training labels shape: {y_train.shape}\")\n", - "print(f\"Testing data shape: {x_test.shape}\")\n", - "print(f\"Testing labels shape: {y_test.shape}\")" - ], - "execution_count": 1, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "CIFAR-100 dataset loaded and preprocessed.\n", - "Training data shape: (50000, 32, 32, 3)\n", - "Training labels shape: (50000, 100)\n", - "Testing data shape: (10000, 32, 32, 3)\n", - "Testing labels shape: (10000, 100)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4177d50b" - }, - "source": [ - "## Load the resnet50 model\n", - "\n", - "### Subtask:\n", - "Load the pre-trained ResNet50 model without the top classification layer.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "989069a2" - }, - "source": [ - "**Reasoning**:\n", - "Import the ResNet50 model and load it with pre-trained weights and without the top classification layer.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "72c1f0de", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "1007e7f4-3d37-4410-8083-d4c937ee9096" - }, - "source": [ - "from keras.applications import ResNet50\n", - "\n", - "# Load the ResNet50 model\n", - "base_model = ResNet50(weights='imagenet', include_top=False, input_shape= (128, 128, 3))\n", - "base_model.trainable = True\n", - "\n", - "print(\"ResNet50 base model loaded.\")\n", - "print(f\"Base model output shape: {base_model.output_shape}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "ResNet50 base model loaded.\n", - "Base model output shape: (None, 4, 4, 2048)\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d2bd7ce8" - }, - "source": [ - "## Build the classifier model\n", - "\n", - "### Subtask:\n", - "Add a global average pooling layer and a dense classification layer on top of the ResNet50 backbone.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5349d77c" - }, - "source": [ - "**Reasoning**:\n", - "Add the classification layers on top of the ResNet50 base model and create the final model.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "e67685d6", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 397 - }, - "outputId": "2e7fbc5d-9dd7-4017-cf29-84575b238080" - }, - "source": [ - "from keras.layers import GlobalAveragePooling2D, Dense, Input, UpSampling2D, Conv2D, BatchNormalization\n", - "from keras.models import Model\n", - "\n", - "# Get the output tensor from the base_model\n", - "inputs = Input(shape=(32, 32, 3))\n", - "x = UpSampling2D(size=(4,4))(inputs)\n", - "x = base_model(x)\n", - "\n", - "# x = Conv2D(filters=256, kernel_size=(3,3), strides=(2,2))(x)\n", - "\n", - "# Add a GlobalAveragePooling2D layer\n", - "x = GlobalAveragePooling2D()(x)\n", - "x = Dense(256, activation='relu')(x)\n", - "x = BatchNormalization()(x)\n", - "\n", - "predictions = Dense(num_classes, activation='softmax')(x)\n", - "\n", - "# Create the final model\n", - "model = Model(inputs=inputs, outputs=predictions)\n", - "\n", - "# Print the model summary\n", - "model.summary()" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "\u001b[1mModel: \"functional\"\u001b[0m\n" - ], - "text/html": [ - "
Model: \"functional\"\n",
-              "
\n" - ] - }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n", - "┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n", - "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n", - "│ input_layer_1 (\u001b[38;5;33mInputLayer\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m, \u001b[38;5;34m32\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ up_sampling2d (\u001b[38;5;33mUpSampling2D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m128\u001b[0m, \u001b[38;5;34m128\u001b[0m, \u001b[38;5;34m3\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ resnet50 (\u001b[38;5;33mFunctional\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m4\u001b[0m, \u001b[38;5;34m4\u001b[0m, \u001b[38;5;34m2048\u001b[0m) │ \u001b[38;5;34m23,587,712\u001b[0m │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ global_average_pooling2d │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m2048\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n", - "│ (\u001b[38;5;33mGlobalAveragePooling2D\u001b[0m) │ │ │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ dense (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m524,544\u001b[0m │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ batch_normalization │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m256\u001b[0m) │ \u001b[38;5;34m1,024\u001b[0m │\n", - "│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n", - "├─────────────────────────────────┼────────────────────────┼───────────────┤\n", - "│ dense_1 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m100\u001b[0m) │ \u001b[38;5;34m25,700\u001b[0m │\n", - "└─────────────────────────────────┴────────────────────────┴───────────────┘\n" - ], - "text/html": [ - "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
-              "┃ Layer (type)                     Output Shape                  Param # ┃\n",
-              "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
-              "│ input_layer_1 (InputLayer)      │ (None, 32, 32, 3)      │             0 │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ up_sampling2d (UpSampling2D)    │ (None, 128, 128, 3)    │             0 │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ resnet50 (Functional)           │ (None, 4, 4, 2048)     │    23,587,712 │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ global_average_pooling2d        │ (None, 2048)           │             0 │\n",
-              "│ (GlobalAveragePooling2D)        │                        │               │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ dense (Dense)                   │ (None, 256)            │       524,544 │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ batch_normalization             │ (None, 256)            │         1,024 │\n",
-              "│ (BatchNormalization)            │                        │               │\n",
-              "├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
-              "│ dense_1 (Dense)                 │ (None, 100)            │        25,700 │\n",
-              "└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
-              "
\n" - ] - }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m24,138,980\u001b[0m (92.08 MB)\n" - ], - "text/html": [ - "
 Total params: 24,138,980 (92.08 MB)\n",
-              "
\n" - ] - }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m24,085,348\u001b[0m (91.88 MB)\n" - ], - "text/html": [ - "
 Trainable params: 24,085,348 (91.88 MB)\n",
-              "
\n" - ] - }, - "metadata": {} - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m53,632\u001b[0m (209.50 KB)\n" - ], - "text/html": [ - "
 Non-trainable params: 53,632 (209.50 KB)\n",
-              "
\n" - ] - }, - "metadata": {} - } - ] - }, - { - "cell_type": "code", - "source": [ - "# base_model.summary()\n", - "model.load_weights(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")" - ], - "metadata": { - "id": "vpEcuJfN2Hqv", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 426 - }, - "outputId": "a62863e5-1f96-439b-b7bf-5fc52fdd8af4" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "error", - "ename": "ValueError", - "evalue": "A total of 1 objects could not be loaded. Example error message for object :\n\nLayer 'batch_normalization' expected 4 variables, but received 0 variables during loading. Expected: ['gamma', 'beta', 'moving_mean', 'moving_variance']\n\nList of objects that could not be loaded:\n[]", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-3650302260.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# base_model.summary()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_weights\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;31m# To get the full stack trace, call:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[0;31m# `keras.config.disable_traceback_filtering()`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 122\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwith_traceback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiltered_tb\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 123\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mdel\u001b[0m \u001b[0mfiltered_tb\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/saving/saving_lib.py\u001b[0m in \u001b[0;36m_raise_loading_failure\u001b[0;34m(error_msgs, warn_only)\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0mwarnings\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwarn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 644\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 645\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 646\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 647\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mValueError\u001b[0m: A total of 1 objects could not be loaded. Example error message for object :\n\nLayer 'batch_normalization' expected 4 variables, but received 0 variables during loading. Expected: ['gamma', 'beta', 'moving_mean', 'moving_variance']\n\nList of objects that could not be loaded:\n[]" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "93a35201" - }, - "source": [ - "## Compile the model\n", - "\n", - "### Subtask:\n", - "Compile the model with an appropriate optimizer, loss function, and metrics.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "12a5076e" - }, - "source": [ - "**Reasoning**:\n", - "Compile the model with the specified optimizer, loss function, and metrics.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "7597426d", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "d44e2010-71e3-4bcf-8037-89f2e9d61a23" - }, - "source": [ - "from keras.optimizers import Adam, SGD\n", - "from keras.losses import CategoricalCrossentropy\n", - "from keras.metrics import CategoricalAccuracy\n", - "\n", - "# Choose and instantiate the optimizer\n", - "optimizer = Adam(learning_rate=1e-4, )\n", - "\n", - "# Choose and instantiate the loss function\n", - "loss_fn = CategoricalCrossentropy()\n", - "\n", - "# Choose and instantiate the metric\n", - "metrics = [CategoricalAccuracy()]\n", - "\n", - "# Compile the model\n", - "model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)\n", - "\n", - "print(\"Model compiled successfully.\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Model compiled successfully.\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "826ac27b" - }, - "source": [ - "## Train the model\n", - "\n", - "### Subtask:\n", - "Train the model on the preprocessed CIFAR-100 training data.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ded6c771" - }, - "source": [ - "**Reasoning**:\n", - "Train the compiled model using the fit method on the training data and validate on the test data.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "eafc6cc5", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "9c88309c-216b-433f-a7c2-033e55049e7d" - }, - "source": [ - "from keras.callbacks import EarlyStopping\n", - "\n", - "epochs = 200\n", - "# Add early stopping\n", - "early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)\n", - "\n", - "history = model.fit(x_train, y_train, epochs=epochs, validation_data=(x_test, y_test),batch_size=800, callbacks=[early_stopping])\n", - "\n", - "print(f\"Model trained for {epochs} epochs.\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Epoch 1/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m154s\u001b[0m 1s/step - categorical_accuracy: 0.3758 - loss: 2.7659 - val_categorical_accuracy: 0.0099 - val_loss: 4.7074\n", - "Epoch 2/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.7463 - loss: 0.9840 - val_categorical_accuracy: 0.0103 - val_loss: 4.8237\n", - "Epoch 3/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9000 - loss: 0.4333 - val_categorical_accuracy: 0.0100 - val_loss: 5.0798\n", - "Epoch 4/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9775 - loss: 0.1587 - val_categorical_accuracy: 0.0100 - val_loss: 5.1278\n", - "Epoch 5/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9965 - loss: 0.0558 - val_categorical_accuracy: 0.0117 - val_loss: 5.3401\n", - "Epoch 6/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9991 - loss: 0.0250 - val_categorical_accuracy: 0.0146 - val_loss: 5.3616\n", - "Epoch 7/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9996 - loss: 0.0147 - val_categorical_accuracy: 0.0219 - val_loss: 5.2611\n", - "Epoch 8/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0103 - val_categorical_accuracy: 0.0384 - val_loss: 5.1634\n", - "Epoch 9/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 0.0077 - val_categorical_accuracy: 0.0699 - val_loss: 4.9677\n", - "Epoch 10/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0061 - val_categorical_accuracy: 0.1234 - val_loss: 4.6203\n", - "Epoch 11/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9997 - loss: 0.0051 - val_categorical_accuracy: 0.2005 - val_loss: 4.0977\n", - "Epoch 12/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9997 - loss: 0.0046 - val_categorical_accuracy: 0.3009 - val_loss: 3.4119\n", - "Epoch 13/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 254ms/step - categorical_accuracy: 0.9998 - loss: 0.0037 - val_categorical_accuracy: 0.4052 - val_loss: 2.8126\n", - "Epoch 14/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0033 - val_categorical_accuracy: 0.4918 - val_loss: 2.3466\n", - "Epoch 15/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0029 - val_categorical_accuracy: 0.5530 - val_loss: 2.0029\n", - "Epoch 16/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0026 - val_categorical_accuracy: 0.6045 - val_loss: 1.7305\n", - "Epoch 17/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0023 - val_categorical_accuracy: 0.6521 - val_loss: 1.5021\n", - "Epoch 18/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0021 - val_categorical_accuracy: 0.6901 - val_loss: 1.3324\n", - "Epoch 19/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0020 - val_categorical_accuracy: 0.7139 - val_loss: 1.2191\n", - "Epoch 20/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0019 - val_categorical_accuracy: 0.7291 - val_loss: 1.1580\n", - "Epoch 21/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9997 - loss: 0.0017 - val_categorical_accuracy: 0.7398 - val_loss: 1.1306\n", - "Epoch 22/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 255ms/step - categorical_accuracy: 0.9998 - loss: 0.0015 - val_categorical_accuracy: 0.7418 - val_loss: 1.1207\n", - "Epoch 23/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9997 - loss: 0.0015 - val_categorical_accuracy: 0.7438 - val_loss: 1.1222\n", - "Epoch 24/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0014 - val_categorical_accuracy: 0.7431 - val_loss: 1.1229\n", - "Epoch 25/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9997 - loss: 0.0014 - val_categorical_accuracy: 0.7444 - val_loss: 1.1300\n", - "Epoch 26/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0013 - val_categorical_accuracy: 0.7444 - val_loss: 1.1340\n", - "Epoch 27/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 253ms/step - categorical_accuracy: 0.9998 - loss: 0.0012 - val_categorical_accuracy: 0.7448 - val_loss: 1.1375\n", - "Epoch 28/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0012 - val_categorical_accuracy: 0.7449 - val_loss: 1.1414\n", - "Epoch 29/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 0.0011 - val_categorical_accuracy: 0.7448 - val_loss: 1.1467\n", - "Epoch 30/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 0.0011 - val_categorical_accuracy: 0.7429 - val_loss: 1.1510\n", - "Epoch 31/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9997 - loss: 9.7342e-04 - val_categorical_accuracy: 0.7432 - val_loss: 1.1534\n", - "Epoch 32/200\n", - "\u001b[1m63/63\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 252ms/step - categorical_accuracy: 0.9998 - loss: 9.3112e-04 - val_categorical_accuracy: 0.7444 - val_loss: 1.1565\n", - "Model trained for 200 epochs.\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "887aa522" - }, - "source": [ - "## Evaluate the model\n", - "\n", - "### Subtask:\n", - "Evaluate the trained model on the preprocessed CIFAR-100 test data." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "67d1be63" - }, - "source": [ - "**Reasoning**:\n", - "Evaluate the trained model on the test dataset using the `evaluate` method." - ] - }, - { - "cell_type": "code", - "source": [ - "model.metrics" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "4F6dwwMbo81N", - "outputId": "d67a2356-e7c8-4635-9c51-5937e10d7e42" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "[, ]" - ] - }, - "metadata": {}, - "execution_count": 17 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "65a189cb", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "4b99be46-8f79-44cb-a3cc-320d4630a9d1" - }, - "source": [ - "# Evaluate the model on the test data\n", - "loss, accuracy = model.evaluate(x_test, y_test, verbose=0,)\n", - "\n", - "print(f\"Test Loss: {loss:.4f}\")\n", - "print(f\"Test Accuracy: {accuracy:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Test Loss: 1.1207\n", - "Test Accuracy: 0.7420\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "4mjtemwhrFXD" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5400d0b3" - }, - "source": [ - "# Task\n", - "Save the trained Keras model, load it, evaluate the loaded model, prune the loaded model, and evaluate the pruned model." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8e39308c" - }, - "source": [ - "## Save the model\n", - "\n", - "### Subtask:\n", - "Save the trained model to a file.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "f731890a" - }, - "source": [ - "**Reasoning**:\n", - "Save the trained Keras model to a file.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "d3a28c71", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "87dbf45c-9096-4a17-87ac-cd823f556110" - }, - "source": [ - "# model.save(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "# print(\"Model saved successfully.\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Model saved successfully.\n" - ] - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2f67026a" - }, - "source": [ - "## Load the model\n", - "\n", - "### Subtask:\n", - "Load the saved model from the file.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "665c2a00" - }, - "source": [ - "**Reasoning**:\n", - "Load the saved Keras model from the file using `load_model`.\n", - "\n" - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n" - ], - "metadata": { - "id": "RwAPp080l4DU", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 332 - }, - "outputId": "3cef2bef-aef8-449a-c65d-08a9b3e7c29a" - }, - "execution_count": 2, - "outputs": [ - { - "output_type": "error", - "ename": "ImportError", - "evalue": "cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-1342103991.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPruningConfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mLnPruning\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", - "", - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" - ], - "errorDetails": { - "actions": [ - { - "action": "open_url", - "actionText": "Open Examples", - "url": "/notebooks/snippets/importing_libraries.ipynb" - } - ] - } - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ab89861a", - "outputId": "3ee300da-4574-4e03-d996-ed51b06b5718" - }, - "source": [ - "\n", - "# Load the saved model\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "print(\"Model loaded successfully.\")\n", - "\n", - "# Evaluate the loaded model on the test data\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Model loaded successfully.\n", - "Loaded Model Test Loss: 1.5565\n", - "Loaded Model Test Accuracy: 0.5668\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "5b169cac", - "outputId": "6f5b283e-8617-4a52-e397-84e8a46ab127" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.1, method=\"l1\") # Old API\n", - "stats = loaded_model.prune(sparsity=0.1, method=\"l1\") # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loaded Model Test Loss: 3.6504\n", - "Loaded Model Test Accuracy: 0.1479\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.3, method=\"l1\") # Old API\n", - "stats = loaded_model.prune(sparsity=0.3, method=\"l1\") # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "RBU4bMx4wU-Y", - "outputId": "f5301edc-c58a-4da7-827f-53cc86a9f39b" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loaded Model Test Loss: 3.7006\n", - "Loaded Model Test Accuracy: 0.1382\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.5, method=\"l1\") # Old API\n", - "stats = loaded_model.prune(sparsity=0.5, method=\"l1\") # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "kHbuTCBbw3en", - "outputId": "28462d57-4d31-4584-e547-87c4dd31df48" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loaded Model Test Loss: 3.7443\n", - "Loaded Model Test Accuracy: 0.1329\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.7, method=LnPruning(n=4)) # Old API\n", - "stats = loaded_model.prune(sparsity=0.7, method=LnPruning(n=4)) # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Amz1C_RexHC-", - "outputId": "313241c9-2b13-423b-ead2-8b48d205efe0" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Loaded Model Test Loss: 3.8623\n", - "Loaded Model Test Accuracy: 0.1145\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "loaded_model.loss" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "MvrldtC3k1Uk", - "outputId": "05ac513e-713f-4043-ee3c-ec4b9b5f75a3" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - ", kwargs={'from_logits': False, 'label_smoothing': 0.0, 'axis': -1})>" - ] - }, - "metadata": {}, - "execution_count": 12 - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.7, method=\"saliency\",dataset=(x_train, y_train),loss_fn=loaded_model.loss) # Old API\n", - "stats = loaded_model.prune(sparsity=0.7, method=\"saliency\", dataset=(x_train, y_train), loss_fn=loaded_model.loss) # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=1)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "0i2dhj-inUxX", - "outputId": "fa45e8df-977d-4cab-c69a-a4e4583212ec" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 37ms/step - categorical_accuracy: 0.1457 - loss: 3.6570\n", - "Loaded Model Test Loss: 3.6570\n", - "Loaded Model Test Accuracy: 0.1457\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# config = PruningConfig(sparsity=0.7, method=\"taylor\",dataset=(x_train, y_train),loss_fn=loaded_model.loss) # Old API\n", - "stats = loaded_model.prune(sparsity=0.7, method=\"taylor\", dataset=(x_train, y_train), loss_fn=loaded_model.loss) # New API\n", - "\n", - "loaded_loss, loaded_accuracy = loaded_model.evaluate(x_test, y_test, verbose=1)\n", - "\n", - "print(f\"Loaded Model Test Loss: {loaded_loss:.4f}\")\n", - "print(f\"Loaded Model Test Accuracy: {loaded_accuracy:.4f}\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "nQa4k63uhC2M", - "outputId": "e19ae0e9-dab4-4168-d367-30b0ad853b7b" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[1m313/313\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m16s\u001b[0m 37ms/step - categorical_accuracy: 0.1484 - loss: 3.6519\n", - "Loaded Model Test Loss: 3.6519\n", - "Loaded Model Test Accuracy: 0.1484\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "stats" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "qUtS1rZmqv19", - "outputId": "f1459330-7833-45fb-afd8-a7004c555f93" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "{'initial_sparsity': 0.00020536343876534775,\n", - " 'final_sparsity': 0.699755017773933,\n", - " 'pruned_layers': 2,\n", - " 'target_sparsity': 0.7,\n", - " 'method': 'taylor'}" - ] - }, - "metadata": {}, - "execution_count": 15 - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "48fdb8de", - "outputId": "c5560c6e-f6c0-4343-9d2c-2dbd02c5c6f3" - }, - "source": [ - "with open(\"/content/PRUNING_DESIGN.md\", \"r\") as f:\n", - " print(f.read())" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "# Keras Model Pruning: Design Documentation\n", - "\n", - "## Table of Contents\n", - "- [Overview](#overview)\n", - "- [Architecture](#architecture)\n", - "- [Core Components](#core-components)\n", - "- [API Design](#api-design)\n", - "- [Pruning Methods](#pruning-methods)\n", - "- [Layer Selection System](#layer-selection-system)\n", - "- [Analysis and Verification Tools](#analysis-and-verification-tools)\n", - "- [Usage Examples](#usage-examples)\n", - "- [Implementation Details](#implementation-details)\n", - "- [Design Decisions](#design-decisions)\n", - "\n", - "## Overview\n", - "\n", - "The Keras Model Pruning system provides a comprehensive framework for reducing neural network model size by removing unnecessary weights while maintaining performance. The system supports multiple pruning algorithms, flexible layer selection, and extensive analysis tools.\n", - "\n", - "### Key Features\n", - "\n", - "- **Multiple Pruning Methods**: L1/L2 magnitude, structured, saliency-based, and Taylor expansion methods\n", - "- **Flexible Layer Selection**: Support for exact names, regex patterns, and mixed specifications\n", - "- **Direct Parameter API**: No configuration objects required - use parameters directly\n", - "- **Comprehensive Analysis**: Sparsity verification and performance benchmarking tools\n", - "- **Training Integration**: Callbacks for gradual pruning during training\n", - "- **Backend Agnostic**: Works with TensorFlow, JAX, and PyTorch backends\n", - "\n", - "## Architecture\n", - "\n", - "```\n", - "keras.src.pruning/\n", - "├── core.py # Core pruning logic and orchestration\n", - "├── pruning_method.py # Abstract base and concrete pruning methods\n", - "├── pruning_schedule.py # Scheduling for gradual pruning\n", - "├── pruning_utils.py # Analysis and verification utilities\n", - "├── config.py # Legacy configuration (deprecated)\n", - "└── __init__.py # Public API exports\n", - "\n", - "keras.src.callbacks/\n", - "└── pruning.py # Training callbacks for gradual pruning\n", - "\n", - "keras.src.models/\n", - "└── model.py # Model.prune() method integration\n", - "```\n", - "\n", - "### Component Relationships\n", - "\n", - "```mermaid\n", - "graph TD\n", - " A[Model.prune()] --> B[apply_pruning_to_model()]\n", - " B --> C[apply_pruning_to_layer()]\n", - " C --> D[PruningMethod.compute_mask()]\n", - " D --> E[Backend-specific implementation]\n", - " \n", - " F[PruningCallback] --> B\n", - " G[Analysis Tools] --> H[analyze_sparsity()]\n", - " G --> I[benchmark_inference()]\n", - " \n", - " J[Layer Selection] --> K[match_layers_by_patterns()]\n", - " K --> L[Regex Pattern Matching]\n", - "```\n", - "\n", - "## Core Components\n", - "\n", - "### 1. PruningMethod Base Class\n", - "\n", - "Abstract base class defining the pruning interface:\n", - "\n", - "```python\n", - "class PruningMethod(abc.ABC):\n", - " @abc.abstractmethod\n", - " def compute_mask(self, weights, sparsity_ratio, **kwargs):\n", - " \"\"\"Compute binary mask indicating which weights to prune.\"\"\"\n", - " pass\n", - " \n", - " def apply_mask(self, weights, mask):\n", - " \"\"\"Apply pruning mask to weights.\"\"\"\n", - " return weights * ops.cast(mask, weights.dtype)\n", - "```\n", - "\n", - "### 2. Core Pruning Functions\n", - "\n", - "#### `apply_pruning_to_model()`\n", - "Main orchestration function that applies pruning to selected layers:\n", - "\n", - "```python\n", - "def apply_pruning_to_model(model, sparsity, method=\"l1\", layers_to_prune=None, \n", - " dataset=None, loss_fn=None, **kwargs):\n", - " \"\"\"Apply pruning to specified layers in a model.\"\"\"\n", - "```\n", - "\n", - "#### `should_prune_layer()`\n", - "Determines if a layer should be pruned based on type and selection criteria:\n", - "\n", - "```python\n", - "def should_prune_layer(layer, layers_to_prune=None):\n", - " \"\"\"Determine if layer should be pruned based on type and selection.\"\"\"\n", - "```\n", - "\n", - "### 3. Layer Selection System\n", - "\n", - "#### Pattern Matching\n", - "The system supports flexible layer selection:\n", - "\n", - "- **Exact Names**: `[\"dense_1\", \"conv2d_3\"]`\n", - "- **Regex Patterns**: `[\"dense_.*\", \"conv2d_[0-9]\"]`\n", - "- **Mixed Specifications**: `[\"dense_input\", \"conv.*\", \"dense_hidden_2\"]`\n", - "- **Single String**: `\"dense_hidden_.*\"`\n", - "\n", - "#### Implementation\n", - "```python\n", - "def match_layers_by_patterns(model, patterns):\n", - " \"\"\"Find layers matching given patterns using exact and regex matching.\"\"\"\n", - "```\n", - "\n", - "## API Design\n", - "\n", - "### New Direct Parameter API\n", - "\n", - "The modern API accepts parameters directly without requiring configuration objects:\n", - "\n", - "```python\n", - "# Model pruning\n", - "model.prune(\n", - " sparsity=0.5, # Target sparsity level\n", - " method=\"l1\", # Pruning method\n", - " layers_to_prune=[\"dense_.*\"], # Layer selection (optional)\n", - " dataset=(x, y), # For gradient methods (optional)\n", - " loss_fn=\"mse\" # Loss function (optional)\n", - ")\n", - "\n", - "# Training callbacks\n", - "callback = keras.callbacks.PruningCallback(\n", - " sparsity=0.7,\n", - " method=\"structured\", \n", - " layers_to_prune=[\"conv.*\"],\n", - " start_step=100,\n", - " end_step=500,\n", - " frequency=50\n", - ")\n", - "```\n", - "\n", - "### Legacy Configuration API (Deprecated)\n", - "\n", - "For backwards compatibility, the old config-based API is still supported:\n", - "\n", - "```python\n", - "config = PruningConfig(sparsity=0.5, method=\"l1\")\n", - "model.prune(config=config) # Deprecated - issues warning\n", - "```\n", - "\n", - "## Pruning Methods\n", - "\n", - "### 1. Magnitude-Based Methods\n", - "\n", - "#### L1 Pruning\n", - "Prunes weights with smallest absolute values:\n", - "- **Formula**: Sort by `|w|`, remove smallest\n", - "- **Use Case**: General purpose, fast, no data required\n", - "- **Structured Option**: Can prune entire channels/filters\n", - "\n", - "```python\n", - "model.prune(sparsity=0.5, method=\"l1\") # Unstructured\n", - "model.prune(sparsity=0.3, method=\"l1_structured\") # Structured\n", - "```\n", - "\n", - "#### Ln Pruning\n", - "Generalizes to any norm order:\n", - "- **Formula**: Sort by `|w|^n`, configurable norm order\n", - "- **Use Case**: Research, experimentation with different norms\n", - "\n", - "```python\n", - "model.prune(sparsity=0.4, method=\"l2\") # L2 norm\n", - "```\n", - "\n", - "### 2. Gradient-Based Methods\n", - "\n", - "#### Saliency Pruning (First-Order)\n", - "Uses gradients to estimate weight importance:\n", - "- **Formula**: `|∂L/∂w × w|`\n", - "- **Mathematical Basis**: First-order Taylor approximation of loss change\n", - "- **Requirements**: Model, dataset, loss function\n", - "- **Backend Support**: TensorFlow (optimized), JAX/PyTorch (planned)\n", - "\n", - "```python\n", - "model.prune(\n", - " sparsity=0.4,\n", - " method=\"saliency\",\n", - " dataset=(x_sample, y_sample),\n", - " loss_fn=\"categorical_crossentropy\"\n", - ")\n", - "```\n", - "\n", - "#### Taylor Pruning (Second-Order)\n", - "Uses second-order approximation for better accuracy:\n", - "- **Formula**: `|∂L/∂w × w| + 0.5 × |H_ii × w²|`\n", - "- **Mathematical Basis**: Second-order Taylor approximation\n", - "- **Hessian Approximation**: Uses `(∂L/∂w)²` for computational efficiency\n", - "- **Requirements**: Model, dataset, loss function\n", - "\n", - "```python\n", - "model.prune(\n", - " sparsity=0.3,\n", - " method=\"taylor\",\n", - " dataset=(x_sample, y_sample),\n", - " loss_fn=\"mse\"\n", - ")\n", - "```\n", - "\n", - "### 3. Structured Methods\n", - "\n", - "#### Structured Pruning\n", - "Removes entire channels/filters based on L2 norms:\n", - "- **Formula**: Sort channels by `√(Σw²)`, remove smallest\n", - "- **Advantage**: Reduces model size and computation\n", - "- **Use Case**: Deployment optimization, hardware acceleration\n", - "\n", - "```python\n", - "model.prune(sparsity=0.4, method=\"structured\")\n", - "```\n", - "\n", - "### 4. Research Methods\n", - "\n", - "#### Random Pruning\n", - "Randomly selects weights to prune:\n", - "- **Use Case**: Baseline for research comparisons\n", - "- **Formula**: Random selection with specified sparsity\n", - "\n", - "```python\n", - "model.prune(sparsity=0.5, method=\"random\", seed=42)\n", - "```\n", - "\n", - "## Layer Selection System\n", - "\n", - "### Selection Criteria\n", - "\n", - "The system allows fine-grained control over which layers to prune:\n", - "\n", - "#### 1. All Eligible Layers (Default)\n", - "```python\n", - "model.prune(sparsity=0.5) # Prunes all Dense, Conv1D, Conv2D, Conv3D layers\n", - "```\n", - "\n", - "#### 2. Exact Layer Names\n", - "```python\n", - "model.prune(\n", - " sparsity=0.4, \n", - " layers_to_prune=[\"dense_1\", \"dense_2\", \"conv2d_features\"]\n", - ")\n", - "```\n", - "\n", - "#### 3. Regex Patterns\n", - "```python\n", - "model.prune(\n", - " sparsity=0.3,\n", - " layers_to_prune=[\n", - " \"dense_hidden_.*\", # All dense_hidden_* layers\n", - " \"conv2d_[0-9]+\", # conv2d_1, conv2d_2, etc.\n", - " \".*_features\" # Any layer ending with _features\n", - " ]\n", - ")\n", - "```\n", - "\n", - "#### 4. Mixed Specifications\n", - "```python\n", - "model.prune(\n", - " sparsity=0.6,\n", - " layers_to_prune=[\n", - " \"input_layer\", # Exact name\n", - " \"conv.*\", # Regex pattern\n", - " \"dense_output\" # Another exact name\n", - " ]\n", - ")\n", - "```\n", - "\n", - "### Pattern Matching Implementation\n", - "\n", - "The layer selection system uses a two-stage matching process:\n", - "\n", - "1. **Exact Match**: First tries exact string comparison\n", - "2. **Regex Match**: Falls back to regex pattern matching if exact match fails\n", - "\n", - "This ensures maximum flexibility while maintaining performance.\n", - "\n", - "## Analysis and Verification Tools\n", - "\n", - "### Sparsity Analysis\n", - "\n", - "#### `analyze_sparsity(model, layer_names=None)`\n", - "Provides detailed sparsity statistics:\n", - "\n", - "```python\n", - "stats = analyze_sparsity(model)\n", - "print(f\"Overall sparsity: {stats['overall_sparsity']:.3f}\")\n", - "print(f\"Layers analyzed: {stats['layers_analyzed']}\")\n", - "\n", - "# Analyze specific layer groups\n", - "hidden_stats = analyze_sparsity(model, layer_names=[\"dense_hidden_.*\"])\n", - "```\n", - "\n", - "#### `compare_sparsity(model_before, model_after)`\n", - "Compares sparsity between two models:\n", - "\n", - "```python\n", - "comparison = compare_sparsity(original_model, pruned_model)\n", - "print_sparsity_report(comparison)\n", - "```\n", - "\n", - "### Performance Benchmarking\n", - "\n", - "#### `benchmark_inference(model, test_data)`\n", - "Measures inference performance with statistical analysis:\n", - "\n", - "```python\n", - "benchmark = benchmark_inference(model, test_data, num_iterations=100)\n", - "print(f\"Mean time: {benchmark['mean_time']*1000:.3f} ms\")\n", - "print(f\"Throughput: {benchmark['throughput_samples_per_sec']:.1f} samples/sec\")\n", - "```\n", - "\n", - "#### `compare_inference_speed(model_before, model_after, test_data)`\n", - "Compares performance improvements:\n", - "\n", - "```python\n", - "comparison = compare_inference_speed(original_model, pruned_model, test_data)\n", - "print(f\"Speedup: {comparison['improvements']['speedup_factor']:.3f}x\")\n", - "print(f\"Time reduction: {comparison['improvements']['time_reduction_percent']:.2f}%\")\n", - "```\n", - "\n", - "### Complete Analysis\n", - "\n", - "#### `complete_pruning_analysis()`\n", - "Runs comprehensive analysis combining sparsity and performance metrics:\n", - "\n", - "```python\n", - "analysis = complete_pruning_analysis(\n", - " model_before=original_model,\n", - " model_after=pruned_model,\n", - " test_data=test_batch\n", - ")\n", - "# Automatically prints detailed reports\n", - "```\n", - "\n", - "## Usage Examples\n", - "\n", - "### Basic Usage\n", - "\n", - "```python\n", - "# Simple L1 pruning\n", - "model.prune(sparsity=0.5, method=\"l1\")\n", - "\n", - "# Structured pruning for deployment\n", - "model.prune(sparsity=0.3, method=\"structured\")\n", - "```\n", - "\n", - "### Advanced Usage\n", - "\n", - "```python\n", - "# Selective gradient-based pruning\n", - "model.prune(\n", - " sparsity=0.4,\n", - " method=\"saliency\",\n", - " layers_to_prune=[\"conv.*\", \"dense_hidden_.*\"],\n", - " dataset=(x_train[:100], y_train[:100]),\n", - " loss_fn=\"categorical_crossentropy\"\n", - ")\n", - "```\n", - "\n", - "### Training Integration\n", - "\n", - "```python\n", - "# Gradual pruning during training\n", - "callback = keras.callbacks.PruningCallback(\n", - " sparsity=0.8,\n", - " method=\"l1\",\n", - " layers_to_prune=[\"dense_.*\"],\n", - " start_step=100,\n", - " end_step=1000,\n", - " frequency=50,\n", - " schedule=\"polynomial\"\n", - ")\n", - "\n", - "model.fit(x, y, callbacks=[callback])\n", - "```\n", - "\n", - "### Analysis and Verification\n", - "\n", - "```python\n", - "# Complete analysis workflow\n", - "analysis = complete_pruning_analysis(\n", - " model_before=original,\n", - " model_after=pruned,\n", - " test_data=x_test[:32],\n", - " layer_names=[\"dense_.*\"] # Focus on specific layers\n", - ")\n", - "\n", - "# Custom analysis\n", - "sparsity_stats = analyze_sparsity(pruned_model, layer_names=[\"conv.*\"])\n", - "performance_stats = benchmark_inference(pruned_model, test_data)\n", - "```\n", - "\n", - "## Implementation Details\n", - "\n", - "### Backend Integration\n", - "\n", - "#### TensorFlow Backend (Optimized)\n", - "- Uses `GradientTape` for efficient gradient computation\n", - "- Batch processing for memory efficiency\n", - "- GPU acceleration support\n", - "\n", - "#### JAX/PyTorch Backends\n", - "- Uses backend-specific autodiff systems\n", - "- Clear error messages for unsupported methods\n", - "- Fallback to magnitude methods when gradients unavailable\n", - "\n", - "### Memory Management\n", - "\n", - "#### Gradient Computation\n", - "- Limits batch size to prevent OOM (default: 32 samples)\n", - "- Random sampling for large datasets\n", - "- Efficient tensor operations\n", - "\n", - "#### Model Cloning\n", - "- Supports deep copying for comparisons\n", - "- Preserves model architecture and compilation state\n", - "- Handles weight sharing correctly\n", - "\n", - "### Error Handling\n", - "\n", - "#### Clear Error Messages\n", - "- Specific requirements for each method\n", - "- Backend compatibility information\n", - "- Helpful suggestions for alternatives\n", - "\n", - "#### Graceful Degradation\n", - "- Falls back to magnitude methods when gradients fail\n", - "- Continues with available layers if some are incompatible\n", - "- Provides detailed statistics about what was processed\n", - "\n", - "## Design Decisions\n", - "\n", - "### 1. Direct Parameters vs Configuration Objects\n", - "\n", - "**Decision**: Moved from configuration objects to direct parameters\n", - "**Rationale**: \n", - "- Simpler API - no need to create config objects\n", - "- More intuitive for users\n", - "- Better IDE support with parameter hints\n", - "- Maintains backwards compatibility\n", - "\n", - "### 2. Layer Selection System\n", - "\n", - "**Decision**: Implemented flexible pattern matching with exact names and regex\n", - "**Rationale**:\n", - "- Complex architectures need fine-grained control\n", - "- Regex patterns enable powerful batch selection\n", - "- Mixed specifications provide maximum flexibility\n", - "- Performance optimized with two-stage matching\n", - "\n", - "### 3. Gradient Method Implementation\n", - "\n", - "**Decision**: Backend-specific implementations with clear error handling\n", - "**Rationale**:\n", - "- Different backends have different optimal approaches\n", - "- TensorFlow optimization provides significant speedup\n", - "- Clear errors better than incorrect fallbacks\n", - "- Maintains mathematical correctness\n", - "\n", - "### 4. Analysis Tool Design\n", - "\n", - "**Decision**: Comprehensive analysis suite with filtering capabilities\n", - "**Rationale**:\n", - "- Users need to verify actual vs target sparsity\n", - "- Performance measurement is crucial for deployment\n", - "- Layer-specific analysis enables targeted optimization\n", - "- Detailed reporting aids debugging and optimization\n", - "\n", - "### 5. Mathematical Correctness\n", - "\n", - "**Decision**: Prioritize mathematical accuracy over convenience\n", - "**Rationale**:\n", - "- Incorrect methods can mislead users\n", - "- Research applications need theoretical soundness\n", - "- Clear documentation of approximations and limitations\n", - "- Honest error reporting when methods don't work\n", - "\n", - "## Future Enhancements\n", - "\n", - "### Planned Features\n", - "\n", - "1. **Extended Backend Support**\n", - " - Full JAX gradient implementation\n", - " - PyTorch gradient optimization\n", - " - Custom gradient computation hooks\n", - "\n", - "2. **Advanced Pruning Methods**\n", - " - SNIP (Single-shot Network Pruning)\n", - " - LAMP (Layer-Adaptive Sparsity)\n", - " - Lottery Ticket Hypothesis support\n", - "\n", - "3. **Deployment Optimizations**\n", - " - Sparse tensor format support\n", - " - Hardware-specific optimizations\n", - " - Quantization integration\n", - "\n", - "4. **Enhanced Analysis**\n", - " - Model accuracy impact analysis\n", - " - Layer importance ranking\n", - " - Pruning sensitivity analysis\n", - "\n", - "### Research Integration\n", - "\n", - "The framework is designed to support research with:\n", - "- Extensible method base classes\n", - "- Comprehensive analysis tools\n", - "- Mathematical correctness verification\n", - "- Easy integration of new algorithms\n", - "\n", - "This design documentation reflects the current state of the Keras Model Pruning system, emphasizing practical usability while maintaining research-grade mathematical rigor and flexibility.\n", - "\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "PSaoe8pCHRLj" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "982c8923", - "outputId": "9ed91c25-ec7e-4396-a5af-62d8a5b39e07" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_l1_30 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply L1 pruning with 30% sparsity\n", - "# config_l1_30 = PruningConfig(sparsity=0.3, method=\"l1\") # Old API\n", - "stats_l1_30 = loaded_model_l1_30.prune(sparsity=0.3, method=\"l1\") # New API\n", - "\n", - "print(\"\\n--- L1 Pruning (30% sparsity) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_l1_30['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_l1_30['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_l1_30['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_l1_30['method']}\")\n", - "print(f\"Pruned Layers: {stats_l1_30['pruned_layers']}\")\n", - "\n", - "\n", - "# Evaluate the pruned model\n", - "loss_l1_30, accuracy_l1_30 = loaded_model_l1_30.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"L1 Pruned Model Test Loss: {loss_l1_30:.4f}\")\n", - "print(f\"L1 Pruned Model Test Accuracy: {accuracy_l1_30:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- L1 Pruning (30% sparsity) Results ---\n", - "Initial Sparsity: 0.0002\n", - "Final Sparsity: 0.3000\n", - "Target Sparsity: 0.3000\n", - "Pruning Method: l1\n", - "Pruned Layers: 2\n", - "L1 Pruned Model Test Loss: 3.7006\n", - "L1 Pruned Model Test Accuracy: 0.1382\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "00c0b0f1", - "outputId": "983b743a-3ea4-45e5-a987-f6f6a1bbfe19" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_l2_50 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply L2 pruning with 50% sparsity\n", - "# config_l2_50 = PruningConfig(sparsity=0.5, method=\"l2\") # Old API\n", - "stats_l2_50 = loaded_model_l2_50.prune(sparsity=0.5, method=\"l2\") # New API\n", - "\n", - "print(\"\\n--- L2 Pruning (50% sparsity) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_l2_50['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_l2_50['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_l2_50['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_l2_50['method']}\")\n", - "print(f\"Pruned Layers: {stats_l2_50['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_l2_50, accuracy_l2_50 = loaded_model_l2_50.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"L2 Pruned Model Test Loss: {loss_l2_50:.4f}\")\n", - "print(f\"L2 Pruned Model Test Accuracy: {accuracy_l2_50:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- L2 Pruning (50% sparsity) Results ---\n", - "Initial Sparsity: 0.0002\n", - "Final Sparsity: 0.4999\n", - "Target Sparsity: 0.5000\n", - "Pruning Method: l2\n", - "Pruned Layers: 2\n", - "L2 Pruned Model Test Loss: 3.7443\n", - "L2 Pruned Model Test Accuracy: 0.1329\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "a7407947", - "outputId": "2d137af4-1c7f-418f-af2e-e69a5c702146" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_structured_40 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply Structured pruning with 40% sparsity\n", - "# config_structured_40 = PruningConfig(sparsity=0.4, method=\"structured\") # Old API\n", - "stats_structured_40 = loaded_model_structured_40.prune(sparsity=0.4, method=\"structured\") # New API\n", - "\n", - "\n", - "print(\"\\n--- Structured Pruning (40% sparsity) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_structured_40['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_structured_40['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_structured_40['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_structured_40['method']}\")\n", - "print(f\"Pruned Layers: {stats_structured_40['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_structured_40, accuracy_structured_40 = loaded_model_structured_40.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"Structured Pruned Model Test Loss: {loss_structured_40:.4f}\")\n", - "print(f\"Structured Pruned Model Test Accuracy: {accuracy_structured_40:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- Structured Pruning (40% sparsity) Results ---\n", - "Initial Sparsity: 0.0002\n", - "Final Sparsity: 0.4026\n", - "Target Sparsity: 0.4000\n", - "Pruning Method: structured\n", - "Pruned Layers: 2\n", - "Structured Pruned Model Test Loss: 3.9227\n", - "Structured Pruned Model Test Accuracy: 0.1330\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 384 - }, - "id": "e7d97366", - "outputId": "6d2193aa-8544-4623-f94c-c61cef1b5bf0" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_saliency_60_selective = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply Saliency pruning with 60% sparsity on layers matching \"dense_.*\"\n", - "dataset_subset = (x_train[:1000], y_train[:1000]) # Use a smaller subset\n", - "loss_fn = loaded_model_saliency_60_selective.loss\n", - "\n", - "# config_saliency_60_selective = PruningConfig( # Old API\n", - "# sparsity=0.6,\n", - "# method=\"saliency\",\n", - "# layers_to_prune=[\"dense_.*\"], # Select layers by regex\n", - "# dataset=dataset_subset,\n", - "# loss_fn=loss_fn\n", - "# )\n", - "stats_saliency_60_selective = loaded_model_saliency_60_selective.prune( # New API\n", - " sparsity=0.6,\n", - " method=\"saliency\",\n", - " layers_to_prune=[\"dense_.*\"], # Select layers by regex\n", - " dataset=dataset_subset,\n", - " loss_fn=loss_fn\n", - ")\n", - "\n", - "print(\"\\n--- Saliency Pruning (60% sparsity, selective) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_saliency_60_selective['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_saliency_60_selective['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_saliency_60_selective['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_saliency_60_selective['method']}\")\n", - "print(f\"Pruned Layers: {stats_saliency_60_selective['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_saliency_60_selective, accuracy_saliency_60_selective = loaded_model_saliency_60_selective.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"Saliency Pruned Model Test Loss: {loss_saliency_60_selective:.4f}\")\n", - "print(f\"Saliency Pruned Model Test Accuracy: {accuracy_saliency_60_selective:.4f}\")" - ], - "execution_count": 3, - "outputs": [ - { - "output_type": "error", - "ename": "ImportError", - "evalue": "cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-2319192711.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mPruningConfig\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mLnPruning\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;31m# Load the saved model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mloaded_model_saliency_60_selective\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'PruningConfig' from 'keras.pruning' (/content/keras_repo/keras/api/pruning/__init__.py)", - "", - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" - ], - "errorDetails": { - "actions": [ - { - "action": "open_url", - "actionText": "Open Examples", - "url": "/notebooks/snippets/importing_libraries.ipynb" - } - ] - } - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "21843d30", - "outputId": "bc60dce0-fc1e-4935-e85e-67a40e8c4bbe" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import PruningConfig, LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_taylor_70_selective = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply Taylor pruning with 70% sparsity on layers matching \"conv.*\"\n", - "dataset_subset = (x_train[:1000], y_train[:1000]) # Use a smaller subset\n", - "loss_fn = loaded_model_taylor_70_selective.loss\n", - "\n", - "# config_taylor_70_selective = PruningConfig( # Old API\n", - "# sparsity=0.7,\n", - "# method=\"taylor\",\n", - "# layers_to_prune=[\"conv.*\"], # Select layers by regex\n", - "# dataset=dataset_subset,\n", - "# loss_fn=loss_fn\n", - "# )\n", - "stats_taylor_70_selective = loaded_model_taylor_70_selective.prune( # New API\n", - " sparsity=0.7,\n", - " method=\"taylor\",\n", - " layers_to_prune=[\"conv.*\"], # Select layers by regex\n", - " dataset=dataset_subset,\n", - " loss_fn=loss_fn\n", - ")\n", - "\n", - "print(\"\\n--- Taylor Pruning (70% sparsity, selective) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_taylor_70_selective['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_taylor_70_selective['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_taylor_70_selective['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_taylor_70_selective['method']}\")\n", - "print(f\"Pruned Layers: {stats_taylor_70_selective['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_taylor_70_selective, accuracy_taylor_70_selective = loaded_model_taylor_70_selective.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"Taylor Pruned Model Test Loss: {loss_taylor_70_selective:.4f}\")\n", - "print(f\"Taylor Pruned Model Test Accuracy: {accuracy_taylor_70_selective:.4f}\")" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- Taylor Pruning (70% sparsity, selective) Results ---\n", - "Initial Sparsity: 0.0002\n", - "Final Sparsity: 0.0002\n", - "Target Sparsity: 0.7000\n", - "Pruning Method: taylor\n", - "Pruned Layers: 0\n", - "Taylor Pruned Model Test Loss: 3.6502\n", - "Taylor Pruned Model Test Accuracy: 0.1477\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "e1d5d66c", - "outputId": "e8b0df0b-e99d-4970-a6bd-06c47a5a60c8" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_random_80 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply Random pruning with 80% sparsity\n", - "# config_random_80 = PruningConfig(sparsity=0.8, method=\"random\", seed=42) # Old API\n", - "stats_random_80 = loaded_model_random_80.prune(sparsity=0.8, method=\"l1\", seed=42) # New API\n", - "\n", - "print(\"\\n--- Random Pruning (80% sparsity) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_random_80['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_random_80['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_random_80['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_random_80['method']}\")\n", - "print(f\"Pruned Layers: {stats_random_80['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_random_80, accuracy_random_80 = loaded_model_random_80.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"Random Pruned Model Test Loss: {loss_random_80:.4f}\")\n", - "print(f\"Random Pruned Model Test Accuracy: {accuracy_random_80:.4f}\")\n", - "\n", - "# print(\"Skipping random pruning test as it is not currently supported.\")" - ], - "execution_count": 5, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- Random Pruning (80% sparsity) Results ---\n", - "Initial Sparsity: 0.0000\n", - "Final Sparsity: 0.7991\n", - "Target Sparsity: 0.8000\n", - "Pruning Method: l1\n", - "Pruned Layers: 55\n", - "Random Pruned Model Test Loss: 4.9933\n", - "Random Pruned Model Test Accuracy: 0.0119\n" - ] - } - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "df2e703d", - "outputId": "16972751-965c-4654-ee82-9dde88ad29f9" - }, - "source": [ - "from keras.models import load_model\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the saved model\n", - "loaded_model_ln4_50 = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - "# Apply Ln pruning (n=4) with 50% sparsity\n", - "# config_ln4_50 = PruningConfig(sparsity=0.5, method=LnPruning(n=4)) # Old API\n", - "stats_ln4_50 = loaded_model_ln4_50.prune(sparsity=0.5, method=LnPruning(n=4)) # New API\n", - "\n", - "\n", - "print(\"\\n--- Ln Pruning (n=4, 50% sparsity) Results ---\")\n", - "print(f\"Initial Sparsity: {stats_ln4_50['initial_sparsity']:.4f}\")\n", - "print(f\"Final Sparsity: {stats_ln4_50['final_sparsity']:.4f}\")\n", - "print(f\"Target Sparsity: {stats_ln4_50['target_sparsity']:.4f}\")\n", - "print(f\"Pruning Method: {stats_ln4_50['method']}\")\n", - "print(f\"Pruned Layers: {stats_ln4_50['pruned_layers']}\")\n", - "\n", - "# Evaluate the pruned model\n", - "loss_ln4_50, accuracy_ln4_50 = loaded_model_ln4_50.evaluate(x_test, y_test, verbose=0)\n", - "print(f\"Ln Pruned Model Test Loss: {loss_ln4_50:.4f}\")\n", - "print(f\"Ln Pruned Model Test Accuracy: {accuracy_ln4_50:.4f}\")" - ], - "execution_count": 6, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "--- Ln Pruning (n=4, 50% sparsity) Results ---\n", - "Initial Sparsity: 0.0000\n", - "Final Sparsity: 0.4994\n", - "Target Sparsity: 0.5000\n", - "Pruning Method: \n", - "Pruned Layers: 55\n", - "Ln Pruned Model Test Loss: 2.2994\n", - "Ln Pruned Model Test Accuracy: 0.4893\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "import keras_hub\n", - "import time" - ], - "metadata": { - "id": "JjTBElU7FDi7" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "model=keras_hub.models.Llama3CausalLM.from_preset(\"hf://meta-llama/Llama-3.2-1B\",dtype='bfloat16')" - ], - "metadata": { - "id": "do3zkG9gFlg5" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "model.generate(\"what is keras\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 140 - }, - "id": "InUr22QoTjVH", - "outputId": "4f3fef74-098e-456d-934e-1d0b677e5fec" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "'what is keras in machine learning\\nMachine learning is the process of building systems that can learn and adapt to change. The machine learning algorithms that are available in Python are: Linear Regression, Logistic Regression, SVM (Support Vector Machine), Decision Tree Classifier and K-Nearest Neighbour Classifier. The following are the steps involved in the process of machine learning. Machine learning is a branch of artificial intelligence which is used to create systems which can make decisions without being programmed to do so. In this article, we will discuss the steps involved in the process of machine learning, the various techniques used in it and some of its important applications. Machine Learning is the branch of Artificial Intelligence that is mainly concerned with the use of computers or other digital machines to perform tasks that require human intelligence to perform them. It involves the use of algorithms and data to create a model that can learn from past data. It is used to create systems that can make decisions without being programmed to do so. Machine Learning in Keras: A Complete Guide. Keras is one of the most popular libraries for machine learning in Python. It uses the concept of a neural network and is used to train a model. It is a tool that helps you to create models and train them. Machine learning in Python is one of the most popular libraries in the Python community. The following are the steps that are used in machine learning. Machine learning is the process where computers learn from past data, and then use that data to make predictions. Machine learning is a subset of artificial intelligence, a field that focuses on making machines that can perform tasks that would normally require human intelligence. In this article, we will be looking at the different steps involved in the process of machine learning. The following are the steps involved in the process of machine learning: The following are the steps of machine learning: In this article, we will discuss the steps involved in the process of machine learning. Machine learning is one of the most important branches of artificial intelligence that has been used to create a model that can learn from past data. In the process of machine learning, the following are the steps involved: The following are the steps of the process of machine learning: In this article, we will discuss the process of machine learning. It uses the concept of machine learning to train models and predict outcomes of new data. This is done by using a set of rules or algorithms, and then using that to create a model which can learn new things and predict future outcomes. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. Machine Learning in Keras is used for training neural networks. In machine learning, the following are the steps involved: Machine learning is used for the development of models and systems that can learn and adapt to new situations. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. The process of machine learning is divided into three main components. It uses a combination of data, algorithms and techniques to create systems that can learn new things and perform new tasks. The following are the steps that are used in machine learning: The steps involved in machine learning: Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. Machine learning is a subset of artificial intelligence that is used to create a machine which can learn and adapt to new environments. The steps involved in machine learning: In machine learning, the following are the steps involved: The following are the steps involved in the process of machine learning: Machine Learning in Keras: A Complete Guide. The steps involved in machine learning: Machine learning is the process where computers learn from past data, and then use that data to make predictions.\\nThe Steps Involved In Machine Learning, Keras In Machine Learning , Steps Involved In Data Science , Steps In Machine Learning , Steps Involved In Data Analysis\\nwhat is keras in machine learning'" - ], - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - } - }, - "metadata": {}, - "execution_count": 10 - } - ] - }, - { - "cell_type": "code", - "source": [ - "\n", - "start_time = time.time()\n", - "for i in range(100):\n", - " model.generate(\"what is keras\")\n", - " print(i)\n", - "end_time = time.time()\n", - "\n", - "inference_time = end_time - start_time\n", - "print(f\"Inference time for 100 calls to model.generate: {inference_time:7f} seconds\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "daewQ0VOT8Wi", - "outputId": "447d57f2-522a-43a8-936f-786035c99d66" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "0\n", - "1\n", - "2\n", - "3\n", - "4\n", - "5\n", - "6\n", - "7\n", - "8\n", - "9\n", - "10\n", - "11\n", - "12\n", - "13\n", - "14\n", - "15\n", - "16\n", - "17\n", - "18\n", - "19\n", - "20\n", - "21\n", - "22\n", - "23\n", - "24\n", - "25\n", - "26\n", - "27\n", - "28\n", - "29\n", - "30\n", - "31\n", - "32\n", - "33\n", - "34\n", - "35\n", - "36\n", - "37\n", - "38\n", - "39\n", - "40\n", - "41\n", - "42\n", - "43\n", - "44\n", - "45\n", - "46\n", - "47\n", - "48\n", - "49\n", - "50\n", - "51\n", - "52\n", - "53\n", - "54\n", - "55\n", - "56\n", - "57\n", - "58\n", - "59\n", - "60\n", - "61\n", - "62\n", - "63\n", - "64\n", - "65\n", - "66\n", - "67\n", - "68\n", - "69\n", - "70\n", - "71\n", - "72\n", - "73\n", - "74\n", - "75\n", - "76\n", - "77\n", - "78\n", - "79\n", - "80\n", - "81\n", - "82\n", - "83\n", - "84\n", - "85\n", - "86\n", - "87\n", - "88\n", - "89\n", - "90\n", - "91\n", - "92\n", - "93\n", - "94\n", - "95\n", - "96\n", - "97\n", - "98\n", - "99\n", - "Inference time for 100 calls to model.generate: 380.907693 seconds\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [ - "stats=model.prune(sparsity=0.7, method=\"l1\")\n" - ], - "metadata": { - "id": "PZfiXh1kGJ-f" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "stats" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "cs1itfE_Ggbj", - "outputId": "98d51b87-8c53-4aa9-c351-657884734a96" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "{'initial_sparsity': 0.0,\n", - " 'final_sparsity': 0.7014274586891306,\n", - " 'pruned_layers': 112,\n", - " 'target_sparsity': 0.7,\n", - " 'method': 'l1',\n", - " 'layers_pruned': ['feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query',\n", - " 'feedforward_output_dense',\n", - " 'feedforward_gate_dense',\n", - " 'feedforward_intermediate_dense',\n", - " 'attention_output',\n", - " 'value',\n", - " 'key',\n", - " 'query']}" - ] - }, - "metadata": {}, - "execution_count": 12 - } - ] - }, - { - "cell_type": "code", - "source": [ - "model.generate(\"what is keras\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 140 - }, - "id": "RHORpOomGhTq", - "outputId": "555b7216-6c89-497c-b41a-f0ae54c52252" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "'what is keraslegegeelgevaanggdegegabdizahddeygimgdegyydeyinindaydeyyyyyyinydeydmalialithemyydatyininyyininthemdinagdatatinthinagthatthagthemthatagthagproaginagthdinthemagagatagdagagatatinatagxaginaginagagatcriagagagatagcrileininaginagaginemaginieatinagdininagdeleagleaginagininieiniehiileiinininleleleinatiiagininagdeleagiininagdininleinleleleinemininemleleleleleleemleinininatinagdiininatleatatleleinininininininleininatinleatdeleinleagleleleleileinleinininleininieminleleleininleleleinemininleleatatdleleleatinleinchinlelelechleleininininleleleleleleleinininleleininleleleinlenatleatinleinleinleinatinlelechinlecinleleleleininleleyininleininleininlechchlelelelechinininleclelelelelelelelelelelelelenyleleinncininlechyyyinleinleleyinleleleleininlechkinkchlechinkinlelechyyleleleleleleininininchininleinleleleininlelechinchlelechinlecinininininincinleleleleinleleinleleleleleleleleleininchchinleleininininininkinleleinleinininininchinlekinleinclechchininininkinleinleleleleinleinlelelechylelelekkkleyyleinininleleleininlelelelechlekkinininclelelekchinleleleleleleleleinckininininccleinincktechylekininkcleinletekleinininininininteleleleleleleinleleleinleinklelecleclelekinlecinkkinleinlemitkkleleinininleleintekleinlekinlekleinteleinlelelelekinininteininlelelekkininklelekinleinininininleleinleleinleleleleleleleleleinincclelekkinlelechkteinklekkinleinlekinlelelelelemitinclekkintecinlekinleinininininlelelelelekinleleleleleleinlelelelecinininleleleleincleleincleleinleleleinleleleinleleleinleleininleininlelecleleinleleleleinlelelelekkchleckinlekclekkinlelelechleleinkleinleinleklelelelemitleclelecchinclekkcckininininlecleinleleleleleinlelelekkleleinlelelelelelekkkkkkkinleleinlelelelelemitmitlekkkininleleleleleleinlekkchkkinklelelemitkkinlelekckinkinumlelekinlemitkchcklekinktelelelekkumlekkininininumlelekinteinincckkinckkkkteleleinininctecclechkleteininkkininlekumcleinumk'" - ], - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - } - }, - "metadata": {}, - "execution_count": 13 - } - ] - }, - { - "cell_type": "code", - "source": [ - "\n", - "start_time = time.time()\n", - "for i in range(10):\n", - " model.generate(\"what is keras\")\n", - " print(i)\n", - "end_time = time.time()\n", - "\n", - "inference_time = end_time - start_time\n", - "print(f\"Inference time for 100 calls to model.generate: {inference_time:7f} seconds\")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "7S-VXK0STiEq", - "outputId": "d7e24082-2de4-483f-bcf1-afb232eb726b" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "0\n", - "1\n", - "2\n", - "3\n", - "4\n", - "5\n", - "6\n", - "7\n", - "8\n", - "9\n", - "Inference time for 100 calls to model.generate: 42.329632 seconds\n" - ] - } - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "-kKBl2hQUSCR" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8e719c7a" - }, - "source": [ - "# Task\n", - "Modify the code in the selected cell to add early stopping criteria to the model fit." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aa1fc53e" - }, - "source": [ - "## Prune and evaluate in a loop\n", - "\n", - "### Subtask:\n", - "Iterate through the pruning configurations, load the model each time, prune it according to the current configuration, and evaluate the pruned model on the test data. Store the results (pruning ratio, method, loss, and accuracy).\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "44564af4" - }, - "source": [ - "**Reasoning**:\n", - "Iterate through the defined pruning ratios, load the model for each ratio, prune the loaded model, evaluate it, and store the results.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "521d81a5" - }, - "source": [ - "**Reasoning**:\n", - "The previous code failed because `x_test` and `y_test` were not defined in the current scope. I need to reload the CIFAR-100 dataset.\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "sBynZyCXtebS", - "outputId": "266466e0-1c6d-4619-a33f-422d3e505c29" - }, - "source": [ - "from keras.datasets import cifar100\n", - "from keras.utils import to_categorical\n", - "from keras.models import load_model\n", - "import pandas as pd\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the CIFAR-100 dataset\n", - "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", - "\n", - "# Normalize the image data\n", - "x_train = x_train.astype('float32') / 255.0\n", - "x_test = x_test.astype('float32') / 255.0\n", - "\n", - "# Convert labels to one-hot encoding\n", - "num_classes = 100\n", - "y_train = to_categorical(y_train, num_classes)\n", - "y_test = to_categorical(y_test, num_classes)\n", - "\n", - "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", - "print(f\"Training data shape: {x_train.shape}\")\n", - "print(f\"Training labels shape: {y_train.shape}\")\n", - "print(f\"Testing data shape: {x_test.shape}\")\n", - "print(f\"Testing labels shape: {y_test.shape}\")\n", - "\n", - "# Define pruning ratios\n", - "pruning_ratios = [i * 0.05 for i in range(1, 19)] + [0.95, 0.97, 0.99]\n", - "\n", - "# Define pruning methods\n", - "pruning_methods = [\"l1\", \"l2\", LnPruning(n=3), LnPruning(n=4), \"saliency\", \"taylor\"]\n", - "\n", - "# Initialize an empty list to store the results\n", - "pruning_results = []\n", - "\n", - "# Iterate through each pruning method\n", - "for pruning_method in pruning_methods:\n", - " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", - " # Iterate through each pruning ratio\n", - " for ratio in pruning_ratios:\n", - " print(f\"Processing pruning ratio: {ratio}\")\n", - "\n", - " # Load the saved model\n", - " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - " # Apply pruning\n", - " try:\n", - " if pruning_method in [\"saliency\", \"taylor\"]:\n", - " # Use a smaller subset for gradient-based methods to save time\n", - " dataset_subset = (x_train[:1000], y_train[:1000])\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", - " elif isinstance(pruning_method, str):\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - " else:\n", - " # Assume it's a PruningMethod instance like LnPruning\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - "\n", - "\n", - " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", - "\n", - " # Evaluate the pruned model\n", - " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - " # Append the results\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': loss,\n", - " 'Test Accuracy': accuracy\n", - " })\n", - " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", - "\n", - " except ValueError as e:\n", - " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", - " # Optionally store a record indicating failure\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': None,\n", - " 'Test Accuracy': None,\n", - " 'Error': str(e)\n", - " })\n", - "\n", - "\n", - "# Convert results to a pandas DataFrame for easy display and analysis\n", - "results_df = pd.DataFrame(pruning_results)\n", - "\n", - "# Display the results table\n", - "print(\"\\n--- Pruning Evaluation Results ---\")\n", - "display(results_df)" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "CIFAR-100 dataset loaded and preprocessed again.\n", - "Training data shape: (50000, 32, 32, 3)\n", - "Training labels shape: (50000, 100)\n", - "Testing data shape: (10000, 32, 32, 3)\n", - "Testing labels shape: (10000, 100)\n", - "\n", - "--- Pruning Method: l1 ---\n", - "Processing pruning ratio: 0.05\n", - " Pruning successful. Final sparsity: 0.0499\n", - " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", - "Processing pruning ratio: 0.1\n", - " Pruning successful. Final sparsity: 0.0999\n", - " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Pruning successful. Final sparsity: 0.1498\n", - " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", - "Processing pruning ratio: 0.2\n", - " Pruning successful. Final sparsity: 0.1998\n", - " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", - "Processing pruning ratio: 0.25\n", - " Pruning successful. Final sparsity: 0.2497\n", - " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Pruning successful. Final sparsity: 0.2997\n", - " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Pruning successful. Final sparsity: 0.3496\n", - " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", - "Processing pruning ratio: 0.4\n", - " Pruning successful. Final sparsity: 0.3996\n", - " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", - "Processing pruning ratio: 0.45\n", - " Pruning successful. Final sparsity: 0.4495\n", - " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", - "Processing pruning ratio: 0.5\n", - " Pruning successful. Final sparsity: 0.4994\n", - " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", - "Processing pruning ratio: 0.55\n", - " Pruning successful. Final sparsity: 0.5494\n", - " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Pruning successful. Final sparsity: 0.5993\n", - " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", - "Processing pruning ratio: 0.65\n", - " Pruning successful. Final sparsity: 0.6493\n", - " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Pruning successful. Final sparsity: 0.6992\n", - " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", - "Processing pruning ratio: 0.75\n", - " Pruning successful. Final sparsity: 0.7492\n", - " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", - "Processing pruning ratio: 0.8\n", - " Pruning successful. Final sparsity: 0.7991\n", - " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Pruning successful. Final sparsity: 0.8490\n", - " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", - "Processing pruning ratio: 0.9\n", - " Pruning successful. Final sparsity: 0.8990\n", - " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", - "Processing pruning ratio: 0.95\n", - " Pruning successful. Final sparsity: 0.9489\n", - " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.97\n", - " Pruning successful. Final sparsity: 0.9689\n", - " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.99\n", - " Pruning successful. Final sparsity: 0.9889\n", - " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", - "\n", - "--- Pruning Method: l2 ---\n", - "Processing pruning ratio: 0.05\n", - " Pruning successful. Final sparsity: 0.0499\n", - " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", - "Processing pruning ratio: 0.1\n", - " Pruning successful. Final sparsity: 0.0999\n", - " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Pruning successful. Final sparsity: 0.1498\n", - " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", - "Processing pruning ratio: 0.2\n", - " Pruning successful. Final sparsity: 0.1998\n", - " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", - "Processing pruning ratio: 0.25\n", - " Pruning successful. Final sparsity: 0.2497\n", - " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Pruning successful. Final sparsity: 0.2997\n", - " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Pruning successful. Final sparsity: 0.3496\n", - " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", - "Processing pruning ratio: 0.4\n", - " Pruning successful. Final sparsity: 0.3996\n", - " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", - "Processing pruning ratio: 0.45\n", - " Pruning successful. Final sparsity: 0.4495\n", - " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", - "Processing pruning ratio: 0.5\n", - " Pruning successful. Final sparsity: 0.4994\n", - " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", - "Processing pruning ratio: 0.55\n", - " Pruning successful. Final sparsity: 0.5494\n", - " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Pruning successful. Final sparsity: 0.5993\n", - " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", - "Processing pruning ratio: 0.65\n", - " Pruning successful. Final sparsity: 0.6493\n", - " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Pruning successful. Final sparsity: 0.6992\n", - " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", - "Processing pruning ratio: 0.75\n", - " Pruning successful. Final sparsity: 0.7492\n", - " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", - "Processing pruning ratio: 0.8\n", - " Pruning successful. Final sparsity: 0.7991\n", - " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Pruning successful. Final sparsity: 0.8490\n", - " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", - "Processing pruning ratio: 0.9\n", - " Pruning successful. Final sparsity: 0.8990\n", - " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", - "Processing pruning ratio: 0.95\n", - " Pruning successful. Final sparsity: 0.9489\n", - " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.97\n", - " Pruning successful. Final sparsity: 0.9689\n", - " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.99\n", - " Pruning successful. Final sparsity: 0.9889\n", - " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", - "\n", - "--- Pruning Method: ---\n", - "Processing pruning ratio: 0.05\n", - " Pruning successful. Final sparsity: 0.0499\n", - " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", - "Processing pruning ratio: 0.1\n", - " Pruning successful. Final sparsity: 0.0999\n", - " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Pruning successful. Final sparsity: 0.1498\n", - " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", - "Processing pruning ratio: 0.2\n", - " Pruning successful. Final sparsity: 0.1998\n", - " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", - "Processing pruning ratio: 0.25\n", - " Pruning successful. Final sparsity: 0.2497\n", - " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Pruning successful. Final sparsity: 0.2997\n", - " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Pruning successful. Final sparsity: 0.3496\n", - " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", - "Processing pruning ratio: 0.4\n", - " Pruning successful. Final sparsity: 0.3996\n", - " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", - "Processing pruning ratio: 0.45\n", - " Pruning successful. Final sparsity: 0.4495\n", - " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", - "Processing pruning ratio: 0.5\n", - " Pruning successful. Final sparsity: 0.4994\n", - " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", - "Processing pruning ratio: 0.55\n", - " Pruning successful. Final sparsity: 0.5494\n", - " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Pruning successful. Final sparsity: 0.5993\n", - " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", - "Processing pruning ratio: 0.65\n", - " Pruning successful. Final sparsity: 0.6493\n", - " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Pruning successful. Final sparsity: 0.6992\n", - " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", - "Processing pruning ratio: 0.75\n", - " Pruning successful. Final sparsity: 0.7492\n", - " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", - "Processing pruning ratio: 0.8\n", - " Pruning successful. Final sparsity: 0.7991\n", - " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Pruning successful. Final sparsity: 0.8490\n", - " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", - "Processing pruning ratio: 0.9\n", - " Pruning successful. Final sparsity: 0.8990\n", - " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", - "Processing pruning ratio: 0.95\n", - " Pruning successful. Final sparsity: 0.9489\n", - " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.97\n", - " Pruning successful. Final sparsity: 0.9689\n", - " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.99\n", - " Pruning successful. Final sparsity: 0.9889\n", - " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", - "\n", - "--- Pruning Method: ---\n", - "Processing pruning ratio: 0.05\n", - " Pruning successful. Final sparsity: 0.0499\n", - " Evaluation complete. Test Loss: 1.1221, Test Accuracy: 0.7420\n", - "Processing pruning ratio: 0.1\n", - " Pruning successful. Final sparsity: 0.0999\n", - " Evaluation complete. Test Loss: 1.1254, Test Accuracy: 0.7417\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Pruning successful. Final sparsity: 0.1498\n", - " Evaluation complete. Test Loss: 1.1429, Test Accuracy: 0.7361\n", - "Processing pruning ratio: 0.2\n", - " Pruning successful. Final sparsity: 0.1998\n", - " Evaluation complete. Test Loss: 1.1587, Test Accuracy: 0.7305\n", - "Processing pruning ratio: 0.25\n", - " Pruning successful. Final sparsity: 0.2497\n", - " Evaluation complete. Test Loss: 1.2086, Test Accuracy: 0.7187\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Pruning successful. Final sparsity: 0.2997\n", - " Evaluation complete. Test Loss: 1.2585, Test Accuracy: 0.7042\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Pruning successful. Final sparsity: 0.3496\n", - " Evaluation complete. Test Loss: 1.4132, Test Accuracy: 0.6687\n", - "Processing pruning ratio: 0.4\n", - " Pruning successful. Final sparsity: 0.3996\n", - " Evaluation complete. Test Loss: 1.4379, Test Accuracy: 0.6616\n", - "Processing pruning ratio: 0.45\n", - " Pruning successful. Final sparsity: 0.4495\n", - " Evaluation complete. Test Loss: 1.9416, Test Accuracy: 0.5578\n", - "Processing pruning ratio: 0.5\n", - " Pruning successful. Final sparsity: 0.4994\n", - " Evaluation complete. Test Loss: 2.2998, Test Accuracy: 0.4889\n", - "Processing pruning ratio: 0.55\n", - " Pruning successful. Final sparsity: 0.5494\n", - " Evaluation complete. Test Loss: 2.5721, Test Accuracy: 0.4360\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Pruning successful. Final sparsity: 0.5993\n", - " Evaluation complete. Test Loss: 3.0715, Test Accuracy: 0.3502\n", - "Processing pruning ratio: 0.65\n", - " Pruning successful. Final sparsity: 0.6493\n", - " Evaluation complete. Test Loss: 3.7629, Test Accuracy: 0.2079\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Pruning successful. Final sparsity: 0.6992\n", - " Evaluation complete. Test Loss: 4.2523, Test Accuracy: 0.1267\n", - "Processing pruning ratio: 0.75\n", - " Pruning successful. Final sparsity: 0.7492\n", - " Evaluation complete. Test Loss: 4.7808, Test Accuracy: 0.0330\n", - "Processing pruning ratio: 0.8\n", - " Pruning successful. Final sparsity: 0.7991\n", - " Evaluation complete. Test Loss: 4.9930, Test Accuracy: 0.0119\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Pruning successful. Final sparsity: 0.8490\n", - " Evaluation complete. Test Loss: 5.3660, Test Accuracy: 0.0132\n", - "Processing pruning ratio: 0.9\n", - " Pruning successful. Final sparsity: 0.8990\n", - " Evaluation complete. Test Loss: 11.6017, Test Accuracy: 0.0121\n", - "Processing pruning ratio: 0.95\n", - " Pruning successful. Final sparsity: 0.9489\n", - " Evaluation complete. Test Loss: 71.2134, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.97\n", - " Pruning successful. Final sparsity: 0.9689\n", - " Evaluation complete. Test Loss: 142.3717, Test Accuracy: 0.0100\n", - "Processing pruning ratio: 0.99\n", - " Pruning successful. Final sparsity: 0.9889\n", - " Evaluation complete. Test Loss: 9.3456, Test Accuracy: 0.0100\n", - "\n", - "--- Pruning Method: saliency ---\n", - "Processing pruning ratio: 0.05\n", - " Skipping pruning for ratio 0.05 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.1\n", - " Skipping pruning for ratio 0.1 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Skipping pruning for ratio 0.15000000000000002 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.2\n", - " Skipping pruning for ratio 0.2 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.25\n", - " Skipping pruning for ratio 0.25 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Skipping pruning for ratio 0.30000000000000004 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Skipping pruning for ratio 0.35000000000000003 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.4\n", - " Skipping pruning for ratio 0.4 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.45\n", - " Skipping pruning for ratio 0.45 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.5\n", - " Skipping pruning for ratio 0.5 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.55\n", - " Skipping pruning for ratio 0.55 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Skipping pruning for ratio 0.6000000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.65\n", - " Skipping pruning for ratio 0.65 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Skipping pruning for ratio 0.7000000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.75\n", - " Skipping pruning for ratio 0.75 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.8\n", - " Skipping pruning for ratio 0.8 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Skipping pruning for ratio 0.8500000000000001 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.9\n", - " Skipping pruning for ratio 0.9 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.95\n", - " Skipping pruning for ratio 0.95 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.97\n", - " Skipping pruning for ratio 0.97 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.99\n", - " Skipping pruning for ratio 0.99 with method 'saliency' due to error: Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\n", - "\n", - "--- Pruning Method: taylor ---\n", - "Processing pruning ratio: 0.05\n", - " Skipping pruning for ratio 0.05 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.1\n", - " Skipping pruning for ratio 0.1 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.15000000000000002\n", - " Skipping pruning for ratio 0.15000000000000002 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.2\n", - " Skipping pruning for ratio 0.2 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.25\n", - " Skipping pruning for ratio 0.25 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.30000000000000004\n", - " Skipping pruning for ratio 0.30000000000000004 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.35000000000000003\n", - " Skipping pruning for ratio 0.35000000000000003 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.4\n", - " Skipping pruning for ratio 0.4 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.45\n", - " Skipping pruning for ratio 0.45 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.5\n", - " Skipping pruning for ratio 0.5 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.55\n", - " Skipping pruning for ratio 0.55 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.6000000000000001\n", - " Skipping pruning for ratio 0.6000000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.65\n", - " Skipping pruning for ratio 0.65 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.7000000000000001\n", - " Skipping pruning for ratio 0.7000000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.75\n", - " Skipping pruning for ratio 0.75 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.8\n", - " Skipping pruning for ratio 0.8 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.8500000000000001\n", - " Skipping pruning for ratio 0.8500000000000001 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.9\n", - " Skipping pruning for ratio 0.9 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.95\n", - " Skipping pruning for ratio 0.95 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.97\n", - " Skipping pruning for ratio 0.97 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "Processing pruning ratio: 0.99\n", - " Skipping pruning for ratio 0.99 with method 'taylor' due to error: Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\n", - "\n", - "--- Pruning Evaluation Results ---\n" - ] - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - " Pruning Ratio Pruning Method Test Loss Test Accuracy \\\n", - "0 0.05 l1 1.122052 0.7420 \n", - "1 0.10 l1 1.125432 0.7417 \n", - "2 0.15 l1 1.142868 0.7361 \n", - "3 0.20 l1 1.158665 0.7305 \n", - "4 0.25 l1 1.208641 0.7187 \n", - ".. ... ... ... ... \n", - "121 0.85 taylor NaN NaN \n", - "122 0.90 taylor NaN NaN \n", - "123 0.95 taylor NaN NaN \n", - "124 0.97 taylor NaN NaN \n", - "125 0.99 taylor NaN NaN \n", - "\n", - " Error \n", - "0 NaN \n", - "1 NaN \n", - "2 NaN \n", - "3 NaN \n", - "4 NaN \n", - ".. ... \n", - "121 Could not find layer corresponding to weight t... \n", - "122 Could not find layer corresponding to weight t... \n", - "123 Could not find layer corresponding to weight t... \n", - "124 Could not find layer corresponding to weight t... \n", - "125 Could not find layer corresponding to weight t... \n", - "\n", - "[126 rows x 5 columns]" - ], - "text/html": [ - "\n", - "
\n", - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
Pruning RatioPruning MethodTest LossTest AccuracyError
00.05l11.1220520.7420NaN
10.10l11.1254320.7417NaN
20.15l11.1428680.7361NaN
30.20l11.1586650.7305NaN
40.25l11.2086410.7187NaN
..................
1210.85taylorNaNNaNCould not find layer corresponding to weight t...
1220.90taylorNaNNaNCould not find layer corresponding to weight t...
1230.95taylorNaNNaNCould not find layer corresponding to weight t...
1240.97taylorNaNNaNCould not find layer corresponding to weight t...
1250.99taylorNaNNaNCould not find layer corresponding to weight t...
\n", - "

126 rows × 5 columns

\n", - "
\n", - "
\n", - "\n", - "
\n", - " \n", - "\n", - " \n", - "\n", - " \n", - "
\n", - "\n", - "\n", - "
\n", - " \n", - "\n", - "\n", - "\n", - " \n", - "
\n", - "\n", - "
\n", - " \n", - " \n", - " \n", - "
\n", - "\n", - "
\n", - "
\n" - ], - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "dataframe", - "variable_name": "results_df", - "summary": "{\n \"name\": \"results_df\",\n \"rows\": 126,\n \"fields\": [\n {\n \"column\": \"Pruning Ratio\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.2973575048897971,\n \"min\": 0.05,\n \"max\": 0.99,\n \"num_unique_values\": 21,\n \"samples\": [\n 0.05,\n 0.9,\n 0.8\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Pruning Method\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 6,\n \"samples\": [\n \"l1\",\n \"l2\",\n \"taylor\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Test Loss\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 32.59375736132438,\n \"min\": 1.12205171585083,\n \"max\": 142.3716583251953,\n \"num_unique_values\": 21,\n \"samples\": [\n 1.12205171585083,\n 11.601749420166016,\n 4.992961406707764\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Test Accuracy\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 0.3082612624968978,\n \"min\": 0.009999999776482582,\n \"max\": 0.7419999837875366,\n \"num_unique_values\": 19,\n \"samples\": [\n 0.7419999837875366,\n 0.704200029373169,\n 0.35019999742507935\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"Error\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 2,\n \"samples\": [\n \"Could not find layer corresponding to weight tensor with shape (1, 1, 512, 2048)\",\n \"Could not find gradients for weight tensor with shape (1, 1, 512, 2048)\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}" - } - }, - "metadata": {} - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.datasets import cifar100\n", - "from keras.utils import to_categorical\n", - "from keras.models import load_model\n", - "import pandas as pd\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the CIFAR-100 dataset\n", - "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", - "\n", - "# Normalize the image data\n", - "x_train = x_train.astype('float32') / 255.0\n", - "x_test = x_test.astype('float32') / 255.0\n", - "\n", - "# Convert labels to one-hot encoding\n", - "num_classes = 100\n", - "y_train = to_categorical(y_train, num_classes)\n", - "y_test = to_categorical(y_test, num_classes)\n", - "\n", - "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", - "print(f\"Training data shape: {x_train.shape}\")\n", - "print(f\"Training labels shape: {y_train.shape}\")\n", - "print(f\"Testing data shape: {x_test.shape}\")\n", - "print(f\"Testing labels shape: {y_test.shape}\")\n", - "\n", - "# Define pruning ratios\n", - "pruning_ratios = [i * 0.1 for i in range(1, 10)] + [0.95, 0.97, 0.99]\n", - "pruning_ratios = [.3]\n", - "\n", - "# Define pruning methods\n", - "pruning_methods = [\"saliency\"]\n", - "\n", - "# Initialize an empty list to store the results\n", - "pruning_results = []\n", - "\n", - "# Iterate through each pruning method\n", - "for pruning_method in pruning_methods:\n", - " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", - " # Iterate through each pruning ratio\n", - " for ratio in pruning_ratios:\n", - " print(f\"Processing pruning ratio: {ratio}\")\n", - "\n", - " # Load the saved model\n", - " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - " # Apply pruning\n", - " try:\n", - " if pruning_method in [\"saliency\", \"taylor\"]:\n", - " # Use a smaller subset for gradient-based methods to save time\n", - " dataset_subset = (x_train, y_train)\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", - " elif isinstance(pruning_method, str):\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - " else:\n", - " # Assume it's a PruningMethod instance like LnPruning\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - "\n", - "\n", - " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", - "\n", - " # Evaluate the pruned model\n", - " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - " # Append the results\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': loss,\n", - " 'Test Accuracy': accuracy\n", - " })\n", - " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", - "\n", - " except ValueError as e:\n", - " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", - " # Optionally store a record indicating failure\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': None,\n", - " 'Test Accuracy': None,\n", - " 'Error': str(e)\n", - " })\n", - "\n", - "\n", - "# Convert results to a pandas DataFrame for easy display and analysis\n", - "results_df = pd.DataFrame(pruning_results)\n", - "\n", - "# Display the results table\n", - "print(\"\\n--- Pruning Evaluation Results ---\")\n", - "display(results_df)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 512 - }, - "id": "dmY8GKOBUpkM", - "outputId": "5cdbdb52-b950-4114-cb49-cf6d93d0696f" - }, - "execution_count": 6, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "CIFAR-100 dataset loaded and preprocessed again.\n", - "Training data shape: (50000, 32, 32, 3)\n", - "Training labels shape: (50000, 100)\n", - "Testing data shape: (10000, 32, 32, 3)\n", - "Testing labels shape: (10000, 100)\n", - "\n", - "--- Pruning Method: saliency ---\n", - "Processing pruning ratio: 0.3\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "Computing saliency gradients: 68%|██████▊ | 529/782 [04:15<02:02, 2.07batch/s, batches=529]\n" - ] - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-3734017654.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 430\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 432\u001b[0;31m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 433\u001b[0m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcalculate_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights_tensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_saliency_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 547\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_obj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 548\u001b[0m \u001b[0;31m# Ensure loss is scalar\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 549\u001b[0;31m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mloss_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 550\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 551\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgradient\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtf_var\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_tensorflow_gradients\u001b[0;34m(self, target_weight_var, model, batch_x, batch_y, loss_fn)\u001b[0m\n\u001b[1;32m 462\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkeras\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 463\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 464\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mtqdm\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtqdm\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 465\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 466\u001b[0m \u001b[0;31m# Extract parameters from kwargs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36mgradient\u001b[0;34m(self, target, sources, output_gradients, unconnected_gradients)\u001b[0m\n\u001b[1;32m 1064\u001b[0m for x in output_gradients]\n\u001b[1;32m 1065\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1066\u001b[0;31m flat_grad = imperative_grad.imperative_grad(\n\u001b[0m\u001b[1;32m 1067\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1068\u001b[0m \u001b[0mflat_targets\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/imperative_grad.py\u001b[0m in \u001b[0;36mimperative_grad\u001b[0;34m(tape, target, sources, output_gradients, sources_raw, unconnected_gradients)\u001b[0m\n\u001b[1;32m 65\u001b[0m \"Unknown value for unconnected_gradients: %r\" % unconnected_gradients)\n\u001b[1;32m 66\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 67\u001b[0;31m return pywrap_tfe.TFE_Py_TapeGradient(\n\u001b[0m\u001b[1;32m 68\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 69\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36m_gradient_function\u001b[0;34m(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices, forward_pass_name_scope)\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0mgradient_name_scope\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mforward_pass_name_scope\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\"/\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname_scope\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgradient_name_scope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 148\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 149\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmock_op\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mout_grads\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_AddGrad\u001b[0;34m(op, grad)\u001b[0m\n\u001b[1;32m 1376\u001b[0m \u001b[0mgx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mskip_input_indices\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1377\u001b[0m \u001b[0mgy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;36m1\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mskip_input_indices\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1378\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_ReduceGradientArgs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1379\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1380\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_ReduceGradientArgs\u001b[0;34m(x, y, gx, gy)\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0mbx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSmartBroadcastGradientArgs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mgx\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ReduceGradientArg\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 144\u001b[0;31m \u001b[0mgy\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_ReduceGradientArg\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgy\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mby\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 145\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 146\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/math_grad.py\u001b[0m in \u001b[0;36m_ReduceGradientArg\u001b[0;34m(grad, shape_axes_must_reduce)\u001b[0m\n\u001b[1;32m 133\u001b[0m \u001b[0;31m# emit extra ops to recover reduced indices for broadcasting.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmath_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_sum\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxes\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkeepdims\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 135\u001b[0;31m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0marray_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 136\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgrad\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 137\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8785\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mtld\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_eager\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8786\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8787\u001b[0;31m _result = pywrap_tfe.TFE_Py_FastPathExecute(\n\u001b[0m\u001b[1;32m 8788\u001b[0m _ctx, \"Reshape\", name, tensor, shape)\n\u001b[1;32m 8789\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " - ] - } - ] - }, - { - "source": [ - "# @title Pruning Ratio\n", - "\n", - "from matplotlib import pyplot as plt\n", - "results_df['Pruning Ratio'].plot(kind='hist', bins=20, title='Pruning Ratio')\n", - "plt.gca().spines[['top', 'right',]].set_visible(False)" - ], - "cell_type": "code", - "execution_count": null, - "outputs": [ - { - "output_type": "error", - "ename": "NameError", - "evalue": "name 'results_df' is not defined", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-459143830.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmatplotlib\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0mresults_df\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'Pruning Ratio'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkind\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'hist'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbins\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m20\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtitle\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'Pruning Ratio'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgca\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspines\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'top'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'right'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_visible\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'results_df' is not defined" - ] - } - ], - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 193 - }, - "id": "46ziXkflTK0P", - "outputId": "99d131cb-1652-4440-ee94-e48ebf2d5a8e" - } - }, - { - "cell_type": "code", - "source": [ - "from keras.datasets import cifar100\n", - "from keras.utils import to_categorical\n", - "from keras.models import load_model\n", - "import pandas as pd\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the CIFAR-100 dataset\n", - "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", - "\n", - "# Normalize the image data\n", - "x_train = x_train.astype('float32') / 255.0\n", - "x_test = x_test.astype('float32') / 255.0\n", - "\n", - "# Convert labels to one-hot encoding\n", - "num_classes = 100\n", - "y_train = to_categorical(y_train, num_classes)\n", - "y_test = to_categorical(y_test, num_classes)\n", - "\n", - "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", - "print(f\"Training data shape: {x_train.shape}\")\n", - "print(f\"Training labels shape: {y_train.shape}\")\n", - "print(f\"Testing data shape: {x_test.shape}\")\n", - "print(f\"Testing labels shape: {y_test.shape}\")\n", - "\n", - "# Define pruning ratios\n", - "pruning_ratios = [i * 0.2 for i in range(1, 4)] + [0.95, 0.97, 0.99]\n", - "\n", - "# Define pruning methods\n", - "pruning_methods = [\"saliency\",\"taylor\"]\n", - "\n", - "# Initialize an empty list to store the results\n", - "pruning_results = []\n", - "\n", - "# Iterate through each pruning method\n", - "for pruning_method in pruning_methods:\n", - " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", - " # Iterate through each pruning ratio\n", - " for ratio in pruning_ratios:\n", - " print(f\"Processing pruning ratio: {ratio}\")\n", - "\n", - " # Load the saved model\n", - " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - " # Apply pruning\n", - " try:\n", - " if pruning_method in [\"saliency\", \"taylor\"]:\n", - " # Use a smaller subset for gradient-based methods to save time\n", - " dataset_subset = (x_train, y_train)\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", - " elif isinstance(pruning_method, str):\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - " else:\n", - " # Assume it's a PruningMethod instance like LnPruning\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - "\n", - "\n", - " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", - "\n", - " # Evaluate the pruned model\n", - " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - " # Append the results\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': loss,\n", - " 'Test Accuracy': accuracy\n", - " })\n", - " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", - "\n", - " except ValueError as e:\n", - " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", - " # Optionally store a record indicating failure\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': None,\n", - " 'Test Accuracy': None,\n", - " 'Error': str(e)\n", - " })\n", - "\n", - "\n", - "# Convert results to a pandas DataFrame for easy display and analysis\n", - "results_df = pd.DataFrame(pruning_results)\n", - "\n", - "# Display the results table\n", - "print(\"\\n--- Pruning Evaluation Results ---\")\n", - "display(results_df)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 512 - }, - "id": "tRuPuPaydqx4", - "outputId": "b5e2d516-95ac-4472-b28d-bcd834626794" - }, - "execution_count": 2, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "CIFAR-100 dataset loaded and preprocessed again.\n", - "Training data shape: (50000, 32, 32, 3)\n", - "Training labels shape: (50000, 100)\n", - "Testing data shape: (10000, 32, 32, 3)\n", - "Testing labels shape: (10000, 100)\n", - "\n", - "--- Pruning Method: saliency ---\n", - "Processing pruning ratio: 0.2\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "Computing saliency gradients: 2%|▏ | 18/782 [00:09<06:30, 1.95batch/s, batches=18]\n" - ] - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-137121260.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 49\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 50\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 430\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 431\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 432\u001b[0;31m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_saliency_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 433\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 434\u001b[0m \u001b[0mflat_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msaliency_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_saliency_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 547\u001b[0m \u001b[0;31m# Backend-specific gradient computation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 548\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"tensorflow\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 549\u001b[0;31m \u001b[0mbatch_avg_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_tensorflow_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 550\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"jax\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 551\u001b[0m \u001b[0mbatch_avg_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_jax_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_tensorflow_gradients\u001b[0;34m(self, target_weight_var, model, batch_x, batch_y, loss_fn)\u001b[0m\n\u001b[1;32m 453\u001b[0m \u001b[0mtf_var\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_value\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'_value'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 454\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwatch\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf_var\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 455\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 456\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcallable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/convolutional/base_conv.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 257\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mbias_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilters\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 259\u001b[0;31m \u001b[0mbias\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 260\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 5365\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0many_symbolic_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mReshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msymbolic_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 5367\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5368\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5369\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 2168\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2169\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2170\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2171\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2172\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8793\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8794\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8795\u001b[0;31m return reshape_eager_fallback(\n\u001b[0m\u001b[1;32m 8796\u001b[0m tensor, shape, name=name, ctx=_ctx)\n\u001b[1;32m 8797\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0m_core\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_SymbolicException\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape_eager_fallback\u001b[0;34m(tensor, shape, name, ctx)\u001b[0m\n\u001b[1;32m 8815\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mreshape_eager_fallback\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_T\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_Tshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTV_Reshape_T\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8816\u001b[0m \u001b[0m_attr_T\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs_to_matching_eager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8817\u001b[0;31m \u001b[0m_attr_Tshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_execute\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs_to_matching_eager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint32\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint64\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_dtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mint32\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8818\u001b[0m \u001b[0m_inputs_flat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8819\u001b[0m \u001b[0m_attrs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\"T\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_attr_T\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"Tshape\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_attr_Tshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/execute.py\u001b[0m in \u001b[0;36margs_to_matching_eager\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# not list allowed dtypes, in which case we should skip this.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 250\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mallowed_dtypes\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 251\u001b[0;31m \u001b[0mtensor\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtensor_conversion_registry\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 252\u001b[0m \u001b[0;31m# If we did not match an allowed dtype, try again with the default\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 253\u001b[0m \u001b[0;31m# dtype. This could be because we have an empty tensor and thus we\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 232\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 234\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconversion_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 235\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNotImplemented\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_tensor_conversion.py\u001b[0m in \u001b[0;36m_constant_tensor_conversion_function\u001b[0;34m(v, dtype, name, as_ref)\u001b[0m\n\u001b[1;32m 27\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconstant_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconstant\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 31\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 142\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 143\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36mconstant\u001b[0;34m(value, dtype, shape, name)\u001b[0m\n\u001b[1;32m 274\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mcalled\u001b[0m \u001b[0mon\u001b[0m \u001b[0ma\u001b[0m \u001b[0msymbolic\u001b[0m \u001b[0mtensor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 275\u001b[0m \"\"\"\n\u001b[0;32m--> 276\u001b[0;31m return _constant_impl(value, dtype, shape, name, verify_shape=False,\n\u001b[0m\u001b[1;32m 277\u001b[0m allow_broadcast=True)\n\u001b[1;32m 278\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36m_constant_impl\u001b[0;34m(value, dtype, shape, name, verify_shape, allow_broadcast)\u001b[0m\n\u001b[1;32m 287\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mtrace\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"tf.constant\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 288\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_constant_eager_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverify_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 289\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_constant_eager_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mverify_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 290\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 291\u001b[0m const_tensor = ops._create_graph_constant( # pylint: disable=protected-access\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36m_constant_eager_impl\u001b[0;34m(ctx, value, dtype, shape, verify_shape)\u001b[0m\n\u001b[1;32m 299\u001b[0m ) -> ops._EagerTensorBase:\n\u001b[1;32m 300\u001b[0m \u001b[0;34m\"\"\"Creates a constant on the current device.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 301\u001b[0;31m \u001b[0mt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconvert_to_eager_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 302\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/constant_op.py\u001b[0m in \u001b[0;36mconvert_to_eager_tensor\u001b[0;34m(value, ctx, dtype)\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0mdtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdtypes\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_dtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_datatype_enum\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 108\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mEagerTensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdevice_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 109\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 110\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " - ] - } - ] - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "E4IAQjyoxi_V" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "%pwd\n", - "%cd /content/keras_repo/\n", - "!pip install -e ." - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 637 - }, - "id": "ggNuv7Gpis0U", - "outputId": "fea8224f-2eb5-40cd-beb5-c50fdaf658ee" - }, - "execution_count": 2, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "/content/keras_repo\n", - "Obtaining file:///content/keras_repo\n", - " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Checking if build backend supports build_editable ... \u001b[?25l\u001b[?25hdone\n", - " Getting requirements to build editable ... \u001b[?25l\u001b[?25hdone\n", - " Preparing editable metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - "Requirement already satisfied: absl-py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (1.4.0)\n", - "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (2.0.2)\n", - "Requirement already satisfied: rich in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (13.9.4)\n", - "Requirement already satisfied: namex in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.1.0)\n", - "Requirement already satisfied: h5py in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (3.14.0)\n", - "Requirement already satisfied: optree in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.17.0)\n", - "Requirement already satisfied: ml-dtypes in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (0.5.3)\n", - "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from keras==3.11.0) (25.0)\n", - "Requirement already satisfied: typing-extensions>=4.6.0 in /usr/local/lib/python3.11/dist-packages (from optree->keras==3.11.0) (4.14.1)\n", - "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (4.0.0)\n", - "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.11/dist-packages (from rich->keras==3.11.0) (2.19.2)\n", - "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.11/dist-packages (from markdown-it-py>=2.2.0->rich->keras==3.11.0) (0.1.2)\n", - "Building wheels for collected packages: keras\n", - " Building editable for keras (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for keras: filename=keras-3.11.0-0.editable-py3-none-any.whl size=9410 sha256=14a9f79fa98ab9a13b5339f11ce408f6e2badd5a481b5d1057488251bd657e3e\n", - " Stored in directory: /tmp/pip-ephem-wheel-cache-tniy7f4x/wheels/09/7a/d4/6dbe98c57884e68eba731115af18ec3a7f493640582bacb80f\n", - "Successfully built keras\n", - "Installing collected packages: keras\n", - " Attempting uninstall: keras\n", - " Found existing installation: keras 3.11.0\n", - " Uninstalling keras-3.11.0:\n", - " Successfully uninstalled keras-3.11.0\n", - "Successfully installed keras-3.11.0\n" - ] - }, - { - "output_type": "display_data", - "data": { - "application/vnd.colab-display-data+json": { - "pip_warning": { - "packages": [ - "keras" - ] - }, - "id": "9253b25bc602444b8c1ab91268b664d8" - } - }, - "metadata": {} - } - ] - }, - { - "cell_type": "code", - "source": [ - "from keras.datasets import cifar100\n", - "from keras.utils import to_categorical\n", - "from keras.models import load_model\n", - "import pandas as pd\n", - "from keras.pruning import LnPruning\n", - "\n", - "# Load the CIFAR-100 dataset\n", - "(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n", - "\n", - "# Normalize the image data\n", - "x_train = x_train.astype('float32') / 255.0\n", - "x_test = x_test.astype('float32') / 255.0\n", - "\n", - "# Convert labels to one-hot encoding\n", - "num_classes = 100\n", - "y_train = to_categorical(y_train, num_classes)\n", - "y_test = to_categorical(y_test, num_classes)\n", - "\n", - "print(\"CIFAR-100 dataset loaded and preprocessed again.\")\n", - "print(f\"Training data shape: {x_train.shape}\")\n", - "print(f\"Training labels shape: {y_train.shape}\")\n", - "print(f\"Testing data shape: {x_test.shape}\")\n", - "print(f\"Testing labels shape: {y_test.shape}\")\n", - "\n", - "# Define pruning ratios\n", - "pruning_ratios = [i * 0.2 for i in range(1, 4)] + [0.95, 0.97, 0.99]\n", - "pruning_ratios = [.3]\n", - "\n", - "# Define pruning methods\n", - "pruning_methods = [\"saliency\",\"taylor\"]\n", - "\n", - "# Initialize an empty list to store the results\n", - "pruning_results = []\n", - "\n", - "# Iterate through each pruning method\n", - "for pruning_method in pruning_methods:\n", - " print(f\"\\n--- Pruning Method: {pruning_method} ---\")\n", - " # Iterate through each pruning ratio\n", - " for ratio in pruning_ratios:\n", - " print(f\"Processing pruning ratio: {ratio}\")\n", - "\n", - " # Load the saved model\n", - " loaded_model = load_model(\"/content/drive/MyDrive/Projects/model-pruning/cifar100_resnet50_classifier.keras\")\n", - "\n", - " # Apply pruning\n", - " try:\n", - " if pruning_method in [\"saliency\", \"taylor\"]:\n", - " # Use a smaller subset for gradient-based methods to save time\n", - " dataset_subset = (x_train[:256], y_train[:256])\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, dataset=dataset_subset, loss_fn=loaded_model.loss)\n", - " elif isinstance(pruning_method, str):\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method)\n", - " else:\n", - " # Assume it's a PruningMethod instance like LnPruning\n", - " stats = loaded_model.prune(sparsity=ratio, method=pruning_method, pruning_batch_size=32)\n", - "\n", - "\n", - " print(f\" Pruning successful. Final sparsity: {stats['final_sparsity']:.4f}\")\n", - "\n", - " # Evaluate the pruned model\n", - " loss, accuracy = loaded_model.evaluate(x_test, y_test, verbose=0)\n", - "\n", - " # Append the results\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': loss,\n", - " 'Test Accuracy': accuracy\n", - " })\n", - " print(f\" Evaluation complete. Test Loss: {loss:.4f}, Test Accuracy: {accuracy:.4f}\")\n", - "\n", - " except ValueError as e:\n", - " print(f\" Skipping pruning for ratio {ratio} with method '{pruning_method}' due to error: {e}\")\n", - " # Optionally store a record indicating failure\n", - " pruning_results.append({\n", - " 'Pruning Ratio': ratio,\n", - " 'Pruning Method': str(pruning_method), # Convert method object to string\n", - " 'Test Loss': None,\n", - " 'Test Accuracy': None,\n", - " 'Error': str(e)\n", - " })\n", - "\n", - "\n", - "# Convert results to a pandas DataFrame for easy display and analysis\n", - "results_df = pd.DataFrame(pruning_results)\n", - "\n", - "# Display the results table\n", - "print(\"\\n--- Pruning Evaluation Results ---\")\n", - "display(results_df)" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "jJnwc8i2xptV", - "outputId": "957c279e-5ffa-4587-dc4b-53dff728c01e" - }, - "execution_count": 1, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "CIFAR-100 dataset loaded and preprocessed again.\n", - "Training data shape: (50000, 32, 32, 3)\n", - "Training labels shape: (50000, 100)\n", - "Testing data shape: (10000, 32, 32, 3)\n", - "Testing labels shape: (10000, 100)\n", - "\n", - "--- Pruning Method: saliency ---\n", - "Processing pruning ratio: 0.3\n" - ] - }, - { - "output_type": "stream", - "name": "stderr", - "text": [ - "WARNING:tensorflow:5 out of the last 5 calls to .compute_gradients_cached at 0x7dd5a3b63b00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n", - "WARNING:tensorflow:6 out of the last 6 calls to .compute_gradients_cached at 0x7dd5a3b63b00> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" - ] - }, - { - "output_type": "error", - "ename": "KeyboardInterrupt", - "evalue": "", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-1493771993.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;31m# Use a smaller subset for gradient-based methods to save time\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0mdataset_subset\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;36m256\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdataset_subset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0mstats\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloaded_model\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprune\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mratio\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpruning_method\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/model.py\u001b[0m in \u001b[0;36mprune\u001b[0;34m(self, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 530\u001b[0m \u001b[0;31m# Use direct parameter approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 531\u001b[0;31m stats = apply_pruning_to_model(\n\u001b[0m\u001b[1;32m 532\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 533\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_model\u001b[0;34m(model, sparsity, method, layers_to_prune, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlist_of_sublayers\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshould_prune_layer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayers_to_prune\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m if apply_pruning_to_layer(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mapply_pruning_to_layer\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, reinitialize, **kwargs)\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;31m# Use the new get_pruning_mask function for consistency\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 348\u001b[0;31m mask = get_pruning_mask(\n\u001b[0m\u001b[1;32m 349\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlayer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 350\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/core.py\u001b[0m in \u001b[0;36mget_pruning_mask\u001b[0;34m(layer, sparsity, method, model, dataset, loss_fn, **kwargs)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 249\u001b[0m \u001b[0;31m# Compute mask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 250\u001b[0;31m \u001b[0mmask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpruning_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msparsity\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmask_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 251\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 252\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcompute_mask\u001b[0;34m(self, weights, sparsity_ratio, **kwargs)\u001b[0m\n\u001b[1;32m 1007\u001b[0m \u001b[0;31m# Compute saliency scores (pass validated loss_fn in kwargs)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1008\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss_fn'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1009\u001b[0;31m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcalculate_scores\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights_tensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1010\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1011\u001b[0m \u001b[0mflat_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msaliency_scores\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mcalculate_scores\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 1027\u001b[0m \"\"\"\n\u001b[1;32m 1028\u001b[0m \u001b[0;31m# Use efficient gradient computation (matches model.fit performance)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1029\u001b[0;31m \u001b[0mgradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1030\u001b[0m \u001b[0;31m# JAXPruner's simple and clean saliency calculation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1031\u001b[0m \u001b[0msaliency_scores\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mweights\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mgradients\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_gradients\u001b[0;34m(self, weights, **kwargs)\u001b[0m\n\u001b[1;32m 294\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 295\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"tensorflow\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 296\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients_tf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 297\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mbackend_name\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"jax\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 298\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compute_gradients_jax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtarget_weight_var\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36m_compute_gradients_tf\u001b[0;34m(self, variable, model, loss_fn, dataset)\u001b[0m\n\u001b[1;32m 337\u001b[0m \u001b[0;31m# Use cached tf.function following standard Keras pattern\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 338\u001b[0m \u001b[0mgradient_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_get_tf_gradient_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 339\u001b[0;31m return gradient_fn(\n\u001b[0m\u001b[1;32m 340\u001b[0m \u001b[0mtrainable_variables\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 341\u001b[0m \u001b[0mtf_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 831\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 832\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_jit_compile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 833\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 834\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 835\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperimental_get_tracing_count\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, *args, **kwds)\u001b[0m\n\u001b[1;32m 876\u001b[0m \u001b[0;31m# In this case we have not created variables on the first call. So we can\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 877\u001b[0m \u001b[0;31m# run the first trace but we should fail if variables are created.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 878\u001b[0;31m results = tracing_compilation.call_function(\n\u001b[0m\u001b[1;32m 879\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 880\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mcall_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 130\u001b[0m \u001b[0margs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0margs\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0margs\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 131\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 132\u001b[0;31m function = trace_function(\n\u001b[0m\u001b[1;32m 133\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 134\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mtrace_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 176\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 177\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m concrete_function = _maybe_define_function(\n\u001b[0m\u001b[1;32m 179\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_maybe_define_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 282\u001b[0m \u001b[0mtarget_func_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlookup_func_type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 283\u001b[0;31m concrete_function = _create_concrete_function(\n\u001b[0m\u001b[1;32m 284\u001b[0m \u001b[0mtarget_func_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup_func_context\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 285\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_create_concrete_function\u001b[0;34m(function_type, type_context, func_graph, tracing_options)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[0mattributes_lib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLE_ACD\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 309\u001b[0m )\n\u001b[0;32m--> 310\u001b[0;31m traced_func_graph = func_graph_module.func_graph_from_py_func(\n\u001b[0m\u001b[1;32m 311\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 312\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython_function\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/func_graph.py\u001b[0m in \u001b[0;36mfunc_graph_from_py_func\u001b[0;34m(name, python_func, args, kwargs, signature, func_graph, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, create_placeholders)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moriginal_func\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munwrap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpython_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mfunc_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpython_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mfunc_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mfunc_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1062\u001b[0m \u001b[0;31m# invariant: `func_outputs` contains only Tensors, CompositeTensors,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args, **kwds)\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;31m# the function a weak reference to itself to avoid a reference cycle.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompile_with_xla\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 599\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mweak_wrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__wrapped__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 600\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 601\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/autograph_util.py\u001b[0m in \u001b[0;36mautograph_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 39\u001b[0m \u001b[0;34m\"\"\"Calls a converted version of original_func.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m return api.converted_call(\n\u001b[0m\u001b[1;32m 42\u001b[0m \u001b[0moriginal_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 437\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 438\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 439\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconverted_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0meffective_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 440\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 441\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconverted_f\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0meffective_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mtf__compute_gradients_cached\u001b[0;34m(trainable_variables, tf_dataset, model, loss_fn, target_var_index)\u001b[0m\n\u001b[1;32m 64\u001b[0m \u001b[0mgrad\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mUndefined\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'grad'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mbatch_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mUndefined\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'batch_gradients'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 66\u001b[0;31m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfor_stmt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf_dataset\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloop_body_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_state_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_state_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m'total_samples'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m'iterate_names'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'(batch_x, batch_y)'\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 67\u001b[0m \u001b[0maveraged_gradients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mif_exp\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtotal_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtotal_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'total_samples > 0'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mg\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0maccumulated_gradients\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 68\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/operators/control_flow.py\u001b[0m in \u001b[0;36mfor_stmt\u001b[0;34m(iter_, extra_test, body, get_state, set_state, symbol_names, opts)\u001b[0m\n\u001b[1;32m 447\u001b[0m \u001b[0mfor_fn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_tf_distributed_iterable_for_stmt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 448\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 449\u001b[0;31m \u001b[0mfor_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miter_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextra_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mget_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mset_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msymbol_names\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mopts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 450\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 451\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36m_tf_ag_dataset_for_stmt\u001b[0;34m(ds, extra_test, body, get_state, set_state, symbol_names, opts)\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mnew_reduce_state\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 118\u001b[0;31m \u001b[0mds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_general_purpose_scan\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minit_vars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mscan_body\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 119\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mextra_test\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0mds\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtake_while_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtake_while\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtake_while_predicate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36m_general_purpose_scan\u001b[0;34m(ds, init_state, body)\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;31m# pylint: disable=g-import-not-at-top,protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mops\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mscan_op\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 45\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mscan_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_ScanDataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minit_state\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0muse_default_device\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 46\u001b[0m \u001b[0;31m# pylint: enable=g-import-not-at-top,protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 47\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/scan_op.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, input_dataset, initial_state, scan_func, use_default_device, name)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0mneed_to_rerun\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 56\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 57\u001b[0;31m wrapped_func = structured_function.StructuredFunctionWrapper(\n\u001b[0m\u001b[1;32m 58\u001b[0m \u001b[0mscan_func\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 59\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_transformation_name\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, func, transformation_name, dataset, input_classes, input_shapes, input_types, input_structure, add_to_graph, use_legacy_function, defun_kwargs)\u001b[0m\n\u001b[1;32m 263\u001b[0m \u001b[0mfn_factory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrace_tf_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdefun_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 265\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_function\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfn_factory\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 266\u001b[0m \u001b[0;31m# There is no graph to add in eager mode.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 267\u001b[0m \u001b[0madd_to_graph\u001b[0m \u001b[0;34m&=\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mget_concrete_function\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1254\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mget_concrete_function\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1255\u001b[0m \u001b[0;31m# Implements PolymorphicFunction.get_concrete_function.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1256\u001b[0;31m \u001b[0mconcrete\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_concrete_function_garbage_collected\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1257\u001b[0m \u001b[0mconcrete\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_garbage_collector\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrelease\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mconcrete\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_get_concrete_function_garbage_collected\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1224\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1225\u001b[0m \u001b[0minitializers\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1226\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initialize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0madd_initializers_to\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitializers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1227\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_initialize_uninitialized_variables\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minitializers\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1228\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36m_initialize\u001b[0;34m(self, args, kwds, add_initializers_to)\u001b[0m\n\u001b[1;32m 694\u001b[0m )\n\u001b[1;32m 695\u001b[0m \u001b[0;31m# Force the definition of the function for these arguments\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 696\u001b[0;31m self._concrete_variable_creation_fn = tracing_compilation.trace_function(\n\u001b[0m\u001b[1;32m 697\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_variable_creation_config\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 698\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36mtrace_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 176\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 177\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 178\u001b[0;31m concrete_function = _maybe_define_function(\n\u001b[0m\u001b[1;32m 179\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_maybe_define_function\u001b[0;34m(args, kwargs, tracing_options)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 282\u001b[0m \u001b[0mtarget_func_type\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlookup_func_type\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 283\u001b[0;31m concrete_function = _create_concrete_function(\n\u001b[0m\u001b[1;32m 284\u001b[0m \u001b[0mtarget_func_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup_func_context\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfunc_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 285\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/tracing_compilation.py\u001b[0m in \u001b[0;36m_create_concrete_function\u001b[0;34m(function_type, type_context, func_graph, tracing_options)\u001b[0m\n\u001b[1;32m 308\u001b[0m \u001b[0mattributes_lib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLE_ACD\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 309\u001b[0m )\n\u001b[0;32m--> 310\u001b[0;31m traced_func_graph = func_graph_module.func_graph_from_py_func(\n\u001b[0m\u001b[1;32m 311\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 312\u001b[0m \u001b[0mtracing_options\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpython_function\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/func_graph.py\u001b[0m in \u001b[0;36mfunc_graph_from_py_func\u001b[0;34m(name, python_func, args, kwargs, signature, func_graph, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, create_placeholders)\u001b[0m\n\u001b[1;32m 1058\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1059\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moriginal_func\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_decorator\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0munwrap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpython_func\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1060\u001b[0;31m \u001b[0mfunc_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpython_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mfunc_args\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mfunc_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1061\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1062\u001b[0m \u001b[0;31m# invariant: `func_outputs` contains only Tensors, CompositeTensors,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/polymorphic_function/polymorphic_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args, **kwds)\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;31m# the function a weak reference to itself to avoid a reference cycle.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 598\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mOptionalXlaContext\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcompile_with_xla\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 599\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mweak_wrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__wrapped__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 600\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 601\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36mwrapped_fn\u001b[0;34m(*args)\u001b[0m\n\u001b[1;32m 229\u001b[0m \u001b[0;31m# Note: wrapper_helper will apply autograph based on context.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 230\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapped_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint: disable=missing-docstring\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 231\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwrapper_helper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 232\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstructure\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto_tensor_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_output_structure\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mt\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mret\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/structured_function.py\u001b[0m in \u001b[0;36mwrapper_helper\u001b[0;34m(*args)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0m_should_unpack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \u001b[0mnested_args\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 161\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mautograph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtf_convert\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_func\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mnested_args\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvariable_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_variables_to_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m_should_pack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mret\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 688\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 689\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mconversion_ctx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 690\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 691\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pylint:disable=broad-except\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 692\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'ag_error_metadata'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 376\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muser_requested\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mconversion\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_allowlisted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 377\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_call_unconverted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 378\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 379\u001b[0m \u001b[0;31m# internal_convert_user_code is for example turned off when issuing a dynamic\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36m_call_unconverted\u001b[0;34m(f, args, kwargs, options, update_cache)\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 458\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 459\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 460\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 461\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36mscan_body\u001b[0;34m(scan_state, scan_inputs)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;31m# TODO(mdan): the optimizer should be able to remove an invariant cond?\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0mextra_cond\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mconstant_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconstant\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# dummy value, unused\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m \u001b[0mnew_loop_vars\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmain_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0mscan_outputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnew_loop_vars\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextra_cond\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/data/ops/dataset_autograph.py\u001b[0m in \u001b[0;36mmain_path\u001b[0;34m()\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mmain_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 86\u001b[0;31m \u001b[0mbody\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0miterate\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 87\u001b[0m \u001b[0mnew_loop_vars\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_state\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 88\u001b[0m control_flow.verify_tf_loop_vars(\n", - "\u001b[0;32m/content/keras_repo/keras/src/pruning/pruning_method.py\u001b[0m in \u001b[0;36mloop_body_1\u001b[0;34m(itr_1)\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[0mbatch_x\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_y\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mitr_1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mGradientTape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtape\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 26\u001b[0;31m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_x\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtraining\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 27\u001b[0m \u001b[0mloss_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_fn\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbatch_y\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpredictions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconverted_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreduce_mean\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mag__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mld\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_val\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfscope\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36mconverted_call\u001b[0;34m(f, args, kwargs, caller_fn_scope, options)\u001b[0m\n\u001b[1;32m 329\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mconversion\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_in_allowlist_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 330\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlog\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'Allowlisted %s: from cache'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 331\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_call_unconverted\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 332\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 333\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrol_status_ctx\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstatus\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mag_ctx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mStatus\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDISABLED\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/autograph/impl/api.py\u001b[0m in \u001b[0;36m_call_unconverted\u001b[0;34m(f, args, kwargs, options, update_cache)\u001b[0m\n\u001b[1;32m 457\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 458\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 459\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 460\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 461\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs, training, mask, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmask\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_keras_mask\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m outputs = self._run_through_graph(\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m operation_fn=lambda op: operation_fn(\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/function.py\u001b[0m in \u001b[0;36m_run_through_graph\u001b[0;34m(self, inputs, operation_fn, call_fn)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[0moperation\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_operation_for_node\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[0mop\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moperation_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moperation\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 206\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 207\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;31m# Update tensor_dict.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/models/functional.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 642\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 643\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 644\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moperation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 645\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 646\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcall\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/layer.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 939\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 940\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 941\u001b[0;31m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 942\u001b[0m \u001b[0;31m# Change the layout for the layer output if needed.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 943\u001b[0m \u001b[0;31m# This is useful for relayout intermediate tensor in the model\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/operation.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 57\u001b[0m \u001b[0mobject_name\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"{self.__class__.__name__}.call()\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 58\u001b[0m )\n\u001b[0;32m---> 59\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcall_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 60\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;31m# Plain flow.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/utils/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 154\u001b[0m \u001b[0mbound_signature\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 156\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 157\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 158\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"_keras_call_info_injected\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/layers/convolutional/base_conv.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 257\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 258\u001b[0m \u001b[0mbias_shape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfilters\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m*\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrank\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 259\u001b[0;31m \u001b[0mbias\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 260\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/ops/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 5365\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0many_symbolic_tensors\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mReshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msymbolic_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 5367\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mbackend\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 5368\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5369\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/numpy.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(x, newshape)\u001b[0m\n\u001b[1;32m 2168\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput_shape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2169\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2170\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnewshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2171\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2172\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/weak_tensor_ops.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrapper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 87\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_auto_dtype_conversion_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 88\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 89\u001b[0m \u001b[0mbound_arguments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msignature\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mbound_arguments\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mapply_defaults\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mA\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mTensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m \u001b[0mHas\u001b[0m \u001b[0mthe\u001b[0m \u001b[0msame\u001b[0m \u001b[0mtype\u001b[0m \u001b[0;32mas\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;31m`\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \"\"\"\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgen_array_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mshape_util\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmaybe_set_static_shape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_array_ops.py\u001b[0m in \u001b[0;36mreshape\u001b[0;34m(tensor, shape, name)\u001b[0m\n\u001b[1;32m 8798\u001b[0m \u001b[0;32mpass\u001b[0m \u001b[0;31m# Add nodes to the TensorFlow graph.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8799\u001b[0m \u001b[0;31m# Add nodes to the TensorFlow graph.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 8800\u001b[0;31m _, _, _op, _outputs = _op_def_library._apply_op_helper(\n\u001b[0m\u001b[1;32m 8801\u001b[0m \"Reshape\", tensor=tensor, shape=shape, name=name)\n\u001b[1;32m 8802\u001b[0m \u001b[0m_result\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_outputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/op_def_library.py\u001b[0m in \u001b[0;36m_apply_op_helper\u001b[0;34m(op_type_name, name, **keywords)\u001b[0m\n\u001b[1;32m 776\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_default\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname_scope\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mscope\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 777\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfallback\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 778\u001b[0;31m _ExtractInputsAndAttrs(op_type_name, op_def, allowed_list_attr_map,\n\u001b[0m\u001b[1;32m 779\u001b[0m \u001b[0mkeywords\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdefault_type_attr_map\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattrs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 780\u001b[0m input_types)\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/op_def_library.py\u001b[0m in \u001b[0;36m_ExtractInputsAndAttrs\u001b[0;34m(op_type_name, op_def, allowed_list_attr_map, keywords, default_type_attr_map, attrs, inputs, input_types)\u001b[0m\n\u001b[1;32m 549\u001b[0m preferred_dtype=default_dtype)\n\u001b[1;32m 550\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 551\u001b[0;31m values = ops.convert_to_tensor(\n\u001b[0m\u001b[1;32m 552\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minput_arg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/profiler/trace.py\u001b[0m in \u001b[0;36mwrapped\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 181\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrace_name\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mtrace_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 182\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 183\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 184\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapped\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/ops.py\u001b[0m in \u001b[0;36mconvert_to_tensor\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)\u001b[0m\n\u001b[1;32m 734\u001b[0m \u001b[0;31m# TODO(b/142518781): Fix all call-sites and remove redundant arg\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 735\u001b[0m \u001b[0mpreferred_dtype\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpreferred_dtype\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mdtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 736\u001b[0;31m return tensor_conversion_registry.convert(\n\u001b[0m\u001b[1;32m 737\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpreferred_dtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maccepted_result_types\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 738\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 207\u001b[0m \u001b[0moverload\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgetattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"__tf_tensor__\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 208\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0moverload\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 209\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0moverload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=not-callable\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 210\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 211\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mbase_type\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mconversion_func\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/content/keras_repo/keras/src/backend/tensorflow/core.py\u001b[0m in \u001b[0;36m__tf_tensor__\u001b[0;34m(self, dtype, name)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0;31m# Overload native accessor.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__tf_tensor__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 84\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconvert_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 85\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;31m# Methods below are for SavedModel support\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/traceback_utils.py\u001b[0m in \u001b[0;36merror_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 148\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 150\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 151\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 152\u001b[0m \u001b[0mfiltered_tb\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_process_traceback_frames\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__traceback__\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/util/dispatch.py\u001b[0m in \u001b[0;36mop_dispatch_handler\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 1258\u001b[0m \u001b[0;31m# Fallback dispatch system (dispatch v1):\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1259\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1260\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mdispatch_target\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1261\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mTypeError\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1262\u001b[0m \u001b[0;31m# Note: convert_to_eager_tensor currently raises a ValueError, not a\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion.py\u001b[0m in \u001b[0;36mconvert_to_tensor_v2_with_dispatch\u001b[0;34m(value, dtype, dtype_hint, name)\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mIf\u001b[0m \u001b[0mthe\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0ma\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mof\u001b[0m \u001b[0mgiven\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;31m`\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mgraph\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 160\u001b[0m \"\"\"\n\u001b[0;32m--> 161\u001b[0;31m return convert_to_tensor_v2(\n\u001b[0m\u001b[1;32m 162\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype_hint\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype_hint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion.py\u001b[0m in \u001b[0;36mconvert_to_tensor_v2\u001b[0;34m(value, dtype, dtype_hint, name)\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0;34m\"\"\"Converts the given `value` to a `Tensor`.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 170\u001b[0m \u001b[0;31m# preferred_dtype = preferred_dtype or dtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 171\u001b[0;31m return tensor_conversion_registry.convert(\n\u001b[0m\u001b[1;32m 172\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpreferred_dtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype_hint\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 173\u001b[0m )\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py\u001b[0m in \u001b[0;36mconvert\u001b[0;34m(value, dtype, name, as_ref, preferred_dtype, accepted_result_types)\u001b[0m\n\u001b[1;32m 232\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 233\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 234\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconversion_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 235\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 236\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mret\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNotImplemented\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_dense_var_to_tensor\u001b[0;34m(var, dtype, name, as_ref)\u001b[0m\n\u001b[1;32m 2376\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2377\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_dense_var_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvar\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2378\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mvar\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dense_var_to_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mas_ref\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mas_ref\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# pylint: disable=protected-access\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2379\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2380\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_dense_var_to_tensor\u001b[0;34m(***failed resolving arguments***)\u001b[0m\n\u001b[1;32m 1622\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1623\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1624\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1625\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1626\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__iadd__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0munused_other\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36mvalue\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 656\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_cached_value\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 657\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolocate_with\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mignore_existing\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 658\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_read_variable_op\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 659\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 660\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_as_graph_element\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36m_read_variable_op\u001b[0;34m(self, no_copy)\u001b[0m\n\u001b[1;32m 841\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mread_and_set_handle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mno_copy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 842\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 843\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mread_and_set_handle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mno_copy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 844\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 845\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mcontext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexecuting_eagerly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/resource_variable_ops.py\u001b[0m in \u001b[0;36mread_and_set_handle\u001b[0;34m(no_copy)\u001b[0m\n\u001b[1;32m 831\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mno_copy\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mforward_compat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward_compatible\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2022\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 832\u001b[0m \u001b[0mgen_resource_variable_ops\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdisable_copy_on_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 833\u001b[0;31m result = gen_resource_variable_ops.read_variable_op(\n\u001b[0m\u001b[1;32m 834\u001b[0m self.handle, self._dtype)\n\u001b[1;32m 835\u001b[0m \u001b[0m_maybe_set_handle_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dtype\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/ops/gen_resource_variable_ops.py\u001b[0m in \u001b[0;36mread_variable_op\u001b[0;34m(resource, dtype, name)\u001b[0m\n\u001b[1;32m 552\u001b[0m \u001b[0m_attrs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;34m\"dtype\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_attr_type\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"dtype\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 553\u001b[0m \u001b[0m_inputs_flat\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_op\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 554\u001b[0;31m _execute.record_gradient(\n\u001b[0m\u001b[1;32m 555\u001b[0m \"ReadVariableOp\", _inputs_flat, _attrs, _result)\n\u001b[1;32m 556\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_result\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/eager/backprop.py\u001b[0m in \u001b[0;36mrecord_gradient\u001b[0;34m(op_name, inputs, attrs, outputs)\u001b[0m\n\u001b[1;32m 169\u001b[0m \u001b[0moutputs\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mA\u001b[0m \u001b[0mlist\u001b[0m \u001b[0mof\u001b[0m \u001b[0mtensor\u001b[0m \u001b[0moutputs\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mthe\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 170\u001b[0m \"\"\"\n\u001b[0;32m--> 171\u001b[0;31m pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,\n\u001b[0m\u001b[1;32m 172\u001b[0m ops.get_name_scope())\n\u001b[1;32m 173\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.11/dist-packages/tensorflow/python/framework/tensor.py\u001b[0m in \u001b[0;36m_shape_tuple\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 344\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 345\u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0m_shape_tuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 346\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_shape_as_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 347\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mshape\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mKeyboardInterrupt\u001b[0m: " - ] - } - ] - }, - { - "source": [ - "from google.colab import sheets\n", - "sheet = sheets.InteractiveSheet(df=results_df)" - ], - "cell_type": "code", - "execution_count": 2, - "outputs": [ - { - "output_type": "error", - "ename": "NameError", - "evalue": "name 'results_df' is not defined", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-3464966735.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mgoogle\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolab\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msheets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0msheet\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msheets\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mInteractiveSheet\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mresults_df\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mNameError\u001b[0m: name 'results_df' is not defined" - ] - } - ], - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 159 - }, - "id": "zgzIjLwxze6j", - "outputId": "458ec0b1-c6ee-4910-8b09-bb87ab684336" - } - }, - { - "cell_type": "code", - "source": [], - "metadata": { - "id": "TgcQ_o3Kwq0G" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c3f7a9af" - }, - "source": [ - "## Visualize results with a line plot\n", - "\n", - "### Subtask:\n", - "Create a line plot showing the accuracy and loss for each pruning ratio and method." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c0605653" - }, - "source": [ - "**Reasoning**:\n", - "Visualize the pruning results using a line plot to compare the performance of different pruning methods across various sparsity levels. Plotting both accuracy and loss will provide a comprehensive view of how pruning affects the model." - ] - }, - { - "cell_type": "code", - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 389 - }, - "id": "c1c68d7f", - "outputId": "c3e73461-402c-4511-d1c4-421cd27b3ae5" - }, - "source": [ - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "\n", - "# Set the style for the plots\n", - "sns.set_style(\"whitegrid\")\n", - "\n", - "# Create a figure and a set of subplots\n", - "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 6))\n", - "\n", - "# Plot Test Accuracy\n", - "sns.lineplot(data=results_df, x='Pruning Ratio', y='Test Accuracy', hue='Pruning Method', marker='o', ax=axes[0])\n", - "axes[0].set_title('Test Accuracy vs. Pruning Ratio')\n", - "axes[0].set_xlabel('Pruning Ratio')\n", - "axes[0].set_ylabel('Test Accuracy')\n", - "axes[0].set_ylim(0, 1) # Set y-axis limit for accuracy between 0 and 1\n", - "axes[0].legend(title='Pruning Method')\n", - "\n", - "# Plot Test Loss\n", - "sns.lineplot(data=results_df, x='Pruning Ratio', y='Test Loss', hue='Pruning Method', marker='o', ax=axes[1])\n", - "axes[1].set_title('Test Loss vs. Pruning Ratio')\n", - "axes[1].set_xlabel('Pruning Ratio')\n", - "axes[1].set_ylabel('Test Loss')\n", - "# axes[1].set_ylim(0, 5) # Optional: set a reasonable y-limit for loss\n", - "axes[1].legend(title='Pruning Method')\n", - "\n", - "# Adjust layout to prevent overlapping titles and labels\n", - "plt.tight_layout()\n", - "\n", - "# Display the plots\n", - "plt.show()" - ], - "execution_count": null, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "
" - ], - "image/png": "iVBORw0KGgoAAAANSUhEUgAABv4AAAJOCAYAAAB/dnBOAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzs3XVcFOkfB/DP0iUmdqO7GKQoiiiKnh2oZwsGnt16it2HeQYYnIF9JmC3P+uMszvOAhMVg4Zld35/cDvHuouCAivyeb9e+4Kd/M7ss7PfmWfmeSSCIAggIiIiIiIiIiIiIiIiohxNT9cBEBEREREREREREREREdG3Y8UfERERERERERERERER0Q+AFX9EREREREREREREREREPwBW/BERERERERERERERERH9AFjxR0RERERERERERERERPQDYMUfERERERERERERERER0Q+AFX9EREREREREREREREREPwBW/BERERERERERERERERH9AFjxR0RERERERERERERERPQDYMUfERERfbXz589DJpPh/Pnzug4ly/n7+0Mmk+k6DCIiIiIionTJTecwXl5e8PLy0nUYRETfBQNdB0BE9CNKb2K9bt06uLi4fNO64uPjsXLlStSoUSPDyzpx4gT69OkDKysrnDx5Enp6vB/ke5a6XEkkEhQqVAhSqRR9+/b95nKUEwUHB2Ps2LHie319fRQsWBC1a9fG8OHDUaRIkQwv81u+T0RERERE3+p7Ppc8f/48vL29sWjRIjRp0uSb1v2jUe0bFQMDAxQtWhTVqlXD4MGDUapUKR1Gpxu+vr4ICQkR3xsaGqJEiRJo1qwZ+vXrB2Nj4wwv88GDB9i/fz/atGmDkiVLZma4REQ/FFb8ERFlgTlz5qi937lzJ/766y+N4dbW1t+8rvj4eAQEBGDQoEEZPvHbtWsXSpQogefPn+PcuXNwdXX95ngoa9WuXRutW7eGIAh49uwZ/vzzT3Tv3h2BgYFwd3fP9niqV6+O69evw9DQMNvXrTJkyBCULFkSSUlJuHr1KkJCQnDp0iXs2bMnwyeTn/s+9e/fH3369MnM0ImIiIiI1OSUc0nSzsvLC7a2tkhOTsbt27exZcsWnDhxArt27fqqGxO/la7PYYyMjDBjxgwAQExMDI4ePYqlS5ciPDwc8+fPz/DyHjx4gICAANSoUUOj4m/VqlWZEjMR0Y+AFX9ERFmgdevWau+vXbuGv/76S2O4LsXFxeHYsWMYMWIEgoODsXv37u+24i8uLg5mZma6DuO7ULZsWbVy9NNPP6FVq1ZYt25dmhV/iYmJMDQ0zJInOvX09L7qTs3MVLduXdja2gIA2rdvj/z582PFihU4evQomjVrlmnrMTAwgIEBUyciIiIiyjo54VyS0ubs7Cw+DdmuXTuULVsWM2bMQGhoKPr27at1nqw839X1OYyBgYFa2e3SpQs6deqEvXv3YuzYsShUqFCmrcvIyCjTlkVElNOxTTciIh1RKpVYs2YNmjdvDltbW7i6umLSpEn4+PGj2nQ3btyAj48PXFxcYGdnBw8PD7F5w2fPnqFWrVoAgICAAMhkMshkMvj7+39x/YcPH0ZCQgKaNGmCZs2a4dChQ0hMTNSYLjExEf7+/mjcuDFsbW3h5uaGQYMGITw8XG1b1q5di5YtW8LW1hY1a9aEj48Pbty4IcYpk8kQHByssfxP41X1QfDgwQOMHDkS1atXR5cuXQAAd+/eha+vLxo0aABbW1vUrl0bY8eOxfv37zWWGxERgXHjxsHNzQ1Vq1aFh4cHJk+ejKSkJDx9+hQymQxr1qzRmO/y5cuQyWTYs2eP1v329u1bVK5cGQEBARrjHj16BJlMhg0bNgAA5HI5AgIC0KhRI9ja2sLFxQWdO3fGX3/9pXXZX0MmkyF//vx49uwZgP/63Nu7dy8WLFiAOnXqwN7eHjExMWn27xAcHAyZTCYuAwA8PDzQt29fXLx4ET///DNsbW3RoEEDhIaGqs2rrY8/Ly8vtGjRAg8ePICXlxfs7e1Rp04drFixQmPdz58/R79+/eDg4IBatWrht99+w6lTp76p30BnZ2cAwNOnT8VhSUlJWLRoEdq2bYtq1arBwcEBXbp0wblz58RpvvR90rb/kpOTsWTJEjRs2FAsZ7///juSkpK+KnYiIiIioi/R9bnklzx9+hRDhgxBjRo1YG9vjw4dOuD48eMa061fvx7NmzeHvb09qlevjrZt22L37t3i+JiYGMycORMeHh6oWrUqatWqhZ49e+LWrVtprvvAgQOQyWT4+++/NcZt3rwZMpkM9+/fBwC8efMGY8eORd26dVG1alW4ubmhf//+audF36pmzZoAIC7zc+e7afVR5+vrCw8PD/G96vx61apV2LJli3gu0q5dO1y/fl1tXm3nMDKZDNOmTcORI0fQokULVK1aFc2bN8fJkyc11n3+/Hm0bdsWtra2aNiwITZv3vxN/QZKJBI4OTlBEAS187Xnz59jypQpaNy4Mezs7ODi4oIhQ4aofRbBwcEYOnQoAMDb21sss6rzRm37LzIyEuPGjYOrqytsbW3RqlUrteZHiYh+VLxtnYhIRyZNmoSQkBC0bdsWXl5eePbsGTZu3Ijbt2/jzz//hKGhISIjI+Hj44P8+fOjT58+sLS0xLNnz3D48GEAQIECBTBlyhRMmTIFP/30E3766ScA6esXYvfu3XBxcYGVlRWaN2+O+fPn49ixY2jatKk4jUKhQN++fXH27Fk0b94c3t7eiI2NxV9//YX79++jdOnSAIDx48cjODgYdevWxc8//wyFQoGLFy/i2rVr4pNYGTV06FCUKVMGw4cPhyAIAIAzZ87g6dOnaNu2LaysrPDPP/9g69atePDgAbZu3QqJRAIgpdLv559/RnR0NDp06IDy5csjIiICBw8eREJCAkqVKgUnJyfs2rULPXr00Ngv5ubmaNCggda4ChUqhOrVq2P//v0YNGiQ2rh9+/ZBX19fvMMzICAAgYGBaN++Pezs7BATE4ObN2/i1q1bqF279lftl099/PgRUVFRKFOmjNrwpUuXwtDQED4+PkhKSvqqpjjDwsIwdOhQ/Pzzz2jTpg127NgBX19fVKlSBRUrVvxiXL1798ZPP/2Epk2b4uDBg5g3bx6kUqn4ZGJcXBy6d++ON2/ewNvbG4UKFcKePXu+usJP5fnz5wAAS0tLcVhMTAy2bduGFi1aoH379oiNjcX27dvRu3dvbNu2DZUqVfqq79OECRMQEhKCxo0bo2fPnrh+/ToCAwPx8OFDLFmy5Ju2g4iIiIhIG12fS37O27dv0alTJ8THx8PLywv58+dHSEgI+vfvj8WLF4vr2bp1K2bMmIHGjRvD29sbiYmJuHfvHq5du4aWLVsCACZPnoyDBw+iW7dusLa2xocPH3Dp0iU8fPgQVapU0br+evXqwczMDPv370eNGjXUxu3btw8VK1aEVCoFAAwePBgPHjxAt27dUKJECbx79w5//fUXXr58mWn9x6lumM2XL5/acG3nuxm1Z88exMbGomPHjpBIJFi5ciUGDx6MI0eOfPH879KlSzh06BC6dOkCc3NzrF+/HkOGDMH//vc/5M+fHwBw+/Zt9O7dG1ZWVhg8eDCUSiWWLFmCAgUKfFW8KtrO127cuIErV66gefPmKFq0KJ4/f44///wT3t7e2Lt3L0xNTVG9enV4eXlh/fr16NevH8qXLw8g7WZvExIS4OXlhfDwcHTt2hUlS5bEgQMH4Ovri6ioKHTv3v2btoOI6LsmEBFRlps6daoglUrF9xcuXBCkUqmwa9cutelOnjypNvzw4cOCVCoVrl+/nuayIyMjBalUKixevDjd8bx9+1aoXLmysHXrVnFYx44dhf79+6tNt337dkEqlQpBQUEay1AqlYIgCMLZs2cFqVQqTJ8+Pc1pnj59KkilUmHHjh0a03wa++LFiwWpVCqMGDFCY9r4+HiNYXv27BGkUqlw4cIFcdjo0aMFGxsbrftNFdPmzZsFqVQqPHjwQByXlJQkuLi4CGPGjNGYLzXVvPfu3VMb3qxZM8Hb21t836pVK6FPnz6fXVZGSKVSYdy4cUJkZKQQGRkpXLt2TejevbsglUqF1atXC4IgCOfOnROkUqnQoEEDjf2l2ref2rFjhyCVSoWnT5+Kw+rXr6+xXyMjI4WqVasKs2bNEoep1nfu3DlxWLdu3QSpVCqEhISIwxITE4XatWsLgwcPFoetXr1akEqlwuHDh8VhCQkJQpMmTTSWqY0q7jNnzgiRkZHCy5cvhQMHDgg1a9YUqlatKrx8+VKcNjk5WUhMTFSb/+PHj4Krq6swduxYtW1M6/v06f67c+eOIJVKhfHjx6tNN2vWLEEqlQpnz579bPxERERERF/yPZ1LqnL//fv3pznNzJkzNc4jYmJiBA8PD6F+/fqCQqEQBEEQ+vfvLzRv3vyz66tWrZowderUdMWW2ogRI4RatWoJycnJ4rDXr18LNjY2QkBAgCAIKecCUqlUWLlyZYaXr41q32zfvl2IjIwUIiIihOPHjwv169cXZDKZ+Dl87ny3W7duQrdu3TSGjxkzRqhfv774XnV+XaNGDeHDhw/i8CNHjghSqVQ4duyYOEzbOaBUKhWqVKkihIWFicNU5zbr168Xh/Xt21ewt7cXXr16JQ578uSJULlyZa3nldridnBwEM9fw8LChFWrVgkymUxo0aKFeG4uCNrP9a9cuaJxXrl///40zxU/3X9r1qwRpFKpsHPnTnFYUlKS0LFjR8HBwUGIjo7+4jYQEeVUbOqTiEgHDhw4gDx58qB27dp49+6d+KpSpQrMzMzEJ57y5MkDADh+/DjkcnmmrX/v3r2QSCRo1KiROKxFixY4efKkWvMwhw4dQv78+dGtWzeNZaierjt06BAkEonG02+pp/kanTp10hhmYmIi/p+YmIh3797B3t4eAMTmXpRKJY4cOYL69etrfdpQFVPTpk1hbGys1pTM6dOn8f79e7Rq1eqzsf30008wMDDAvn37xGH379/HgwcP1PqUs7S0xD///IMnT56kY4vTZ/v27ahVqxZq1aqF9u3b4/Lly+jZs6fG3Yqenp5q++trVKhQQWw2E0i5K7hcuXJqTbKkxczMTK0vByMjI9ja2qrNe+rUKRQpUkTt6UpjY2N06NAhQ3H26NEDtWrVgru7O4YMGQJTU1MsW7YMRYsWFafR19cX+3xQKpX48OEDkpOTUbVqVdy+fTtD61M5ceIEAKBnz55qw3v16qU2noiIiIgos+j6XPJLTpw4ATs7O7XzCHNzc3Ts2BHPnz/HgwcPAKScK7169UqjacrULC0tce3aNURERGQohqZNmyIyMlKtuc+DBw9CqVSK52smJiYwNDTE33//rdFE6rcYN24catWqhTp16qBPnz6Ij4/HrFmzNM5NtZ3vZlSzZs2QN29e8b22Lg/S4urqKrbgAwA2NjawsLAQ51UoFDh79iwaNGiAIkWKiNOVKVMGderUSXeMcXFx4vnrTz/9hNmzZ8PJyQlLly5Vu16Q+txVLpfj/fv3KF26NCwtLb/6fO3kyZOwsrJCixYtxGGGhobw8vJCXFwcLly48FXLJSLKCdjUJxGRDoSFhSE6OlrsU+FTkZGRAIAaNWqgcePGCAgIwJo1a1CjRg00bNgQLVu2/KaOq3ft2gU7Ozt8+PABHz58AABUqlQJcrkcBw4cQMeOHQGkNEtSrly5z3YGHh4ejsKFC2s0XfKttDWt8uHDBwQEBGDfvn3iPlKJjo4GALx79w4xMTFfbIrS0tIS9evXx549ezBs2DAAKc18FilSROyHIS0FChRAzZo1sX//fnHeffv2wcDAQGy6BgCGDBmCAQMGoHHjxpBKpXBzc0Pr1q1hY2Pzpc1PU4MGDdCtWzdIJBKYm5ujQoUKWjuCz4ymaYoVK6YxLG/evOk6MS5atKhGxW/evHlx79498f3z589RunRpjelSn4Cmx6RJk1CuXDlER0djx44duHDhgtbvR0hICFavXo3Hjx+rXfz42n31/Plz6OnpacRrZWUFS0tLsQkbIiIiIqLMoutzyS958eKFeHNmaqpmGV+8eAGpVIpffvkFZ86cQfv27VGmTBnUrl0bLVq0QLVq1cR5Ro0aBV9fX9SrVw9VqlSBu7s7PD09UapUqc/GULduXeTJkwf79u0T99O+fftQqVIllCtXDkDKjYmjRo3C7NmzUbt2bdjb26NevXrw9PSElZXVV2//wIED4ezsDD09PeTPnx/W1tZaz6ez4nxNVQkYFRWV4XlV86vmjYyMREJCgkaXEgC0DkuLsbExli9fDgB49eoVVq5cicjISBgbG6tNl5CQgMDAQAQHByMiIkKt+VPVuX5GPX/+HGXKlIGenvpzL6qmQV+8ePFVyyUiyglY8UdEpANKpRIFCxbEvHnztI5XtZkvkUiwePFiXL16Ff/73/9w6tQpjBs3DkFBQdiyZQvMzc0zvO4nT57gxo0bAKD2xJ/K7t27xYq/zJLWk38KhSLNeT49EQCAYcOG4cqVK/Dx8UGlSpVgZmYGpVKJ3r17f1W/CJ6enjhw4AAuX74MqVSKY8eOoXPnzhonBto0b94cY8eOxZ07d1CpUiXs378fNWvWVOvvoHr16jh8+DCOHj2Kv/76C9u3b8fatWsxdepUtG/fPsPxAikVaq6url+cTtvTfhn9HPT19TMWXCbNm1F2dnbiHbQNGzZEly5dMHLkSBw4cED8juzcuRO+vr5o2LAhfHx8ULBgQejr6yMwMDBdd8R+zrc82UpERERElBG6PJfMTNbW1jhw4ACOHz+OU6dO4dChQ9i0aRMGDhyIIUOGAEh5os3Z2RmHDx/GX3/9hVWrVmHFihXw9/cX+w3XxsjICA0bNsThw4cxefJkREZG4vLlyxgxYoTadD169ICHhweOHDmC06dPY9GiRfjjjz+wdu1aVK5c+au2SyqVput8Tdv5bloyer6WnnPjb5k3I/T19dX2h5ubG5o2bYpJkyaJFYIAMH36dAQHB6N79+5wcHBAnjx5IJFIvqkPRCKi3IwVf0REOlC6dGmcPXsWTk5O6WqO0cHBAQ4ODhg+fDh2796NUaNGYd++fWjfvn2GKx12794NQ0NDzJkzR6OC69KlS1i/fj1evHiB4sWLo3Tp0rh27RrkcnmanYOXLl0ap0+fxocPH9J86i+tOw8zcofdx48fcfbsWQwePFitWdFPm9EsUKAALCws8M8//3xxmXXq1EGBAgWwe/du2NvbIz4+Xq15ys9p2LAhJk2aJDb3+eTJE/Tt21djunz58qFdu3Zo164dYmNj0a1bN/j7+391xd+3UHWeHhUVpdaRuq7udCxRogQePHgAQRDUynF4ePhXL1NfXx8jRoyAt7c3Nm7ciD59+gBIadqnVKlSCAgIUFvX4sWL1ebPyPepRIkSUCqVCAsLU+tQ/u3bt4iKikKJEiW+ejuIiIiIiLTR5blkehQvXhyPHz/WGP7o0SNxvIqZmRmaNWuGZs2aISkpCYMHD8by5cvRt29fsWKscOHC6Nq1K7p27YrIyEi0adMGy5cv/2zFH5DS3GdISAjOnj2Lhw8fQhAENG3aVGO60qVLo1evXujVqxeePHkCT09PrF69Os2K1ayUN29erTcl6uJ8rWDBgjA2NkZYWJjGOG3D0qtw4cLo0aMHAgICcPXqVTg4OABIOV/z9PSEr6+vOG1iYqLG034ZPV+7d+8elEql2rUPbWWRiOhHwz7+iIh0oGnTplAoFFi6dKnGuOTkZLGC7OPHjxp3t1WqVAkAkJSUBAAwNTUFkL7mPICUir9q1aqhWbNmaNKkidqrd+/eAIA9e/YASHki8P3799i4caPGclRxNWrUCIIgICAgIM1pLCwskD9/fly8eFFt/KZNm9IVM5D2HYlr165Ve6+np4eGDRvif//7n/hko7aYAMDAwADNmzfH/v37ERwcDKlUmu5mOC0tLeHm5ob9+/dj7969MDQ0RMOGDdWmef/+vdp7c3NzlC5dWvzsgJRmSx4+fPjVzZdkhKpJytR9GcTFxSE0NDTL162Nm5sbIiIicPToUXFYYmIitm7d+k3LdXFxgZ2dHdauXYvExEQA/5Wf1J//tWvXcPXqVbV5M/J9Ul1s+LQMBgUFqY0nIiIiIsosujyXTA93d3dcv34dV65cEYfFxcVh69atKFGiBCpUqABA81zJyMgI1tbWEAQBcrkcCoVC4xypYMGCKFy4sNr5VFpcXV2RL18+7Nu3D/v374ednZ1aE6Hx8fHiuYJK6dKlYW5urrb8169f4+HDh9nST2KpUqXw6NEjvHv3Thx29+5dXL58OcvX/SnVk3pHjx5V62MxLCwMp06d+qZld+vWDaampvjjjz/U1vep9evXazztqCqz6Tl/rlu3Lt68eSPerAukfEfWr18PMzMzVK9e/Ws3gYjou8cn/oiIdKBGjRro2LEjAgMDcefOHdSuXRuGhoZ48uQJDhw4gPHjx6NJkyYICQnBn3/+iYYNG6J06dKIjY3F1q1bYWFhgbp16wJIadKxQoUK2L9/P8qWLYt8+fKhYsWKkEqlGuu9du0awsLC0LVrV61xFSlSBJUrV8bu3bvRp08feHp6IjQ0FH5+frh+/TqqVauG+Ph4nD17Fp07d0bDhg1Rs2ZNtG7dGuvXr0dYWBjq1KkDpVKJS5cuwcXFBd26dQMAtG/fHn/88QfGjx+PqlWr4uLFi1rvBE2LhYUFqlevjpUrV0Iul6NIkSL466+/8OzZM41pR4wYgb/++gteXl7o0KEDrK2t8ebNGxw4cACbNm1Se9rN09MT69evx/nz5zFq1Kh0xwOkND3z66+/YtOmTXBzc1NbLpDSHGiNGjVQpUoV5MuXDzdu3MDBgwfFfQIAhw8fxtixY+Hn54e2bdtmaP0ZVbt2bRQvXhzjx4/Ho0ePoK+vjx07diB//vw6uYu0Y8eO2LBhA0aOHAlvb29YWVlh9+7d4t2933IHso+PD4YOHYrg4GB07twZ9erVw6FDhzBw4EDUq1cPz549w+bNm1GhQgXExcWJ82Xk+2RjY4M2bdpgy5YtiIqKQvXq1XHjxg2EhISI3w0iIiIiosykq3PJ1A4dOiQ+NZVamzZt0KdPH+zduxe//PILvLy8kDdvXoSGhuLZs2fw9/cXn7zy8fFBoUKF4OTkhIIFC+LRo0fYsGED3N3dYWFhgaioKLi7u6Nx48awsbGBmZkZzpw5gxs3bqg9FZYWQ0ND/PTTT9i7dy/i4+MxZswYtfFPnjxBjx490KRJE1SoUAH6+vo4cuQI3r59i+bNm4vT/f777wgJCcHRo0czpV++z/n555+xZs0a+Pj44Oeff0ZkZKR4zhIbG5ul69Zm0KBBOH36NDp37ozOnTtDqVRiw4YNqFixIu7cufPVy82fPz/atm2LTZs24eHDh7C2tka9evWwc+dOWFhYoEKFCrh69SrOnDmj0apQpUqVoK+vjxUrViA6OhpGRkaoWbMmChYsqLGejh07YsuWLfD19cWtW7dQokQJHDx4EJcvX8a4ceNgYWHx1dtARPS9Y8UfEZGOTJs2DVWrVsXmzZuxYMEC6Ovro0SJEmjVqhWcnJwApJzU3bhxA/v27cPbt2+RJ08e2NnZYd68eWp3K86YMQPTp0+Hn58f5HI5Bg0apPVkbffu3QAADw+PNOPy8PCAv78/7t69CxsbG6xYsQLLli3Dnj17cOjQIeTLlw9OTk6QyWTiPH5+fpDJZNi+fTvmzJmDPHnyoGrVqnB0dBSnGThwIN69e4eDBw9i//79qFu3LlauXJlmp/TazJ8/H9OnT8emTZsgCAJq166NFStWoE6dOmrTFSlSBFu3bsWiRYuwe/duxMTEoEiRIqhbt65GczhVq1ZFxYoV8fDhQ7Rq1Srdsaj2lYmJCWJjY9GsWTON8V5eXjh27Bj++usvJCUloXjx4hg2bBh8fHwytJ7MYmhoiICAAEydOhWLFi2ClZUVunfvDktLS4wdOzbb4zE3N8fatWsxY8YMrFu3DmZmZvD09ISjoyMGDx6coX4vPtWoUSOULl0aq1evRocOHdC2bVu8ffsWW7ZswenTp1GhQgXMnTsXBw4cwN9//602b3q/T6ppS5YsiZCQEBw5cgSFChVC37591ZqjJSIiIiLKTLo4l0xt7969WofXqFEDzs7O2Lx5M+bOnYsNGzYgMTERMpkMy5cvR7169cRpO3bsiN27dyMoKAhxcXEoWrQovLy8MGDAAAAplZKdO3fGX3/9hUOHDkEQBJQuXRqTJ09Gly5d0rWfmjVrhm3btkEikWg081m0aFE0b94cZ8+exa5du6Cvr4/y5ctj4cKFaNy4cbqWn9msra0xe/ZsLF68GH5+fqhQoQLmzJmDPXv2aJyzZIeqVatixYoVmDNnDhYtWoRixYphyJAhePTokdaK34zo2bMnNm/ejBUrVmDWrFkYP3489PT0sHv3biQmJsLJyQlBQUFiq0QqVlZWmDp1KgIDAzF+/HgoFAqsW7dOa8WfiYkJ1q9fj3nz5iEkJAQxMTEoV65cttx0S0SkaxKBPaQSEVEu5+npibx582o02Ui6sWbNGvj5+eHkyZMoUqSIrsMhIiIiIiKifw0YMAAPHjzAoUOHdB0KERGlgX38ERFRrnbjxg3cuXMHnp6eug4lV0pISFB7n5iYiC1btqBs2bKs9CMiIiIiItKhT8/Xnjx5gpMnT6JGjRo6ioiIiNKDTX0SEVGudP/+fdy6dQurV6+GlZWV1qY6KesNGjQIxYsXh42NDWJiYrBr1y48evQI8+bN03VoREREREREuVrDhg3Rpk0blCpVCs+fP8fmzZthaGio0QQnERF9X1jxR0REudLBgwexZMkSlCtXDr///vs39SdHX8/NzQ3bt2/H7t27oVAoUKFCBSxYsIAVsURERERERDpWp04d7N27F2/evIGRkREcHBwwYsQIlC1bVtehERHRZ+i0j78LFy5g1apVuHnzJt68eYMlS5agYcOGn53n/PnzmDVrFv755x8UK1YM/fv3Z4esRERERERERERERERElOvptI+/uLg4yGQyTJ48OV3TP336FH379oWLiwt27tyJ7t27Y8KECTh16lQWR0pERERERERERERERET0fdNpU5/u7u5wd3dP9/SbN29GyZIl4evrCwCwtrbGpUuXsGbNGtSpUyerwiQiIiIiIiIiIiIiIiL67uWoPv6uXr2KWrVqqQ1zc3PDb7/9lu5lKJVKJCcnQ09PDxKJJLNDJCIiIiIiyjaCIECpVMLAwAB6ejpt0OW7w3M/IiIiIiL6UWTk3C9HVfy9ffsWhQoVUhtWqFAhxMTEICEhASYmJl9cRnJyMm7cuJFVIRIREREREWU7W1tbGBkZ6TqM7wrP/YiIiIiI6EeTnnO/HFXxlxlUNaGVK1eGvr6+jqOh3EChUOD27dssc6QTLH+kKyx7pCsse6RLuih/qnXyaT9Nqn1ia2vL4wFlC4VCgRs3brDMUbZj2SNdYvkjXWHZI13SRflTrTM95345quKvUKFCePv2rdqwt2/fwsLCIl1P+wEQm3gxMjLiAYGyhUKhAMAyR7rB8ke6wrJHusKyR7qki/KnWiebstSk2if6+vo8HlC2YpkjXWHZI11i+SNdYdkjXdJF+UvPuV+Oui3UwcEB586dUxt25swZODg46CYgIiIiIiIiIiIiIiIiou+ETiv+YmNjcefOHdy5cwcA8OzZM9y5cwcvXrwAAMyfPx+jR48Wp+/UqROePn2KOXPm4OHDh9i4cSP279+PHj166CJ8IiIiIiIiIiIiIiIiou+GTpv6vHnzJry9vcX3fn5+AIA2bdpg1qxZePPmDV6+fCmOL1WqFAIDA+Hn54d169ahaNGimDFjBurUqZPtsRMRERERERERERERERF9T3Ra8efi4oJ79+6lOX7WrFla5wkNDc3CqIjoe6dQKCCXy3UdRo6g6vcnISGB7Z1TtmLZI11h2SNdyqryZ2RklK4O3OnrMLekzMLfINIVlj3SJZa/nMXQ0JCfE1EuoNOKPyKijBAEAa9evcKHDx90HUqOIQgCDAwMEBYWlq6OX4kyC8se6QrLHulSVpU/PT09lCtXDkZGRpm2TGJuSZmPv0GkKyx7pEssfzlPvnz5ULRoUX5eRD8wVvwRUY6hujBTuHBhmJmZMUFJB0EQEB8fD1NTU+4vylYse6QrLHukS1lR/pRKJV68eIGXL1+idOnSLNeZiLklZTb+BpGusOyRLrH85RyCICAuLg6vX78GABQrVkzHERFRVmHFHxHlCAqFQrwwU7BgQV2Hk2MIggClUgkTExMm4JStWPZIV1j2SJeyqvxZWVnhxYsXSE5OhqGhYaYtNzdjbklZgb9BpCsse6RLLH85i6mpKQDg9evXKFy4MJv9JPpBsaMIIsoRVP2umJmZ6TgSIiIiouylauJT1YcOfTvmlkRERJRbqfIf9nFM9ONixR8R5Si8e4yIiIhyG+Y/WYf7loiIiHIb5j9EPz5W/BERERERERERERERERH9AFjxR0RERD+U4OBgODs76zqMTOPv74/WrVtn+nLPnz8PmUyGqKioTF82ERER0Y+AeWX6MK8kIiL6vrDij4iIiDKdr68vnJycYGNjg6pVq+Knn35CQEAAkpOTs3zdzZo1w8GDB7N8PcHBwZDJZGjatKnGuP3790Mmk8HDwyNDy5TJZDhy5EhmhUhERESU4/n6+kImk0EmkzGvzADmlURERLkXK/6IiIgoS7i6uuLUqVM4ePAgevbsiYCAAKxatUrrtElJSZm2XhMTExQsWDDTlvc5ZmZmePfuHa5cuaI2fPv27ShevHi2xEBERET0o6tTpw5Onz7NvJKIiIgoHVjxR0RERFnCyMgIVlZWKFGiBLp06QJXV1ccO3YMQMqd2wMGDMCyZcvg5uaGJk2aANB+Z7KzszOCg4MBAM+ePYNMJsOhQ4fg5eUFe3t7tGrVSu0CyadNMqmaNAoNDYWHhweqVauG4cOHIyYmRpwmJiYGI0eOhIODA9zc3LBmzRp4eXlh5syZn91GfX19tGjRAjt27BCHvXr1Cn///TdatGihMf2RI0fQpk0b2NraokGDBmp3q6vu4h44cKDWu7o/F39SUhJmzJiBWrVqwdbWFp07d8b169fV5j9x4gQaN24MOzs7eHl54fnz55/dNiIiIqLvBfPKz+eVDRs2RGBgIPNKIiIiAsCKPyIiIsomxsbGkMvl4vuzZ8/i8ePHCAoKQmBgYIaWtWDBAvj4+CA0NBRly5bFyJEjP9vcU3h4OI4ePYrly5cjMDAQFy5cwIoVK8Txs2bNwpUrV7Bs2TKsXr0aFy9exK1bt9IVS7t27bB//37Ex8cDSLlAVKdOHY27wy9evIgxY8bA29sb+/btw7Rp0xAcHIzly5cDSLmbGwD8/Pxw+vRp8X164p8zZw4OHjyIWbNmISQkBGXKlEHv3r3x4cMHAMDLly8xaNAg1K9fH6GhoWjfvj3mz5+fru0jIiIi+t4wr1TPK6dOnYrdu3czryQiIiIArPgjIiKiLCYIAs6cOYPTp0/DxcVFHG5mZoYZM2agYsWKqFixYoaW2atXL9SrVw/lypXDkCFD8Pz5c4SFhX02Bj8/P0ilUjg7O6NVq1Y4e/YsgJS7skNDQzF69GjUqlULUqkUfn5+UCqV6YqlcuXKKFWqFA4ePAhBEBASEoJ27dppTBcQEIA+ffqgTZs2KFWqFGrXro2hQ4di8+bNAIACBQoAACwtLWFlZSW+/1L8cXFx2Lx5M0aPHg13d3dUqFAB06dPh7GxsXiR588//0Tp0qXh6+uL8uXLo1WrVmjTpk26to+IiIjoe8G8MoW2vLJ///7YsmULAOaVREREuZ2BrgMgIiKiH9OpU6fg5OQEuVwOQRDQokULDB48WBwvlUphZGT0VcuWyWTi/1ZWVgCAd+/ewdraWuv0JUqUgIWFhfi+cOHCiIyMBJDSzJNcLoednZ04Pk+ePChXrly642nXrh127NiBYsWKIT4+Hu7u7tiwYYPaNHfv3sXly5fFO7EBQKFQIDExEfHx8TA1NU1z+Z+LPzw8HHK5HE5OTuJ4Q0ND2NnZ4eHDhwCAhw8fqm0fADg4OKR7+4iIiIh06fjx43B0dGRe+S/mlURERGmTx0ZD38gEiqiP0LfMC0VSAgzN8+g6rGzFij8iIiLKEs7Ozpg2bRqMjIxQuHBhGBiopx3aLkhIJBIIgqA2TFtTS4aGhmrzAPjsndSfrhuAxnq+RcuWLTF37lwEBASgVatWWtcXFxeHwYMHo1GjRhrjjI2NP7v8rI6fiIiI6Hvm4uKCKVOmwNDQkHklNPNKQRDECj/mlURElJspExPxflUQ3m/YCGVUFPQsLZHfqxsK9ekDvS/8Rv5I2NQnERERZQlTU1OUKVMGxYsX13qBQZsCBQrg9evX4vsnT56IfZxklZIlS8LQ0BA3btwQh0VHR+PJkyfpXka+fPng4eGBv//+W2tzTEBK002PHz9GmTJlNF56eikpmaGhIRQKRYbiL126NAwNDXH58mVxmFwux40bN1ChQgUAgLW1tdr2AcC1a9cytB4iIiIiXWFeqU5bXlm6dGnmlURElKvJY6PxNjAQkUuXQRkVBQBQRkUhcslSvP3jD8hjo3UcYfbhE39ERET03ahZsyY2btwIR0dHKBQKzJs3T+0u7KxgYWEBT09PzJkzB3nz5kXBggXh7+8PiUQi3vWdHrNmzcLkyZORP39+reMHDhyIfv36oXjx4mjcuDH09PRw9+5d3L9/H8OHDweQ0vTS2bNn4eTkBCMjI+TNm/eL6zUzM0Pnzp3F+IsXL46VK1ciISEBP//8MwCgU6dOWL16NWbPno327dvj1q1bCAkJSfe2EREREeU0uSmvlEgkuH79OsLCwphXEhFRrqVvZIL3GzZqHfd+/QYU6tcvmyPSHT7xR0RERN+NMWPGoFixYujatStGjRqFXr16wcTEJMvX6+vrCwcHB/Tr1w89e/aEk5MTrK2tv9hUUmomJiZpXpwBgDp16mD58uU4ffo0fv75Z3To0AFr1qxBiRIlxGnGjBmDM2fOoF69emjTpk261z1q1Cg0btwYo0ePRps2bRAWFoaVK1eKF3iKFy8Of39/HD16FK1bt8bmzZvFi0JEREREP6LclFd27NgRGzduRPHixcVpmFcSEVFuo4j6KD7p9yllVBQU0drH/YgkQi5ryFuhUODq1atwcHCAvr6+rsOhXIBlLnMkJCTg8ePHKFeuXLacrP0oBEFAXFwczMzMMnSHKdG3yullLy4uDnXr1sWYMWPQvn17XYdDGZDTyx7lbFlV/j6XBzHXTNvn9g1zS8oK/A0ibbIjr2TZI11i+ct5fpQ8iHkwfUopl+Of2m5aK//0LC1R8a/T0Mukp/91Uf4ysk429UlERES53u3bt/Ho0SPY2dkhOjoaS5YsAQA0aNBAx5ERERERUU7CvJKIiEg3FEkJyO/VDZFLlmqMy+/VDYqkhEyr+PvesalPIiIiIgCrV69G69at0bNnT8THx2Pjxo0oUKCArsMiIspxLly4gH79+sHNzQ0ymQxHjhxJc9pJkyZBJpNhzZo1asM/fPiAkSNHwsnJCc7Ozhg3bhxiY2OzOHIioszBvJKIiCj7GZrnQaE+fVBwQH/oWVoCSHnSr+DAASjUpw8MzfPoOMLswyf+iIiIKNerXLkygoODdR0GEdEPIS4uDjKZDO3atcOgQYPSnO7w4cO4du0aChcurDFu1KhRePPmDYKCgiCXyzFu3DhMmjQJ8+fPz8rQiYi+GfNKIiIi3dEzNoZJzRqo2Ls3FB8+QN/KKuVJvwz0tfsjYMUfERERERERZRp3d3e4u7t/dpqIiAhMnz4dq1atQt++fdXGPXz4EKdOncL27dtha2sLAJgwYQL69OmD0aNHo0iRIlkWOxERERER5Wx/j+qJ0vL8iJUWh+Pa7bmmec/U2NQnERERERERZRulUolff/0VPj4+qFixosb4K1euwNLSUqz0AwBXV1fo6enh+vXr2RkqERERERHlMCYJgOL9e8TeuaXrUHSGT/wRERERERFRtlmxYgUMDAzg7e2tdfzbt281+sIyMDBA3rx58ebNmwyvT6FQaB0mCIL4IsoMqrLEMkXZjWWPdInlL+dR5T8KhUJrnpRTqGLPydtAWcM0IeWvYXLWlQ9dlL+MrIsVf0RERERERJQtbt68iXXr1iE4OBgSiSRb1nnjxg2tww0MDBAfHw+lUpktcVDuER8fr+sQKJdi2SNdYvnLORITEyGXy3H37l1dh5Ip0sr1KHdKSohDvqSU/w2TgatXr2bp+r7X8seKPyIiIiIiIsoWFy9eRGRkJOrXry8OUygUmD17NtatW4djx46hUKFCePfundp8ycnJ+PjxI6ysrDK8TltbW+jr66sNS0hIQFhYGExNTWFiYvJ1G0P0CUEQEB8fD1NT02yr2CYCWPZIt1j+ch49PT0YGhqiQoUKOToPUigUuHHjhtZcj3Kvx3f+hvzf/43kgLRqVegbZH41mC7Kn2qd6cGKPyIiIiIiIsoWrVu3hqurq9owHx8ftG7dGm3btgUAODo6IioqCjdv3kTVqlUBAOfOnYNSqYSdnV2G16mvr69xMq6vrw+JRCK+iDITyxXpCsse6RLLX86h+qy05Ug50Y+yHZQ5Ip/9A8t//9cXgKSEWFjkLfDZeb7F91r+WPFHRJTFvLy8YGNjg/Hjx+s6FCIiIqIsFxsbi/DwcPH9s2fPcOfOHeTNmxfFixdH/vz51aY3NDREoUKFUL58eQCAtbU16tSpg4kTJ2Lq1KmQy+WYPn06mjdvjiJFimTrtnyPmFsSEREREWn38VW4WPEHAB/fvcrSir/vlZ6uAyAiyk0OHTqEXr16wcXFBTKZDHfu3NF1SERERESZ6ubNm/D09ISnpycAwM/PD56enli8eHG6lzFv3jyUL18e3bt3R58+feDk5IRp06ZlUcQ5F3NLIiIiIqL/xEe+VHsf/e6VjiLRLT7xR0S5TnxSMvT19BCdIEceE0MkK5UwM8qew2FcXBycnJzQtGlTTJgwIVvWSURERJSdXFxccO/evXRPf+zYMY1h+fLlw/z58zMzrCyhy7wSYG5JRERERJRa0vu3au9jP0TqKBLdYsUfEeUqiXIFlp94hKAzjxEVnwxLUwP0dC2HAfWsYWyY9e0xq+58f/bsWZavi4iIiIiyjq7zSoC5JRERERFRaoqYj2rv46LepjHlj40Vf0SUowmCgHi5Il3TKpUCVpx6jEVH/xGHRcUni+9/qVMOenqf74ja1FCfnVUTERER/YCyO68EmFsSEREREWWqmFi1t4mfVATmFqz4I6IcSxAE/Lz8LC6Fvf/itAXMjXB6TH0EnXmsdXzQmcfo614ebrP/h3exSWkux7lMfmzrV4sXaIiIiIh+ILrIKwHmlkREREREmUkvLkHtfW6t+NPTdQBERN8ivZdIrCyMERmThKj4ZK3jo+KT8S42CVYWxpkXHBERERHlGMwriYiIiIhyNv0Eudr75LgYHUWiW3zij4hyLIlEgm39aqW7SSYDPT1YmhpovUhjaWqAwnlMEDLQ9bPLYHNMRERERD8eXeSVAHNLIiIiIqLMZPhJfp6cEK2jSHSLFX9ElKNJJBKYGaXvUBaflIyeruXU+mJR6elaDslKZbqXRUREREQ/FuaVREREREQ5m1GiUu29Mj5eR5HoFs9EiCjXMDUywIB61gBS+l6Jik+GpakBerqWw4B61jA21M/yGD58+ICXL1/i9evXAIDHj1P6hilUqBCsrKyyfP1ERERE9O2+h7wSYG5JRERERJSacYIAAIg3AkyTAGViwhfm+DGx4o+IchVjQ330dS+PgfUrIDpBjjwmhkhWKrPt4syxY8cwduxY8f3w4cMBAIMGDcLgwYOzJQYiIiIi+na6zisB5pZERERERKmZ/lvP99ECMH0HCKz4IyLKHVTNLhW0MAYAGEEvS9e3fv168f+2bduibdu2Wbo+IiIiIsoe2Z1XAswtiYiIiIi0USQnw/zfer44Cz3gnRJCUpJug9KRrD8rISIiIiIiIiIiIiIiIsoi7988hcG/XfwlWBgCACSs+CMiIiIiIiIiIiIiIiLKWV48vg0ASNYDks2MAAASebIuQ9IZVvwRERERERERERERERFRjvXu+SMAQKwJIBj++8QfK/6IiIiIiIiIiIiIiIiIcpbYN88AAPEmAIxSKv705AodRqQ7rPgjIiIiIiIiIiIiIiKiHCvhfQQAINFEAhilNPWpn6zUZUg6w4o/IiIiIiIiIiIiIiIiyrGSP7wHACQZ6wHGxgAAPVb8EREREREREREREREREeUsQmw0AEBuagA9YxMAgH6yoMuQdIYVf0RERERERERERERERJRzxcYDABQmhtA3MQMAGLDij4iIiIiIiIiIiIiIiChn0Y9LBAAozUygb2qeMixZlxHpDiv+iIiymJeXF2bOnKnrMIiIiIjoB8DckoiIiIhIk0HCv7V8FuYwNMuTMkyeO5/4M9B1AEREuYVcLsfChQtx8uRJPH36FBYWFnB1dcXIkSNRpEgRXYdHRERERDkIc0siIiIiov8YJSoAAPoWecWKP0M+8UdElEskxQGKJCD2TcrfpLhsWW1CQgJu376N/v37Izg4GAEBAXj8+DH69++fLesnIiIiokymo7wSYG5JRERERJSacYISAGCUvxBMLfMDyL0Vf3zij4hyl+QE4K+FwPlAIOEDYJIPcOkL1BkBGJhk6arz5MmDoKAgtWETJ05E+/bt8eLFCxQvXjxL109EREREmUiHeSXA3JKIiIiIKDWThJS/pgWLwcSiAABW/BER5UyCAMjTeWe1oATO+AMnZv83LOHDf+9dBwOSLzwIbWgGSCRfFao2MTExkEgksLS0zLRlEhEREdFXyO68EmBuSURERESUSUz/rfjLW7Q0LPJbAQCM5DoMSIdY8UdEOZcgAKsbA0/Pf3las4LAsBspd2Rrcz4QqD0UWGgLxEWmvZxSNYFeBzLlAk1iYiLmzZuH5s2bw8LC4puXR0RERERfSRd5JcDckoiIiIgoE8THRsEsKeV/q9I2MDA0RiwAfQGIjf4I8zx5dRpfdmMff0SUw6XzIolFESD2bcqd2NokfADi3qZMlw3kcjmGDh0KQRAwderUbFknEREREX1OzswrAeaWRERERJS7vQy7K/5frIwNLAsUFt9/ePtcFyHpFJ/4I6KcSyJJuUM6vU0y6Rum9L2i7SKNST4gTzGg95HPLyMTmmOSy+UYNmwYXrx4gbVr1/KObCIiIiJd00VeCTC3JCIiIiLKBG/C78ISQJwRYGpuCSNjMzxHypNvMR9eA6is2wCzGSv+iChnk0gAI/P0TZsUB7j0Ve+LRcWlL6BITv+yvpLqwkxYWBjWrVuH/PnzZ+n6iIiIiCidclheCTC3JCIiIiICgI+vwmAJIN4k5b2+gQHkhoCxHIj78IXm939ArPgjotzDyAyoMyLl//OBKXdom+RLuThTZwRgYJKlq5fL5RgyZAhu376NwMBAKBQKvHnzBgCQN29eGBkZZen6iYiIiCiT6DivBJhbEhERERGpxEe+SvmbKg1PMkip+IuNYsUfEdGPzcAEqD0MqDsKSIgCTCwBhTxbLs5ERETg2LFjAIDWrVurjVu3bh1cXFyyPAYiIiIiyiQ6zCsB5pZERERERCpJ79+m/DXRE4fJ/639Soz5oIOIdIsVf0SU+xiZpfw1L5TyVz9r74Zev369+P+9e/eydF1ERERElI2yOa8EmFsSEREREX0qOfojACDJWF8cJjdM+ZsY/UEHEemW3pcnISIiIiIiIiIiIiIiIvr+SGJjAQDJpv8965b8bx1gcnyMLkLSKVb8ERERERERERERERERUY6kF5cAAFCYGovDkg0lAAB5HCv+iIiIiIiIiIiIiIiIiHIE/fiklH/MTcVhCoOUij9FQpwuQtIpVvwRERERERERERERERFRjmSYoAAASCwsxWEK/ZSKPyEhXicx6RIr/oiIiIiIiIiIiIiIiChHMkpUAgAM8uYThykMU6q/lEmJughJp1jxR0RERERERERERERERDmSSYKQ8jd/EXGY0iCl+kuSlKSTmHSJFX9ERERERERERERERESUI5kkpPzNU7iUOEz57xN/YMUfERERERERERERERER0fdPkZwM838r/vIVLycOVxoYAAAkcrkuwtIpVvwRERERERERERERERFRjhMZEQaDlC7+ULxcZXG4YGQIAJAkKXQRlk6x4o+I6Dvy7NkzyGQy3LlzR9eh5HrBwcFwdnbWdRhZ7kffzqzavtzwXZXJZDhy5Iiuw4Cvry8GDBjwzdMQ4O/vj9atW+s6DCLKJrnhtyqn+NHzLZUffTuZV3495pU/HuaVRPQ9eRV2FwAg1wfyW/3X1KdgmPLEn14yK/6IiIgIQLNmzXDw4EFdh5HlfqTt9PDwwJo1a3QdhsjLywszZ8786vnPnz8PmUwmvlxdXTF48GA8ffo0E6NM2+nTp1G3bt1sWde3Gj9+PGbNmpVpy8usCz4ZuZD34sUL9OnTB/b29qhVqxZmz56N5OTkdK8rODhYrbykfkVGRn7LZmh1/PhxtG/fHnZ2dqhevbra/nr//j18fHzg5uaGqlWrwt3dHdOmTUNMTIw4zaflW/V68+aNOI2/v7/G+CZNmqjF4eXlpTHNpEmT1Ka5fv06unfvDmdnZ1SvXh0+Pj64e/eu1u0KCwuDo6OjxoVdbfvX1tZWbRpBELB48WI0atQI9vb26NGjB548eZKh/UpEP6YfKd/6nB9pO5lXZi7mlcwrP4d5pfa8ctGiRXBzc4OdnR3zSqIvePf8EQAgzhjQ/7d5TwCQGBsDAPTkua/iz+DLkxAR/Vji5fEw0DNAdFI08hjlQbIyGaaGpjqL5+PHjzAwMIC5uXmWLF/+HbRjLZfLYWhomOXrSUpKgpGRUaYsy8TEBCYmJpmyrMyWW7aTUhw4cADm5uYICwvDxIkT0a9fP+zatQv6+vpq0wmCAIVCAQODzEnvrKysMmU52SFPnjy6DuGbKBQK9O3bF4UKFcLmzZvx+vVrjBkzBoaGhhgxYkS6ltGsWTPUqVNHbZivry+SkpJQsGDBTI334MGDmDhxIoYPH46aNWtCoVDg/v374ng9PT00aNAAw4YNQ4ECBRAeHo6pU6fi48ePmD9/vtqyDhw4AAsLC/H9p7FWrFgRQUFB4vtPyz0AdOjQAUOGDBHfm5r+95seGxuLX375BR4eHpg8eTIUCgX8/f3h4+OD48ePq/02yeVyjBgxAs7Ozrhy5YrGeiwsLHDgwAHxvUQiURu/YsUKrF+/HlOnToW1tTUWLVoEHx8f7Nu3D8b/nnC+ePECxYsX11g25Vy5Ma/Mjpzue4ght+RbuWU7KQXzyi9jXsm88nvKK2fNmoWSJUsyryT6gpjX4SgCIP6TNETyb46jnyxkf1A6xif+iChXSVQkYvXN1ai3tR7ct7qj3tZ6CLoZhERFYrbGkZycjOPHj2PIkCFwc3NL805LhUKBsWPHokmTJnjx4gUA4MiRI2jTpg1sbW3RoEEDBAQEqN29J5PJsGnTJvTr1w+Ojo5YtWoVFAoFxo0bBw8PD9jZ2aFx48ZYu3at2rrOnz+Pn3/+GQ4ODnB2dkanTp3w/PlzrXElJSVh2rRpcHNzg62tLerXr4/AwECtMTg4OGD58uUAgGPHjqFdu3awtbWFi4sLBg4cmOY+UjUdsnnzZri7u8Pe3h5Dhw5FdHS0OI3q7slly5bBzc1NvGNPW1Myzs7OCA4OBvDf3ZKHDh2Cl5cX7O3t0apVK7WE/NOmfFTxhIaGwsPDA9WqVcPw4cPV7jSMiYnByJEj4eDgADc3N6xZs+aLd+fmlu1MzcPDA0uXLsXo0aPh6OiI+vXr4+jRo3j37h369+8PR0dHtGzZEjdu3FCb7+LFi+jSpQvs7Ozg7u6OGTNmIC4uDkDK3ZnPnz+Hn5+feNdkaqdOnULTpk3h6OgIHx8fvH79WhynVCoREBCAunXromrVqmjdujVOnjypNv/169fh6ekJW1tbtG3b9pubYkrPZ6NSsGBBFC5cGNWrV8fAgQPx4MEDhIWFiXe2njhxAm3btoWtrS0uXbqk9a7imTNnwsvLS3zv5eWFGTNmYM6cOahRowZq164Nf39/tXlSl6/0xrt161axHA8cOBBBQUFfbBLr3r178Pb2hp2dHVxcXDBx4kTExsZqTBcQEICaNWvCyckJkyZNQlJSkjju021WKpUIDAwUj3mtWrVSO7EGgH/++Qd9+/aFk5MTHB0d0aVLF4SHh8Pf3x8hISE4evSoWJbOnz+vNfaTJ0+ic+fOcHZ2houLC/r27Yvw8HBxfIMGDQAAnp6ekMlkap9BaqdPn8aDBw8wd+5cVKpUCe7u7hg6dCg2btyIpKQkCIKAHj16wMfHB4KQcsLy4cMH1K1bF4sWLQKQcrHVyspKfOnr6+P8+fNo166dxvo+d7wBgG3btqFp06awtbVFkyZNsHHjRnFccnIyZs6ciV9//RWdO3dGuXLlUKFCBTRr1kycJm/evOjSpQtsbW1RokQJ1KpVC126dMHFixc1YilYsKBa3Hp66qcn+vr6auMLFCigsYxPtz31BZ9Hjx7hw4cPGDJkCMqXL4+KFSti4MCBePv2rfi7qrJw4UKUL18eTZs21fo5SSQStfUUKlRIHCcIAtatW4d+/fqhXr16kMlkmDNnDl6/fq12nO7WrRs6dOiATZs24ePHj1rXQzlHbssrVTkd88q08y1vb2+4urqidevWzCuzcDtTY16Z8tnY2Njg6NGj8Pb2Zl7JvDLb88p69erB1dUVw4YNY175r8zIK/v374+GDRvCxsaGeSXRFyR+SHnCN9FEvRJdYpJSea+frMz2mHSNFX9ElKMJgoA4eVy6XjFJMVh5fSWWX1+OqKQoAEBUUhSWXV+GlddXIiYp5ovLUCXGX+vevXuYNWsW3N3dMWbMGBQoUADr1q2DjY2NxrRJSUkYOnQo7t69i02bNqF48eK4ePEixowZA29vb+zbtw/Tpk1DcHCweAFEJSAgAD/99BN27dqF1q1bQ6lUomjRoli0aBH27t2LgQMHYsGCBdi3bx+AlIR74MCBqF69Onbt2oUtW7agY8eOGnedqaxfvx7Hjh3DwoULceDAAcydOxclSpTQGsPu3bvRrl07HD9+HIMGDYK7uztCQ0Oxdu1a2NnZfXZ/hYeHY//+/Vi+fDlWrlyJO3fuYMqUKWrTnD17Fo8fP0ZQUJDaRaL0WLBgAXx8fBAaGoqyZcti5MiRn20CJTw8HEePHsXy5csRGBiICxcuYMWKFeL4WbNm4cqVK1i2bBlWr16Nixcv4tatW1+MI7dsZ2pr166Fk5MTQkJC4O7ujtGjR2P06NFo1aoVgoODUbp0aYwZM0b8zoWHh+OXX35Bo0aNsGvXLixYsACXLl3C9OnTAaRcWCpatCiGDBmC06dP4/Tp0+K6EhISsHr1asyZMwcbNmzAy5cvMXv2bHH8unXrEBQUhDFjxmDXrl1wc3PDgAEDxKZUYmNj0bdvX1hbWyM4OBiDBw9Wm/9bZPSzUd1Fn/pJ3vnz52PkyJHYt2+fxoWpzwkJCYGZmRm2bt2KX3/9FUuWLMFff/311fFeunQJkydPhre3N0JDQ+Hq6qpxbPpUXFwcfHx8kDdvXmzfvh0LFy7EmTNnxM9V5ezZs3j48CHWr1+P33//HYcPH8aSJUvSXO7q1auxc+dOTJ06FXv37kWPHj3w66+/4u+//wYAREREoFu3bjAyMsLatWsRHByMdu3aITk5Gb169ULTpk1Rp04dsSw5OjpqXU98fDx69uyJHTt2YM2aNZBIJBg4cCCUypSTim3btgEA1qxZg9OnT2tcBFO5evUqpFKp2gm/m5sbYmJi8ODBA0gkEsyePRs3btzAunXrAACTJ09GkSJF0rzQHRoaChMTE40mjL50vNm1axcWLVqE4cOHY9++fRgxYgQWL16MkJAQAMDt27cREREBPT09eHp6ws3NDb1791a7M/tTEREROHz4MKpXr64xTrWMnj174tKlSxrjw8LC4ObmhgYNGmDkyJEaF1UAYPfu3XBxcUGLFi0wf/58xMfHi+PKlSuHfPnyYfv27UhKSkJCQgK2b98Oa2trtd+ts2fP4sCBA5g8eXKa2xEXF4f69evD3d0d/fv3xz///COOe/bsGd68eQNXV1dxWJ48eWBvb692IXPjxo3w8PDA+vXr4ebmhqFDh+L48eNQKHJf0zPfo+zOK781t8zuvFKV0zGv1G7BggXo1asX/vzzT+aV2bCdqTGvTLFkyRL06tWLeWUm55WBgYEIDQ1lXvmZvHLZsmXw9/dnXvkv5pVE2S/5w3sAQJLxJxX+xqqKv9z3xB+b+iSiHEsQBHjv98bVN1e/OG1+4/w40O4ANt3dpHX8prub0LNqTzTZ0QTvE9+nuRzHwo5Y22RtmhcutHn//j127dqF0NBQ/PPPP3B3d8fkyZNRr169NJvViY2NRZ8+fZCUlIR169aJzY0EBASgT58+aNOmDQCgVKlSGDp0KObOnYtBgwaJ87do0QLt2rVLuYAVFwdDQ0O15ipKlSqFq1ev4sCBA2jWrBliYmIQHR2N+vXro3Tp0gAAa2vrNLfp5cuXKFOmDKpVqwaJRKJxcSZ1DCojRoxAs2bN1OLQdmEqtcTERMyZMwdFihQBAEyYMAF9+/aFr6+v2FyMmZkZZsyY8VVNFPXq1Qv16tUDAAwZMgTNmzdHWFhYmtsuCAL8/PzEu/5atWqFs2fPincuh4aGYt68eahVqxYAwM/PT6OJlNy8nanVrVsXnTp1AgAMHDgQf/75J2xtbcU7In/55Rd07NgRb9++hZWVFQIDA9GyZUv06NEDAFC2bFmMHz8eXl5emDJlCvLlywd9fX2Ym5trNCUkl8sxdepUsWx37doVS5cuFcevWrUKv/zyC5o3bw4A+PXXX3H+/HmsXbsWkydPxp49e6BUKvHbb7/B2NgYFStWxKtXrzQuon2NjHw2r1+/xqpVq1CkSBGUK1dOPOkbMmQIateuneF1y2Qy8bhRtmxZbNiwAWfPnv3ssj4X74YNG1C3bl34+PgAgBjj8ePH01zenj17kJSUhNmzZ8PMzAwAMGnSJPTr1w+jRo0SL1gYGRnht99+g6mpKSpWrIghQ4Zgzpw5GDp0qMadvElJSVi9ejWCgoLg5OQEIOWYd+nSJWzZsgU1atTAxo0bYWFhgd9//11skqdcuXLiMkxMTJCUlPTFZqkaN26s9v63335DrVq18ODBA0ilUvEu4nz58n12WW/fvlW7OANAfK/qm6RIkSKYOnUqxowZg7dv3+LkyZMICQlJswmu7du3o0WLFhpNrn3peOPv7w9fX180atRI3HcPHjzAli1b0KZNG/FJooCAAPj6+qJEiRIICgqCl5cXDh48iHz58onrGjFiBI4ePYqEhATUr19f7ekNKysrTJ06FVWrVkVSUhK2bdsGb29vbN26FVWqVAEA2NnZwc/PD+XKlcObN2+wZMkSdO3aFbt37xaPTy1atEDx4sVRuHBh3Lt3D/PmzcPjx48REBAAIKUZpfXr12PgwIHi975MmTJYtWqVuO/ev3+PsWPHYu7cuWp3dadWrlw5/Pbbb5DJZIiOjsbq1avRqVMn7N27F0WLFhU/p0+blCpYsCDevn0rvi9WrBj69euHfv364fr16wgNDYWvry8MDAzQsmVLtGnTBlKpVGsMlLV0kVcCGc8tdZlXpsa8UpPqNzIuLg6DBw9GixYtmFdm4Xamxrwyhbe3N+rVqweJRMK8MhPzysDAQAQFBYkVdswr/6M63hQuXBhxcXHMK8G8kkhXhJiUp43lpurHMX3TlN8Dg/R3c/rDYMUfEeVo6b1IUsi0EN4lvBPvyP5UVFIU3ie+RyHTQl+8QJNRGzZsQEBAAJydnXH48GEUK1bsi/OMHDkSRYsWxdq1a9WS67t37+Ly5ctqdzsqFAokJiYiPj5ebH++atWqGsvcuHEjduzYgRcvXiAxMRFyuVy8QJIvXz60bdsWPj4+qF27NmrVqoWmTZuicOHCWuNr06YNevXqhSZNmqBOnTqoV68e3Nzc1Kb5NIY7d+6gffv2X9z21IoVKyZetAAAR0dHKJVKPH78WDzZkUqlX90vSeq7WFXLe/fuXZoXLkqUKKGWvBcuXFjs3PzZs2eQy+Vqd5vnyZNH7aQvLbllO9OKSXUimvqkRHWSExkZCSsrK9y9exf37t3D7t27xWkEQYBSqcSzZ88+e0HR1NRUvDjz6fbExMTg9evXYgWRipOTk9hJ+8OHDyGTycS+FACkeaduRqXns3F3d4cgCIiPj4eNjQ38/f3VysKnHcF/zbpV61ftl6+J9/Hjx2jYsKHa9HZ2dp+9QKPat6qLM0DKvleVf1XZkMlkav1rODo6Ii4uDi9fvtS4QBwWFoaEhATxQpGKXC5HpUqVAKQcj5ydnb+5j6gnT55g8eLFuHbtGt6/fy8+SfDy5cssOclu2rQpjhw5gj/++ANTpkxB2bJltU535coVPHz4EHPmzNEY97njjbm5OcLDwzF+/HhMnDhRnCY5OVmsKFDddd6vXz/xApWfnx/q1q2LAwcOiBdeAWDs2LEYOHAgnjx5gt9//x1+fn7ihc3y5cujfPny4rROTk54+vQp1qxZg7lz5wJIKfsqNjY2sLe3R/369bF//37x96Rjx47iNDKZDFZWVujRowfCw8NRunRpJCQkYPz48XBycsL8+fOhVCqxevVq9O3bF9u3b4eJiQkmTpyIFi1aaL1zPPV+Sv29d3R0RLNmzbB582YMGzYszfk+x87ODnZ2dvD19cXvv/+OoKAgnDlzBjt37vyq5dG3Y17JvPJHyLdyy3amFVNuzisrVqwo/s+8MkVm5JXx8fHo1auX2nDmlSlUxxtVrA4ODswrmVcS6UZcytO5ClP1Y7GBWcrxxoBP/BER5RwSiQRrm6xFfHL8lycGYKhnCEsjS60XaSyNLFHYtDA2NNvw2WWYGphm6Gk/IKVzaH19fezcuRPNmzdH48aN0apVK7i4uGjcUaji7u6OXbt24cqVK+LdrwDEO4hVd8yllvrkMfXJDgDs3bsXs2fPxpgxY+Do6Ahzc3OsWrUK165dE6fx8/ODl5cXTp06hf3792PhwoUICgqCg4ODxrqqVKmCo0eP4uTJkzhz5gyGDRsGV1dXLF68OM0YPr07MLOkPmlTkUgkGk1naWvmJvXJmepzVZ18aKPtDshvbf41vX607Uy9DlVM2uJUrTcuLg6dOnXS2pfFly56fro92vabrqTns1HdRVygQAGtd41+WjbSWy6+Zr9ktCzpgqp/nuXLl6No0aJq41QXtjLreNSvXz+UKFECM2bMQOHChaFUKtGiRQu1JrPSo1ChQrh+/braMNXdvKnv6I6Pj8fNmzehr6+PsLCwNJe3bds2VKpUSevF+s9R7bvp06fD3t5ebZzq90oVT+qLiEZGRihVqhRevnypNo+qzxJra2vkzZsXXbt2xYABA9K8+G9ra4vLly+nGZ+lpSXKli2r1t/Np1Rxh4WFoXTp0ti9ezeeP3+OLVu2iNswb9481KhRA0ePHkXz5s1x7tw5HDt2DKtXrwbw38XfypUrY9q0afj555811mNoaIhKlSqJsaj2S2RkpNr3NDIyMs2nkB49eoSdO3di9+7diI6ORocOHbSui7KHLvJKIOO5JfPKFMwrv82Ptp3MK1No2w/MK7+NKjcKDAxUq1AHmFd+CfPKzMkrU28f80qitOnHpfSxrTRVPyYbmVsC4BN/REQ5jkQigZmh2ZcnBBAvj0dXm65Ydn2ZxriuNl2RrExO97IyokiRIhgwYAAGDBiAy5cvIzQ0FIMHD4a5uTlatmyJ1q1bq92dCQCdO3dGxYoVMWDAAAQGBqJGjRoAgMqVK+Px48coU6ZMhmK4fPkyHB0d0bVrV3GYtgS3cuXKqFy5Mvr27YuOHTtiz549Wi/QACnNXDRr1gzNmjVD48aN0bt3b3z48EGtOY7UpFIpzp49q7VD8LS8fPkSERER4knW1atXoaen98W7gAsUKIDXr1+L7588eaLWNn9WKFmyJAwNDXHjxg0UL14cABAdHY0nT558sRP63LKd36Jy5cp48ODBZ8u+oaFhhi8WWFhYoHDhwrh8+bL4PQNSvjOqu8+tra2xc+dOJCYmihdCr169mvGN+EolS5aEpaVluqcvUKCAWh8RQMqdyN96F/KXlCtXDjdv3lQbduPGjc/OY21tjZCQEMTFxYkXdS9fvqxR/u/du4eEhATxwsrVq1dhZmam9eKctbU1jIyM8PLlS7i4uGhdr0wmQ0hICORyudb9kp6y9P79ezx+/BgzZswQy/7Fixc1lgPgi/1sODg4YPny5YiMjBSfSjhz5gwsLCxQoUIFcbpZs2ZBT08PK1asQJ8+feDu7q52ER9IadJv//79GDlypNZ1fe54U6hQIRQuXBhPnz5Fq1attM5ftWpVGBkZ4fHjx+J2y+VyPH/+XDwmaKO6+JeUlJTmNHfv3v1s01WxsbF4+vTpZ6e5c+cOgP8umCQkJEBPT0+tYkX1XvUZb9myRe0zOnr0KFasWIHNmzdrXORTUSgUuH//vnj3eMmSJWFlZYWzZ8+Kx6mYmBhcu3YNnTt3Fud79+4d9u3bh507d+LWrVtwdXXFyJEj0bBhQ7WKFtIN5pXpw7zy+863cst2fgvmlcwrM5pXvnjxQu0zTY15ZYRYOcW8MvPzStWTpcwriT7PMOHfmyQszNWGm1jkTxmfsXsofgis+COiXMPU0BQ+dilNv228uxFRSVGwNLJEV5uu8LHzgbF+1idGTk5OcHJywvjx43HkyBEEBwdj9erVCAkJ0WgexcvLCwqFAn379sWKFSvg7OyMgQMHol+/fihevDgaN24MPT093L17F/fv38fw4cPTXG+ZMmWwc+dOnDp1CiVLlsTOnTtx48YNlCxZEgDw9OlTbN26FR4eHihcuDAeP36MJ0+eoHXr1gCA69evY/To0Vi7di2KFCmCoKAgWFlZoVKlStDT08OBAwdgZWX12ZPIQYMGoUePHihdujSaN2+O5ORknDhxAn369AGQ0pF8RESEWhMixsbG8PX1xZgxYxATE4MZM2agadOmX+wfoWbNmti4cSMcHR2hUCgwb968LD85tbCwgKenJ+bMmYO8efOiYMGC8Pf3h0QiUTsxyC3bmdlUfbNMmzYN7du3h6mpKR48eIAzZ85g0qRJAFKakrpw4QKaN28OQ0NDsR+ML/Hx8YG/vz9Kly4NGxsbBAcH4+7du5g3bx6AlL4eFixYIPZZ8fz5c/EOztSaNGmCkSNH4qeffhKHvXv3TjxhVPnS5/qtatasiVWrViE0NBQODg7YtWsX/vnnH1SuXDlL19utWzd069YNQUFBqF+/Ps6dO4eTJ09+tly0bNkSixcvhq+vLwYNGoR3795h+vTpaN26tVrfJElJSRg/fjz69++P58+fw9/fH926ddP6dIuFhQW8vLzg5+cHQRBQrVo1REdH4/Lly7CwsECbNm3QtWtXrF+/HiNGjECfPn2QJ08eXL16FXZ2dihfvjxKlCiB06dP49GjR8iXLx/y5Mmj8d3Kmzcv8uXLhy1btsDKygovXrzA/Pnz1aYpWLAgTExMcOrUKRQtWhTGxsZi00apubm5oUKFChg9ejR+/fVXvHnzBgsXLkTXrl3Fu8mPHz+OHTt2YMuWLahSpQp8fHzg6+uLXbt2IW/evOKy9u3bB4VCkeYFli8db4YMGYIZM2YgT548qFOnDpKSknDz5k1ERUWhZ8+esLCwQKdOneDv749ixYqhePHiWLVqFYCU7wAAnDhxAm/fvoWtrS3MzMzw4MEDzJkzB05OTuLvzpo1a1CyZElUrFgRiYmJ2LZtG86dO6f23Zo9ezbq16+P4sWL4/Xr1/D394eenh5atGgBIKWiYffu3XB3d0e+fPlw7949+Pn5oXr16uLd0K6urpgzZw6mTp0KLy8vKJVK/PHHH9DX1xcrhj9t0u3mzZvQ09NTa1YrICAADg4OKFOmDKKiorBq1Sq8ePFCbBpKIpHA29sby5cvR7FixVC+fHksXrwYhQsXVmuqrEOHDjA2NoanpyeWLFmS5l3q9P3L7XllaGgo88osxLySeSXzyu8nr+zVqxfzyi/klaNHj8bbt28xc+ZM5pWZmFcuW7YMZcqUQcmSJbFo0SLmlUSfYZiYUvmulyef2nATy5T3hnzij4jox2asb4yeVXviF7tfEC2PRh7DPEhWJmfLxRm1OIyN0bx5czRv3hwREREwNzfXOl2PHj0gCAL69OmDlStXok6dOli+fDmWLFmCFStWwMDAAOXLl/9iHyedOnXC3bt3MXz4cEgkEjRv3hxdunTByZMnAaQ06fLo0SOEhITgw4cPKFy4MLp27Sq2qR8fH4/Hjx+LzYyYm5tj5cqVCAsLg56eHmxtbfHHH3+k2cQUALi4uGDRokVYunQp/vjjD1hYWKi1e//mzRuN5jxKly6Nn376Cb/88gs+fvyIevXqYfLkyV/cv2PGjMG4cePQtWtXFC5cGOPGjcOtW7e+ON+38vX1xeTJk9GvXz9YWFigd+/eePnypdrddrllOzObjY0N1q9fj4ULF6JLly4AUjqHb9asmTjNkCFDMGnSJDRs2BBJSUm4d+9eupbt7e2NmJgYzJo1S+xXZOnSpWI/F+bm5li+fDkmT54MT09PVKhQAaNGjcLgwYPVlvP48WNER0erDduzZw/27NmjNmzo0KFpnjxnhjp16mDAgAGYO3cuEhMT0a5dO3h6euL+/ftZtk4AqFatGqZOnYqAgAAsXLgQbm5u6NGjBzZu3JjmPKampli1ahVmzpyJn3/+GaampmjUqBF8fX3VpqtVqxbKlCmDrl27IikpCS1atNDY/6kNGDAARYoUQWBgIJ49e4Y8efKgcuXK6NevHwAgf/78WLt2LebOnQsvLy/o6emhUqVKqFatGoCUk+i///4b7dq1Q1xcHNatW6fx9KCenh4WLFiAGTNmoEWLFihXrhwmTJig1myYgYEBJkyYgCVLlmDx4sVwdnbG+vXrNeLV19fH8uXLMWXKFHTs2BGmpqZo06YNhgwZAiDlQt/48eMxePBgVKlSBQAwePBgnD59GpMnT8bChQvFZe3YsQM//fRTmhfMv3S8ad++PUxMTLBq1SrMmTMHZmZmkEql6N69uzjN6NGjYWBggNGjRyMhIQH29vZYu3ateKHI2NgY27Ztg5+fH5KSklCsWDH89NNP4gV5IOVu7tmzZyMiIgKmpqaQSqUICgpCzZo1xWlevXqFESNG4MOHDyhQoACqVauGrVu3ihdfDQ0NcfbsWaxbtw5xcXEoVqwYGjVqhAEDBojLsLa2xvLlyxEQEICOHTuKn/XKlSszdHEkKioKEydOxJs3b5A3b15UqVIFmzdvVrtz/pdffkF8fDxmzJiB6OhoVKtWDStXrlQ7NgYGBn627yjKWXJzXnnnzh3mlVmMeSXzSuaV30deOWzYMBQoUIB5pRaq402fPn2YV2ZRXjlp0iRERUUxryT6AuOElIo/43wF1Yab5035bhrlwoo/ifC9NEaeTRQKBa5evQoHBwfo6+vrOhzKBVjmMkdCQgIeP36McuXKZVmfHj8iQRDEpk6y8q7ZrODv748jR47k6M6o4+LiULduXYwZMybNi2g/6nbm5LJHmWfChAl49OgRNm3alKXrGTFiBPT09DBv3jyWPdKprCp/n8uDmGum7XP7hrll7pJd+VZW/gbl5rySviw35D+6yCspfXJD+fvR/Ch5EPNgUjldoxIKRgFPfbuiUY8J4vDwe5cR2zqlefrSF87BPE/etBaRYboofxlZJ5/4IyIiyiS3b9/Go0ePYGdnh+joaCxZsgQA0KBBAx1Hlrlyy3ZSxq1atQq1a9eGqakpTp48idDQ0HQ9ZfC1kpOT8eTJE1y9ehUdO3bMsvUQERFlt9ySb+WW7aSMY15JRETpZZqQ8jdf8bJqwy0LFUPsv/9HvXuZqRV/3ztW/BEREWWi1atX4/HjxzA0NESVKlWwcePGdPcJkpOktZ0XL17EL7/8AiDlzs9P7/i8cuWKLsKlbHL9+nWsXLkSsbGxKFWqFMaPH5+ld+v/888/6NSpE1xcXMQm5IiIiH4UzCv/yyu1YV75Y2NeSURE6REb/RFmSSn/FyopVRuXJ68VngPQAxD1/jWKlbHJ9vh0hU19EmUxlrnM8aM0Q5Dd2OQGZbeEhARERERAEATEx8fD1NRUreyVKVNGh9FRbsDjHukSm/r8vrCpT8pu/A3KXKq8Mi3MK//Dske6xPKX8/woeRDzYAKAhzfOIKm9DwCg7KXzMDVX75f0atVKME4GEpbNgGP9dpm2Xjb1SURERLmGiYkJypQpw5M/IqJc7MKFC1i1ahVu3ryJN2/eYMmSJWjYsCEAQC6XY+HChTh58iSePn0KCwsLuLq6YuTIkShSpIi4jA8fPmD69On43//+Bz09PTRq1Ajjx4+Hubm5rjaLiLKZKq8kIiIiSsvb5w9gCSDOGBqVfgCQZAgYJwPx0e+yPzgd0tN1AERERERERPTjiIuLg0wm09oXU0JCAm7fvo3+/fsjODgYAQEBePz4Mfr376823ahRo/DgwQMEBQVh+fLluHjxIiZNmpRdm0BERERERDnAx1dhAIB4Y+3jk/999C0h5mM2RfR94BN/RERERERElGnc3d3h7u6udVyePHkQFBSkNmzixIlo3749Xrx4geLFi+Phw4c4deoUtm/fDltbWwDAhAkT0KdPH4wePVrtyUAiIiIiIsq94t6+BADEp9FqrfzfGrDEmPfZFNH3gU/8ERERERERkc7ExMRAIpHA0jKlaZ4rV67A0tJSrPQDAFdXV+jp6eH69eu6CpOIiIiIiL4z8g+RAIAkE+1VXaon/uRxMdkV0neBT/wRERERERGRTiQmJmLevHlo3rw5LCwsAABv375FgQIF1KYzMDBA3rx58ebNmwyvQ6FQaB0mCIL4IsoMqrLEMkXZjWWPdInlL+dR5T8KhUJrnpRTqGLPydtA3y456gMAIMlYX2tZSDaQABAgj43O1LKii/KXkXWx4o+IiIiIiIiynVwux9ChQyEIAqZOnZpl67lx44bW4QYGBoiPj4dSqcyydVPuFB8fr+sQKJdi2SNdYvnLORITEyGXy3H37l1dh5Ip0sr1KHdQRkUBAOTG+rh69arGeNUTf7Ef3mkd/62+1/Kn84q/jRs3YtWqVXjz5g1sbGwwceJE2NnZpTn9mjVr8Oeff+Lly5fInz8/GjdujJEjR8LYOI3eG4mIcpBnz56hQYMGCA0NRaVKlXQdTq4WHByM3377DRcvXtR1KFnqR9/OrNq+3PBdlclkWLJkCRo2bKjTOHx9fREVFYWlS5d+0zQE+Pv748iRI9i5c6euQ/khfC/fkZxKLpdj2LBhePHiBdauXSs+7QcAhQoVwrt379SmT05OxsePH2FlZZXhddna2kJfX19tWEJCAsLCwmBqagoTkzQ6BMnhnj17hoYNGyIkJOSH/a363giCgPj4eJiamkIikYjDg4OD4efnhwsXLugwuqz3o29nVm1fZnxX0yp73wsbGxsEBATo/DfT19cX0dHRWLJkyTdNQyl55dGjRxEaGvrdl7+cILu/I3p6ejA0NESFChVydB6kUChw48YNrbke5R4Pk5IBAEpzEzg4OGiM32ugB0ABY0Dr+K+li/KnWmd66LTib9++ffDz88PUqVNhb2+PtWvXwsfHBwcOHEDBggU1pt+9ezfmz5+P3377DY6Ojnjy5Al8fX0hkUgwduxYHWwBERH9qJo1awZ3d3ddh5HlfqTt9PDwgLe3N3r06KHrUAAAXl5esLGxwfjx479q/vPnz8Pb21t8X7BgQVSrVg2jR49GqVKlMivMNJ0+fRp58+bN8vVkhvHjx2dq00KZVZGYkQriFy9eYMqUKTh//jzMzMzg6emJkSNHwsAgfel6cHBwmvnwmTNntObW3+L48eNYsmQJ7t27B2NjY1SvXj1D+8vf3x8BAQFqw8qVK4cDBw5kKI5du3Zh5cqVCAsLQ548eVCnTh2MHj0a+fPn15h27969GDFiBBo0aKAR68OHDzF37lxcuHABCoUC1tbW8Pf3R/HixdMdy4cPHzB9+nT873//g56eHho1aoTx48fD3Nw8Q9uUG6gq/cLCwrBu3TqNz8vR0RFRUVG4efMmqlatCgA4d+4clErlZ28STYu+vr7Gybi+vj4kEon4+hGptutH3sbv1af7vHnz5qhXr94P/zn8SNupLa9M/Z3KTOn5rqY3r0xrGd9LXqnrspGez3DChAkQBCHTYv1R80pt+/Jrf2+YV6ZIvf+yOq9UrUtbjpQT/SjbQV9HPz4p5R9zU63lQPFvxZ8gT8iScvK9lj+dVvwFBQWhQ4cOaNeuHQBg6tSpOH78OHbs2IE+ffpoTH/lyhU4OTmhZcuWAICSJUuiRYsWuHbtWrbGTUQ5mzI+HhIDAyiioqBvaQkhORl6pqY6i+fjx48wMDDIsguDcrk8S5ab0RgMDQ2zfD1JSUkwMjLKlGWZmJh8t3e+5ZbtpBQHDhyAubk5wsLCMHHiRPTr1w+7du3SSCxVfTSk94T+S77mqRpdyZMnj65D+CYKhQJ9+/ZFoUKFsHnzZrx+/RpjxoyBoaEhRowYka5lNGvWDHXq1FEb5uvri6SkpEyv9Dt48CAmTpyI4cOHo2bNmlAoFLh//36Gl1OxYkUEBQWJ7zN6snTp0iWMGTMGY8eORf369REREYEpU6Zg4sSJGhd/nj17htmzZ8PZ2VljOeHh4ejSpQvatWuHIUOGwMLCAv/880+GWxQZNWoU3rx5g6VLl8LAwADjxo3DpEmTMH/+fAApTSrFxsZq9F33I4qNjUV4eLj4/tmzZ7hz5w7y5s0LKysrDBkyBLdv30ZgYCAUCoXYb1/evHlhZGQEa2tr1KlTBxMnTsTUqVMhl8sxffp0NG/eHEWKFNHVZmmVG/PK7MjpvocYcku+lVu2k1Iwr/wy5pXMK7+nvDIoKAhyuTxX55VEX2KYkNLvncTCUut4paFeyj+JSdkV0ndBT1crTkpKwq1bt+Dq6vpfMHp6cHV1xZUrV7TO4+joiFu3buH69esAgKdPn+LEiRNf9aSCqvNSvvjKjhfLXOa8VJ0Pf8tLmZiIyJUrcb+2G/6p7Yb7td0QuXIVlImJmbL89L7kcjn+97//YciQIXBzc0N4eLg4Dvivo+Xk5GSMHTsWTZo0wfPnzyEIAo4cOYI2bdrA1tYWDRo0gL+/P+RyuTiPTCbDpk2b0K9fPzg6OmLVqlVITk7GuHHj4OHhATs7OzRu3Bhr165Vi+ncuXP4+eef4eDgAGdnZ3Tq1AnPnj3TGn9iYiKmTp0KNzc32Nraon79+li+fLnWGBwcHLBs2TIIgoCjR4+iXbt2sLW1hYuLCwYOHJjmPlq8eDFat26NP//8E+7u7rC3t8fQoUMRFRUlTjNmzBgMGDAAy5Ytg5ubG5o0aSKu//Dhw2rLc3Z2xo4dOyAIAp4+fQqZTIaDBw/Cy8sL9vb2aNWqFS5fvixOv2PHDjg7O2vEExoaivr166NatWoYPnw4oqOjxWmio6MxcuRIODg4wM3NDUFBQejWrRtmzpyZK7czdVlO/apfvz6WLl2K0aNHw9HREfXr18fRo0cRGRmJ/v37w9HRES1btsT169fV5rt48SK6dOkCOzs7uLu7Y/r06YiNjYUgCOjWrRueP38OPz8/yGQyyGQytThOnjyJpk2bwtHRET4+PoiIiFDr0DwgIAB169ZF1apV0bp1a5w8eVJt3deuXYOnpydsbW3Rtm1b3L59O83tS88+SM9no5q/QIECsLKygrOzMwYMGIAHDx7gyZMnOHfuHGQyGU6cOCEeEy5evCiWl9TLmTlzJrp16ya+79atG6ZPn445c+agRo0aqF27NhYvXqw2T+ryld54t2zZIpbjgQMHYvXq1WrlS9vr7t278Pb2hp2dHWrUqIEJEyYgJiZGYz/4+/ujZs2acHJywqRJk5CY6rj96TYrlUoEBgaKx7xWrVph//79asu8f/8++vTpAycnJzg6OqJLly4ICwvD4sWLERISgqNHj4pl6dy5c1pjP3nyJDp37gxnZ2fUqFEDffr0QVhYmDi+QYMGAABPT0/IZDK1zyD16/Tp03jw4AHmzJkDGxsb1K1bF0OHDsXGjRuRmJgIpVKJHj16oFevXlAqlRAEAe/fv0fdunWxaNEiCIIAY2NjFCpUSHzp6enh/PnzaNeunca+/NzxRhAEbN26FU2bNoWtrS2aNGmCjRs3iuPkcjlmzpyJX3/9FZ06dULZsmVhbW2Npk2bqi3j3r176N27NxwdHeHq6opff/0V7969U5tGX19fLeb8+fOrjf/48SPGjx8vfu7e3t64c+eOOP7KlSsoUaIEvLy8ULJkSVSrVg0dOnTQOHYkJydj1KhRGDx4MEqWLKnxvVywYAHq1q2LX3/9FZUqVUKpUqXg4eGBAgUKiNM8fvwYXbt2ha2tLZo1a4bTp0+rLefBgwc4deoUpk+fDltbWzg5OWHChAnYu3cvXr16BUEQ8ObNG9StWxcDBgzAoUOHkJSU9MVjyKevz+Wa35ObN2/C09MTnp6eAAA/Pz94enpi8eLFiIiIwLFjx/Dq1Su0bt0abm5u4iv1eeC8efNQvnx5dO/eXfyuTps2TUdbpN3n8srslJycjOPHj4t55dOnT7VOp1AoxLzyxYsXAKCRVwYEBCA5OVmc59Ocbvny5VAoFFrzytTOnz+vkVc+f/5ca1xJSUmYNm2aWl4ZGBj42RgA4NixYxp5ZVr8/f3RunVrbN68We34Fx0dLU7j6+urkW+p1n/kyBG15Tk7OyM4OBhAygVgmUyGQ4cOwdvbG66urmjdurVaeQ4ODla7QKyKJzQ0FB4eHmK+FRMTI04TExOjlm+tWbMGXl5emDlzps63M3UuoIvtTM3Dw0NrXvnu3Tu1vPLTZrE+zStnzJiBuLg4AClP132aV6Z26tQptbzy9evX4jilUqk1r0zt+vXrannlnTt30rWtaXn27BlsbGxw9OhReHt7a/1sVAoWLIjChQujevXqGDhwIB48eICwsDCcP39ezCvbtm0LW1tbXLp0SSwvqc2cORNeXl7iey8vL8yYMUMtr/T391ebJ3X5Sk9ZAoCtW7eq5ZVBQUFaK1pSu3fvnphXuri4YOLEiYiNjdWYLiAgQC2vTEr672Lwp9usLa/89Emyf/75B3379lXLK8PDw+Hv76+RV54/f15r7KnzShcXF/Tt21ftJppP88rUn0Fqqrxy7ty5qFSpEtzd3cW8UpV79OjRAz4+PmJu+OHDBzGvBFIq8a2srMSXvr6+mFd+avPmzahXrx5cXV0xbNgwteMNAGzbtk0jr1RJTk4W88rOnTujXLlyqFChApo1a6a2jPv372vNK1PT19dXi/nTCrGoqCiNvDJ1/3ZXr15FiRIl4O3tjVKlSsHZ2RkdO3YUr0GrKBQKMa/U9rSsKq8cPXo0KleujNKlS6NBgwZqFaZPnjxRyyv/+usvtWU8fPgQp06dwowZM2Bvbw9nZ2cxr4yIiAAAvH37VswrDx8+/F3c8E2kK0aJKf11G+TVXhEuVvwl5a7vic6e+Hv//j0UCoXGnSIFCxbEo0ePtM7TsmVLvH//Hl26dBFP4jt16oR+/fpleP3fa6eL9ONimft2BgYGiI+Ph1KpFIcJggAhISFd8xsbGuLjunV4u+S/ZhiUUVF4u2QJAAF5vb2R+IVkSWJi8k1Nfvzzzz/YvXs39u/fj+TkZDRq1AiBgYEoXbo04uLikPDvtiQkJODDhw8YO3YsXr58iZUrVyJfvnw4ffo0xowZg19//RWOjo549uwZZsyYAblcjr59+4rr8ff3x+DBgzF8+HDo6+uLd4LNnj0befPmxbVr1zBjxgxYWlqiUaNGSE5OxsCBA9GmTRtxebdu3UJCQoJ4ApzaunXrcOzYMfj5+aFo0aKIiIhARESE2rSfxnDw4EGMGDECvXr1wpQpU5CcnIzTp09rXT6Qcjd3WFgY9u7diwULFiAmJgbTpk3DpEmTxAsBCoUCZ8+ehYmJidi8hmp5iYmJassWBAFJSUlq+/n333/H8OHDMXbsWCxZsgQjRozAzp07YWBgIJ4UqZYhl8sRHh6OgwcPYuHChYiKioKvry+WLl2KQYMGAUg5Cb506RJ+//13FCxYEMuWLcPt27dRoUKFXLud2jp4FwQBa9aswcCBA9GzZ09s3LgRo0ePhp2dHVq3bo3Bgwdj8eLFGD16NLZv3w6JRIKnT5+id+/eGDBgACZOnIj3799j9uzZmDx5MqZOnYo5c+agU6dOaNu2Ldq0aSPuo6SkJMTHx2PlypWYNm0aJBIJJkyYAD8/P3H/btiwAUFBQRg/fjxkMhl27tyJ/v37Y/v27eJ3s2/fvqhZsyamTZuG58+fY/bs2QCQ5ncESLlIIJfLtY5Pz2eT+O+F4/j4ePFua9XxJzo6Whw/d+5cDB8+HCVKlIClpaVYEZB6vXK5HEqlUhymVCoRGhqKrl27Yu3atbh+/TomT56MKlWqoGbNmuJ8qvKVnnivXr2KKVOmYMiQIXB3d8f58+fFSv+09lF8fDx8fHxgZ2eH9evX4927d5g+fTqmTJmCqVOnAviv/Ovr6+OPP/4Qmy4yNzcXy+Sn27x69Wrs27cPY8eORenSpXH58mWMHj0a5ubmqFatGl6/fo1u3bqhWrVqCAwMhLm5Oa5evYqYmBh07twZ9+/fR2xsLKZMmQIg5Skkbdvw4cMHdO7cGRUrVkR8fDyWLVuGAQMGYPPmzdDT08P69evh5eWFZcuWwdraGoaGhlqXc+HCBVSoUAFmZmbi+GrVqiEmJgY3b96EjY0NJk+ejA4dOmDVqlXo0qULJk6cCCsrK/To0UPrMrdt2wZjY2PUqVNH7fv9pePNvn37sGjRIowZMwY2Nja4e/cuZsyYAX19fbRs2RI3b95EREQE5HI5WrdujcjISEilUgwbNgwVKlQQy2f37t3h6emJYcOGITExEYsXL8bgwYPxxx9/iLE8efIEbm5uMDY2hp2dHQYNGoRixYqJ2zB48GAYGxvD398fFhYW2LFjB3r06IGQkBDkzZsXlSpVwsuXL3Ho0CHUrl0b7969w/79++Hq6qq2T5YtW4Z8+fKhWbNmOH/+vFpZUSqVOH78OLp3744ePXrg3r17KFGiBHr27In69euL0wwaNAgFChTAunXrEB0djblz56p9R86fP488efLA2tpaLNv29vbQ09PDhQsX4OHhgfz58yMoKAh79+7FpEmTIAgCmjRpghYtWqBy5cpavyMqiYmJkMvlaheovmcuLi64d+9emuM/N04lX7584l3t2UUQBAhafre0TqtU4l1QUJp5ZYGePSHR+/J9tpJv6A/p3r17CAkJwe7du5GcnIymTZti3bp1sLGx0Zg2KSkJI0aMwPPnz7Fp0yYUKFBAvFlkwoQJcHZ2Rnh4OCZOnAgA4vEVSLlAPnLkSIwfPx76+vpQKpUoWrQoFi1ahHz58uHKlSuYNGkSrKys0KxZMzGvbN++PX7//XfI5XJcv349ze1cv349jh07hoULF6JYsWJ4+fIlXr16pTbNpzEcP34cgwYNQr9+/TBnzhzI5XKcOHHis/srPDwc+/fvx/LlyxETE4Px48djypQpauXs7NmzsLCwUHtqJL0WLFiA0aNHo0iRIli+fDlGjhyJQ4cOpfnEVHh4OI4ePYrly5cjKioKw4YNw4oVKzB8+HAAwKxZs3DlyhUsW7YMBQsWxOLFi3Hr1i2tn292b+eYMWNQpkwZLFiwQGfbmdratWsxfPhwDBgwAGvWrBErAdu1a4fRo0dj3rx5GDNmDPbu3QuJRILw8HD88ssvGDp0KH777Tcx/5g+fTr8/PzECssOHTqgQ4cOautKSEjA6tWrMWfOHOjp6eHXX3/F7Nmzxf27bt06BAUFYdq0aahUqRJ27NiBAQMGYM+ePShbtixiY2PRt29fuLq6Yu7cuXj27Fm6Kzm/ZMmSJfD19UXZsmXT9dmons5MXWEwf/58jBkzBqVKlYKlpfYnJ7QJCQlBz549sXXrVly9ehW+vr5wcnJC7dq105znc2Xp0qVLmDx5MkaNGgUPDw+cOXMGixcv/mwMcXFx8PHxgaOjI7Zv347IyEhMmDAB06dPx6xZs8Tpzp49C2NjY6xfvx7Pnz/H2LFjkT9/frFMfiowMBC7du3C1KlTUbZsWVy4cAG//vorChQogBo1aiAiIgLdunVDjRo1xD5rL1++jOTkZPTq1QsPHz5ETEwM/Pz8ACDNpvTj4+PRs2dPyGQyxMXFYdGiRRg4cCB27twJPT09bNu2De3bt8eaNWtQoUKFNJ98vnr1KqRSKQoVKiQOc3Nzw5QpU/DgwQNUrlwZs2fPRsuWLbFu3Tp0794dkydPRpEiRdK8gSI0NBQmJibijQIqquPNsmXL8PbtW8yYMUPteLNr1y4sWrQIkyZNQqVKlXDnzh1MnDgRZmZmaNOmDW7fvo2IiAjo6enB09MTb9++hY2NDUaPHg2pVAogpcKue/fuaN++PcaOHYvExETMmzcPw4YNw7p168RYwsLCxLzSwcEBI0eOVGtac+jQoTA2NsaKFSuQJ08ebNmyBd27d8fBgweRL18+ODg4YMGCBThx4gTq1q2LyMhIHDx4UONhkyVLlqBgwYJo3749Ll26pDZOlVf27t0bPj4+uH37NkqWLIm+ffuKffcplUoMHjwYBQsWxLZt2xAdHY3ffvtNbTlXrlyBpaUlbG1txWGurq7Q09PD9evX8dNPP6FEiRLYvHkzQkNDMWnSJCiVSrRs2RKenp5iM+lEuYVJQspNDCb5tT9drjRIeQJYIs9dT/xB0JFXr14JUqlUuHz5strw2bNnCz///LPWec6dOye4uroKW7duFe7evSscOnRIcHd3FwICAtK93uTkZOHixYtCYmKikJyczBdfWf5KTExkmcuEV0xMjHDr1i0hLi5OUCqVglKpFBQKhfC4U2fhtszmi697NWsJithY4W71GlrH361eQ1DExgr3atb67HIed+4iKBQKMYb0vCIjI4U1a9YInp6eQpUqVYQBAwYIBw4cEBISEjSmDQ8PF6RSqXDhwgWhe/fuQufOnYWPHz+K47t37y4sW7ZMbZ7Q0FChdu3a4nupVCrMnDlT3EcxMTFaY54yZYowaNAgQalUCu/evROkUqlw7ty5dG3TtGnTBG9v7zT3ReoYVK8OHToII0eOTPd+W7RokVCpUiXh5cuX4rATJ04INjY2QkREhKBUKoXRo0cLrq6uGvtSKpUKhw4dUhtWrVo1Yfv27Wr7eevWreL4+/fvC1KpVHjw4IGgVCqF7du3C9WqVVOLx97eXoiKihKHzZ49W2jfvr2gVCqFqKgooUqVKsK+ffvE8R8/fhTs7e2FGTNm5Lrt/FzZq1evnjBq1CjxfUREhCCVSoWFCxeKwy5fvixIpVJxH4wbN06YMGGC2nIuXLgg2NjYCPHx8eJyg4KC1KbZvn27IJVKhSdPnojDNmzYILi6uorv3dzchKVLl6rN165dO2HKlCmCUqkU/vzzT6FGjRriepRKpbBp0yZBKpUKt27dSvOz7dq1a5qffXo+m7NnzwpSqVT48OGDoFQqhVevXgkdO3YU6tSpIyQkJIjjDx8+rLbs0aNHC/3791cbNmPGDKFr165qsXXu3Fljm+fOnau1fKUn3mHDhgm//PKL2jJHjhypVr4+fW3evFmoXr26EBMTIw773//+J9jY2AivX78Wt6d69epCbGys2v53cHAQkpOTNbY5Pj5esLOzEy5evKi2rnHjxgnDhw8XlEqlMG/ePMHDw0NITEzUGpe2fZieV2RkpCCVSoW7d++q7bfPlROlUimMHz9e6Nmzp9qw2NhYQSqVCsePHxeH7du3T7C1tRXmzp0rODg4CI8ePUpzmU2bNhUmT56sNiw9x5uGDRsKu3btUptvyZIlQocOHQSlUins3r1bkEqlQr169YT9+/cL169fF4YPHy7UqFFDePfunTj9p9vz4sULQSqVCg8fPhSUSqVw/PhxYd++fcKdO3eEEydOCB06dBDq1asnHnsuXLggODk5aRz3GjZsKPz5559q+8TBwUGoXLmyIJVKhb59+6p9rhcuXBDq1KkjREZGav1sVccfe3t7YfXq1cKtW7eE5cuXCzKZTPxNPHnypFC5cmWN/Zb6O7J06VKhUaNGGse+mjVrChs3btT4fJKSkoQjR44IgwcPFqpWrSo0b95cWLFihVjuP33FxcUJt27dEmJiYtLMNZOTk7/p/OxHlJycnOa+iY+PF27fvi3Ex8cLgiAISqUy2/NKVW6pVCrTvU3v3r3TyCsPHjwoJCYmakz79OlTjbwyKipKHN+9e3dh+fLlavOo8koVVU73JVOnThUGDx4sCIIgvH//XpBKpcL58+fTtU3Tp08XvL2909wP2mLo2LGjMHLkyHQtXxAEYfHixUKlSpWEV69eicNUx7/Xr18LgiAIY8aMEVxdXTX2peq3NrVq1aoJO3bsEAThv/2s+o2MiYlR+40UBEHYsWOHUK1aNbV47O3thejoaHGYKt8SBEGIjo4WqlSpIuzfv18cHxUVJeZbut5OlX/++Ucn25la/fr1hVGjRonvX79+LeaVKleuXBGkUqm4D8aNGydMnDhRbTmqvDIhIUFcblBQkNo0O3bsEKRSqRAWFiYOU+WVKm5ubsKyZcvU5lPllYIgCJs3bxZq1KghrkcQBDGvvH37dprb2a1btzT3ieqz2bBhg/g9+vSzOXfunCCVSoWPHz8KgiAIERERYl6ZmJgojv+0DIwZM0bo37+/2rAZM2YI3bp1U4utc+fOGts8d+5c8X3qZaenLA0bNkzo06eP2jJVeWVatmzZIuaMKsePHxdsbGyEN2/eiNtTo0YNIS4uTpxGlVcqFAqNbU5MTBTs7e01rl2OGzdOGDFihCAIgjB//nzBw8NDSEpK0hqXtn2YHqq88t69e4Ig/LffPldOBEEQJkyYIPTq1UttWFxcnJhXqqjyynnz5gkODg7C48eP01ymKq9MLfXxRnXs+/R407BhQ2H37t1q8y1ZskTo2LGjIAiCsGfPHjGvPHDggHDjxg1hxIgRQo0aNYT379+L03+6PS9fvhSkUqnw6NEjQRAEtbzy5MmTQseOHYV69eqJxx5VXvnpca9hw4bC5s2b1fbJp3ll6s81dV4pCJqfrer4Y29vLwQFBQm3b98WAgMDBZlMJv4mnjp1SqhcubLGcTr1d2TZsmVCo0aNND4HVV75KblcLhw9elTMK1u0aCGsXLlSLPfafJoH5VSfy/Uo9zjnmJJXn9jmr3X8nz1ScvItndL+Dfkauih/GVmnzp74y58/P/T19REZGak2PDIyUu2ulNQWLVqEVq1aoX379gAg3gUzadIk9O/fH3rpuKNS5XvtdJF+XCxz30ZfX1/sfFjtbuF03iFtYFUIye/eQRkVpXW8MioKye/fw8CqEBTv3392WRntsHrjxo0ICAiAs7MzDh8+rPY0g7ZlA8DIkSNRtGhRrF27Vq2fjLt37+Ly5ctqzR8pFAokJiYiISEBpv/2KVO1alWNTrY3bdqEHTt24MWLF+KTAzY2NpBIJMifPz/atm2L3r17o3bt2qhVqxaaNm2KwoULa42zbdu26NWrF5o2bYo6deqgXr16cHNzU5vm0xju3r2LDh06pHvfSSQSFCtWDEWLFhWHOTo6QqlU4smTJyhcuDAkEgmkUqnW9vK1fU6fliGZTCb+r9rWd+/ewdraWqOzcolEghIlSqj1+VC4cGFERkZCIpHg+fPnkMvlsLe3F+extLREuXLl1JaT27YzrfhSx6Tq80MqlYrDVLnAu3fvULhwYdy9exf37t3Dnj17xOUI/zbp+Pz5c63bovrf1NQUZcqU0bo9MTExeP36NapVq6Y2n5OTE+7evQuJRIJHjx5BJpOpfRcdHR3T3L7U69a2T1IPS89nU69ePQiCgPj4eNjY2MDf3x/GxsbieFtbW41t1rbe1LF++hkAKZ+Dar98Ok964n38+DEaNmyoNr+dnR2OHz+e5j5S7dvUfVFVq1ZNLP9WVlaQSCSwsbGBmZmZOI2joyPi4uLw6tUrlChRQm27wsPDkZCQgN69e6utSy6Xo1KlSpBIJLh79y6cnZ3T7NPoc59dak+ePMHixYtx7do1vH//Xmwu6dWrV2r76ku/G9p+37T937RpUxw5cgQrVqzAlClTxO/dp65cuYKHDx9izpw5Gsv53PHGwsIC4eHhmDBhAiZNmiROk5ycjDx58kAikYjb2K9fP/Gu71mzZqFu3bo4ePAgOnXqhLt37+Lvv/+Gk5OTRmxPnz5F+fLl1e6etrGxgYODA+rXr48DBw6gffv2uHfvHuLi4tSeQAVSnrR4+vQpJBIJHjx4gJkzZ2LgwIFwc3PDmzdvMGfOHEyZMgW//fYbYmJiMHr0aEyfPl1s7unTz1a1PQ0aNEDPnj0BAJUrV8aVK1ewZcsWuLi44NGjRyhatKjGftP22aX1mX/6+RsaGqJBgwZo0KCB2PfO3LlzERERgfHjx2vsN9XymE9mMR3klRm1YcOGdOeVKl/KK1VNZwL/5ZXx8fFqeeWnNm7cqDWvBFKe1mzbti18fHzSlVe2adMGvXr1QpMmTT6bV6Z2584d8bpAehUrVkytj0jV8e/x48dqucjX9neXujlI1fJUv5HalChRAhYWFuJ7VX4CpDSFKJfLYWdnJ47PkydPmsf91HLLdqYVkyqHVD0tBEBsaSoyMhJWVlZiXrl7925xGlVe+ezZszS3BQBMTU1RunRprdujyis//f1T5ZVAShN+MplMLa9X/aZ8q4oVK4r/p/XZuLu7a+SVqctC6ieMMuLT5lBVeWV65/k0XlVemZoqr0yLat+mzhmdnJzE8q8qGzKZTDy+Af/llS9fvkSJEiXUlhkWFob4+Hj06tVLbbgqrwRSjkfOzs7f3PdoWnnly5cv1cpzZlHllX/88QemTJmCsmXLap0udV75KdXxRhWrg4ODuL/Nzc0RHh6O8ePHi0+TA//llQDE1pz69euHxo0bA0hpHrxu3bo4cOCAmFeeP39e6/ckPDwc5cqV08gr7e3tUb9+fezfv18tr3RxcVGbPyEhQWxONa28cvLkyWnmlZ9SbU+DBg3Qo0cPAEClSpVw+fJlbN68GTVq1MDDhw9RtGhRjeP0tzAwMICHhwc8PDzEvHLOnDl49eqV1ryS6EeiSE6G2b8NwRUoqf33WzBMqQLTk39/XSRkJZ1V/BkZGaFKlSo4e/as2uPOZ8+eRbdu3bTOk5CQoFG5pzrxVf3IEFHuIZFIUGbjhnQ3ySQxMICepaXWizR6lpYwtLJC2c2bP7+Mr2iOqUOHDtDX18fOnTvRvHlzNG7cGK1atYKLi0uaNyy4u7tj165duHLlCmrVqiUOj4uLw+DBg9GoUSONeVKfPKY+2QGAvXv3Yvbs2RgzZgwcHR1hbm6OVatW4dq1a+I0fn5+8PLywqlTp7B//34sXLgQQUFBcHBw0FhXlSpVcPToUZw8eRJnzpzBsGHD4Orqqtb8yqcxpL7QlJlSn7SppL44rZKcqr8aldQnZ6rPVZmqKdlPaWsmJ7t+f3607Uy9DlVM2uJUrTcuLg6dOnXS2pfFly56fro92vabrqTns9m4cSMsLCxQoEABtQtnKp+WjfSWi6/ZLxktS7qgasJx+fLlahU1AMQLW5l1POrXrx9KlCiBGTNmoHDhwlAqlWjRokWG+9goVKiQRv8hb9++BfDfhTAgpQmomzdvQl9fH2FhYWkub9u2bahUqVKGm/lR7bvp06fD3t5ebZzq90oVT+qLiEZGRihVqhRevnwpLqd+/foYNWqUxjpSb09qlpaWKFu2rHjxJTY2FlZWVli/fr3GtKqLRYGBgXBychIreW1sbGBqaoquXbti2LBhiIyMxPPnz9G/f39xXlV5rVy5Mg4cOICiRYvCwMBA4yKvtbW1RvNNn1OoUCGNvmaSk5Px8eNHrdssCCn9lu7cuRMHDhyApaUlBg4ciJ9//jnd66TMpYu8Esh4bsm8MgXzym/zo20n88oU2vYD88pvo8qNAgMD1SpqAOaVX8K88j/MK4myTmREGAz+/ekoWkZ7M+ESo5S8Vi/5+/qNyWo6q/gDgJ49e2LMmDGoWrUq7OzssHbtWsTHx6Nt27YAILaPP3LkSABA/fr1ERQUhMqVK8POzg7h4eFYtGgR6tevzztfiXIpiUQCyScXAtKijI9HAS+vf/teUVfAywuCQgG9dC4rI4oUKYIBAwZgwIABuHz5MkJDQzF48GCYm5ujZcuWaN26tdrdmQDEPqMGDBiAwMBA1KhRA0BKQvn48WO1p5fS4/Lly3B0dETXrl3FYak7ClepXLkyKleujL59+6Jjx47Ys2eP1gs0/2fvvqOjqvY2jn+nZdJ7ARJ678UO2FEUsCByRQFFKdeGvYJiw4b6igXlUkQF1MsVsIENKyJeVLoGRHoN6ZM+9f1jQjSXAAlMclKez1qsmZw5Z59nwibsnN/sfQDCw8MZMGAAAwYMoH///owZM4acnByio6Mr3L9du3asXLmywhuCH8n+/ftJS0sr+yVr7dq1mM3mY34KODY2loMHD5Z9vWPHjgrvNRdIKSkp2Gw2NmzYUHYvgby8PHbs2HHMm9A3lPd5Ijp16sSff/551L5vs9mqfLEgPDycxMREVq9eXfbvDPz/Zg59+rx169Z8+OGHlJSUlF0IXbt2bdXfxHFKSUmp0j1WYmNj2bJlS7ltqampJ/wp5GNp2bIlGzduLLftWPe3bd26NYsXL6awsLDsou7q1asP6/+bN2+muLi47MLK2rVrCQ0NrfDiXOvWrQkKCmL//v2Hfar3kPbt27N48WJcLleF35fK9KXs7Gy2b9/O5MmTy/r+L7/8clg74J9BczQ9evRg+vTpZGZmls1K+PHHHwkPDy+7bx74Z9aZzWZmzpzJuHHjOPvss8tdxAf/hY1PP/20bPz8v4728yY+Pp7ExER2797NpZdeWuHxXbp0ISgoiO3bt5e9b5fLxd69e8t+JnTu3JnPP/+c5OTkI95X6H8VFBSwe/fusosZnTt3JiMjA4vFQkpKSoXHFBcXH/Y7wN8/FNiqVatyszkApk6dSkFBARMnTqRRo0YEBQXRtWtXtm/fXm6/HTt2lH3qv3Xr1hw4cICDBw+WzVj6358BPXv2xOFwsHHjRlq1agXATz/9hNfrLTeTZfv27Xz44Yd89NFHZGdnc9FFFzFt2jROPfXUKn+wSAJP48rK0biydo+3Gsr7PBEaV2pcWdVx5b59+8r9nf6dxpVp5cZHGlcGdlx5qOCqcaVIxQ7s+B0b4LJATELTincq/aBGQyv8VX5tzGowYMAA7r//fl5++WUuu+wyUlNTmTVrVtn0+/3795Oenl62/0033cQNN9zA1KlTGThwIBMnTqRv3748/vjjRr0FEalDzCEhxI0bS/wtt2Au/WXHHBlJ/C23EDduLOYKPvkaaL169eLxxx9nxYoV3HfffaSmpnLZZZexefPmw/YdOXIkt99+O//85z/LBv2Hbu796quvsmXLFrZu3cqSJUt48cUXj3re5s2bs3HjRpYvX8727duZOnVquV+cdu/ezQsvvMCaNWvYu3cvP/zwAzt27Ci7eLl+/Xouuugi0tLSAJgzZw6ffPIJW7duZfv27Xz22WckJCQc9ZfIW2+9lSVLlvDyyy+zdetWNm/ezIwZM8pef+GFF7jvvvvKHWO323nggQfYtGkTv/zyC5MnT+biiy8+4if7Djn99NOZP38+v//+Oxs2bOCRRx6p9l9Ow8PDufzyy5kyZQo//fQTW7ZsYeLEiYct99ZQ3megjR07ljVr1vD444+TmprKjh07WLZsWbkxQHJyMj///DNpaWmHfULyaEaPHs3MmTNZunQp27Zt4/nnn2fTpk1ce+21AAwaNAiTycRDDz3En3/+yXfffccbb7xxWDsXXXQRX375ZbltWVlZpKamlvtz6BO31eX0009n48aNfPDBB2VLBv3vBZvqMGLECL777jvmzJnDjh07eO+99/j++++P2i8uueQSgoKCeOCBB/jjjz/46aefeOKJJ7jsssvKLf3udDqZOHFi2ff/lVdeYcSIERXObgkPD2fkyJE8/fTTLF68mF27dvHbb78xd+5cFi9eDMDw4cPJz8/nrrvuYsOGDezYsYMPPviAbdu2Af6+tHnzZrZt20ZWVlaFn7SOiooiOjqaf//73+zcuZOVK1fyzDPPlNsnLi6O4OBgli9fTkZGBnl5eRV+H/r27UubNm2477772LRpE8uXL2fq1KkMHz687NPk3377LQsXLuT555+nT58+jB49mgceeIDc3NxybS1duhSPx3PECyzH+nlz2223MWPGDN5++222b9/O5s2bWbhwIXPmzCn7/g4bNoxXXnmFH374gW3btvHoo48ClC39ec0115Cbm8tdd93F+vXr2bVrF8uXL+fBBx8su1j17LPPsmrVKvbs2cPq1au59dZbMZvNDBo0CIDevXvTo0cPbrnlFn744Yey/V588cWy/7/OPfdcvvzyS9555x12797Nr7/+yuTJk+nWrRtJSUnY7XbatWtX7k9kZCRhYWHllrkbPXo0n376KQsWLGDnzp3MmzePb775hquvvrosS4sWLcp93/73/93WrVtz5plnMmnSJDZu3Mjq1at54oknGDhwYNnF93379jFgwADWrFnDbbfdxooVK3j66ac57bTTdHGmDtK4UuPK6qRxpcaVGlfWnnHlDTfcoHFlJcaVq1ev5sknn9S4MoDjyocffpj169fz66+/alwpcgRZ+3YAUGgHyxE+HGAO9o/Lra6GVfgzdMYf+P8zP9LSnv87BdtqtXLrrbdy66231kQ0EamHzHY7cWNGE3/jP/Hk5WGJiMDndmOu4N5p1clutzNw4EAGDhxIWlpauftb/d2oUaPw+XyMGzeOWbNmceaZZzJ9+nSmTZvGzJkzsVqttGrV6pj3ODm0Nv6dd96JyWRi4MCBXHPNNXz//feAf0mXbdu2sXjxYnJyckhMTGT48OEMGzYM8C8Dsn379rJfUsLCwpg1axY7d+7EbDbTtWtXZsyYcdR7rZ522mm89NJLvPbaa8yYMYPw8HBOOeWUstfT09PLlvM4pFmzZlxwwQWMHTuW3NxczjnnHB555JFjfn/vv/9+JkyYwPDhw0lMTGTChAn89ttvxzzuRD3wwAM88sgj3HjjjYSHhzNmzBj2799fbrmshvI+A61Dhw7MnTuXqVOncs011wDQtGlTBgwYULbPbbfdxqRJk+jXrx9Op7PCC58Vufbaa8nPz+eZZ54pu6/Ia6+9Vnafi7CwMKZPn84jjzzC5ZdfTps2bbjnnnsYP358uXa2b99+2C/gn3zySbn7EgLcfvvtR/zlORDOPPNMbr75Zp577jlKSkoYMmQIl19+OX/88Ue1nRP89+Z77LHHePXVV5k6dSp9+/Zl1KhRzJ8//4jHhISEMHv2bJ588kmuvPJKQkJCuPDCC3nggQfK7XfGGWfQvHlzhg8fjtPpZNCgQYd9///u5ptvJikpiX/961/s2bOHiIgIOnXqxI033gj47zX91ltv8dxzzzFy5EjMZjMdO3bkpJNOAvxL6a1atYohQ4ZQWFjI22+/fdjsQbPZzIsvvsjkyZMZNGgQLVu25KGHHiq3bJjVauWhhx5i2rRpvPzyy5x88skVLjFksViYPn06jz76KFdddRUhISEMHjyY2267DfBf6Js4cSLjx4+nc+fOAIwfP54ffviBRx55hKlTp5a1tXDhQi644IIjXjA/1s+boUOHEhwczOzZs5kyZQqhoaG0a9eO6667rmyf++67D6vVyn333UdxcTHdu3fnrbfeIioqCvDPSHr33Xd5/vnnGT16NE6nkyZNmnDmmWeW/T9x4MAB7rrrLnJycoiNjeWkk05iwYIF5e7FN2PGDKZOncqDDz5IdnY28fHxnHzyyWUX76644goKCgqYP38+zz77LBEREZx++unce++9R+wbFbngggt49NFHmTFjBpMnT6Zly5Zlf1+H/q5fffVVJk6cyJVXXklycjIPPfTQYfeRfP7553n88ce58cYbMZvNXHjhhTz00ENlr8fExPDVV1+VfYJd6r6GPK5MTU3VuLKaaVypcaXGlbVjXHnHHXcQGxurcWUFDv28GTdunMaVpQI5rnziiSe47rrrNK4UOYr8g7tIAoqOsuqyOdg/C9zirh1Lc9cUk6+2LEZeQzweD2vXrqVHjx5aHlRqhPpcYBQXF7N9+3ZatmxZbff0qI98Pl/ZUid17VNfr7zyCsuWLePDDz80OspxKyws5KyzzuL+++8/4kW0+vo+63Lfk8B56KGH2LZtG++88061nueuu+7CbDbz/PPPq++Joaqr/x1tHKSx5pEd7XujsWXDUlPjrer8P6ghjyvl2BrC+MeIcaVUTkPof/VNfRkHaRwsC5+4jk7zV7GziYmLvv69wn0+eO5G2s/+jj1JJi74ruJ9jocR/a8q5zR8xp+IiEh98fvvv7Nt2za6detGXl4e00rv+3P++ecbnCywGsr7lKqbPXs2ffr0ISQkhO+//54PPvigUrMMjpfb7WbHjh2sXbuWq666qtrOIyIiUtMaynirobxPqTqNK0VE5FjcjhwAnPYjrxRhDQn3PzawGX8q/ImIiATQG2+8wfbt27HZbHTu3Jn58+eXLTFSnxzpff7yyy+MHTsW8H/y838/8blmzRoj4koNWb9+PbNmzaKgoICmTZsyceLEav20/pYtWxg2bBinnXZa2RJyIiIi9YXGlX+NKyuicWX9pnGliIgciy/fvyy2K+TIZS5bqH+5Yqu7RiLVGlrqU6Saqc8FRn1ZhqCmackNqWnFxcWkpaXh8/koKioiJCSkXN9r3ry5gemkIdDPPTGSlvqsXbTUp9Q0/R8UWIfGlUeiceVf1PfESOp/dU99GQdpHCz/vvZ0uq3KZcNJ4fxj/s8V7vPtf14h6eHXyA2D039NDdi5tdSniIiINBjBwcE0b95cv/yJiIiIyAk5NK4UERERqYi5yAmAN+TIBeyQiCgAbA1sxt+RFz8VERERERERERERERERqWVsxS7/k/CwI+4TFp0AQJCrJhLVHir8iYiIiIiIiIiIiIiISJ1hK/YCYI6IPuI+4VH+wp/VC0UFjpqIVSuo8CciIiIiIiIiIiIiIiJ1hr3EX/izR8cdcZ+o+MZlz3Mzj3zv4PpGhT8RERERERERERERERGpM4KL/Y8hcY2PuE9kTFLZ87zsg9UdqdZQ4U9ERERERERERERERETqjNDSwl90kxZH3MditVJi9T/Pz1HhT0REaoH27duzbNkyAPbs2UP79u1JTU01OJWIiIiI1EUaW4qIiIhIfVCQl0uI0/88PqXdUfd1lRb+ivOyqjlV7WE1OoCIiFRO48aN+eGHH4iJiTE6ioiIiIjUcRpbioiIiEhddWDHb2XPGzfvcNR9XTagGIoc2dWcqvZQ4U9EGhxXiQezxYSzyE1QiBWvx4fNbjE61jFZLBYSEhKMjiEiIiIiperquBI0thQRERGRuit9zxaigEI7hIRFHnXfQzP+nAW51R+sltBSnyLSoLhdHlZ/sZM59/3AG/f+wJz7fmDNFztxuzzVds7PPvuMSy65hG7dunHaaacxatQoCgsLWb9+Pddffz2nnXYaJ510EiNGjOC33347YjsVLcf0xx9/MGbMGHr27Env3r259957ycr6a9r6yJEjmTJlCs899xynnnoqffr04ZVXXinXrsPhYNKkSfTu3ZuuXbsyaNAgvvnmGwoLC+nVqxefffZZuf2XLVtGjx49yM/PD9B3SERERKTuMWJcCcaPLSdPnsyUKVM0thQRERERwzjSdgFQZD/2vu6ywp+jGhPVLir8iUid5vP5cJV4KvXHWeTm18928suSHZQUugEoKXTz85Id/PrZTpxF7mO24fP5qpTv4MGD3H333QwZMoSlS5fy9ttvc8EFF+Dz+SgoKODyyy/nnXfeYcGCBTRv3pxx48ZV+qKHw+Hguuuuo1OnTrz//vvMmjWLzMxM7rjjjnL7ffLJJ4SEhLBgwQLuvfdepk2bxooVKwDwer2MHTuW1atX89xzz7F06VLuvvtuzGYzoaGhDBw4kEWLFpVrb+HChfTv35/w8PAqfS9EREREarOaHlfW1bHl4sWLCQ0N1dhSRERERAxTmLEfgKLgY+/rtpr8j0UF1RmpVtFSnyJSZ/l8PhY9t5oD2449TTs43Ma1T/Zmwzd7Knx9wzd76HVhc96e+CPF+a4jttO4dRSD7+mFyWSqVMb09HTcbjcXXHABycnJALRv3x6AM844o9y+TzzxBCeffDI///wz55577jHbnjdvHp06deKuu+4q2/bUU09x9tlns337dlq2bAlAmzZtuPXWWzGZTLRo0YJ58+axcuVK+vTpw48//sj69etZunRp2f5NmzYta2/o0KEMGzaMgwcPkpiYSGZmJt9//z1z5syp1PsXERERqQuMGFdC3Rxbtm/fnltvvRVAY0sRERERMYQzO8P/GHzsuW0eqwnw4SkurOZUtYcKfyJSp1XyGgmhkUEU5TnLPpH9v0oK3RTlOwmNDDrmBZqq6NChA2eccQaXXHIJffv2pW/fvvTv35+oqCgyMjKYOnUqq1atIjMzE6/XS1FREfv27atU25s2beK///0vPXv2POy1Xbt2lV1sadu2bbnXEhISyMzMBCA1NZVGjRqV7fu/unXrRps2bfjggw8YN24cH330EU2aNOGUU06pyrdBREREpNar7eNKqB1jy0OFxkM0thQRERGRmubJ9y/b6azE/bUPzfjzFRdVa6baRIU/EamzTCYTg+/phdvprdT+ZosJe6i1wos09lArYVF2rrz/5KO2YQ0yV/oT2QAWi4U5c+awevVqVqxYwdy5c3nxxRdZsGABjz76KDk5OUycOJEmTZoQFBTEVVddhctVuQtEhYWFnHvuudxzzz2HvZaQkPBXZmv5H/Umk6lsWang4GPPhx86dCjz589n3LhxLFq0iCuuuKJK3wMRERGR2s6IcSVobKmxpYiIiIgcD1O+f9lOd8ixS1ze0sKft6S4WjPVJrrHXy3gKsjD63LhyszwPxbkGR1JpM4wmUzY7JZK/fF6fHQ7N6XCdrqdm4LX4ztmG8dzUcJkMnHSSSdx22238cEHH2Cz2Vi2bBmrV69m5MiRnH322bRt25agoCCys7Mr3W7nzp3ZsmULycnJNG/evNyf0NDQSrXRvn17Dhw4wPbt24+4z6WXXsq+fft4++23+fPPPxk8eHClM4qIiIjUFTU9rtTYUmNLERERETk+5kL/7D1PiP2Y+3pspbMCnSXVGalWUeHPYN6SErJnz2FLn7782edMtvTpS/Ybb+ItqT2dUIVJqS9sdgu9LmrOKQNbYA/1fxrEHmrllIEt6HVRc2yVmBpeVevWrWP69Ols2LCBffv28cUXX5CVlUWrVq1o0aIFH330EVu3bmXdunXcc889lfqU9CHXXHMNubm53HXXXaxfv55du3axfPlyHnzwQTweT6XaOPXUUzn55JO57bbbWLFiBbt37+a7777j+++/L9snKiqKCy64gClTptCnTx8aNWpU5e+DiIiISH1ixLgSNLYUEREREQGwFJeuahEWcsx9vdbSMpgzsMvw12Za6tNAroI8smfPIfO118u2eR0OMqe9BkDMDaOwhUUYFc+fp7QwmT1vPl6HA3NkJDEjRxA/bhxm+7Gr6SK1jdVmoeeFzTnp4hY4i9wEhVjxenxYbdVzcSY8PJyff/6Zt956i/z8fJo0acIDDzzA2WefTUJCAg8//DCDBw+mcePG3HnnnUyZMqXSbSclJfHuu+/y/PPPM3r0aJxOJ02aNOHMM8/EbK785zpeeeUVnn32We666y6Kiopo3rw5d999d7l9rrzySj755BOGDBlS6XZFRERE6rOaHleCxpYiIiIiIgC2Yv8H00zhkcfc11s6Pjep8Cc1wRIUTPa8+RW+lj13HvFjx7J0/CXkZR0ATGDir8fS575DS8OYKL0bfQVfH1o9xmTCd6gNSu/FUPq1yVS+fZ/JxKDbXqHosy8rLkz6fESNHI43yEZI2LH/cVUXV0EelqBgPI5cLJFReJzFhhdLpfY79AnskIggACzV+JOwdevWzJ49u8LXOnXqxMKFC8ttu+iii8p9vXnz5rLnKSkp5b4GaNGiBa+++uoRzz937lwKCwvLbXvttdfKfR0dHc3TTz995DcBpKWlER0dzfnnn3/U/UREREQakpocV0LtGFv+L40tRURERKSm2Yv99+a2RsUec1+fzT9IN7kOvz93faXCn4E8jly8DkeFr3kdDtyZmXTYaaLkj/waTgaWmBjCUpqz70iFyXnziR8zhj/P70deQTaFIVAcbKLEbsIVYsEdbMUTGowvLBRLRBS26HhC4xoR2agZsSltSW7ZhfBK/KM8Gs1GFKl+RUVFpKenM3PmTIYNG0ZQUJDRkURERESkjtLYUkREREQCwV7iAyA4JuHYO9tsAJhdlVu+vj5Q4c9AlsgozJGRFRb/zJGRWOPi2NTKQl50JPjw/8H31yOlk/R8gM//9aHXTH/b59CxprJj/9rf9Lfd8PnKvk5q244WuccoTGZnY02IJzQ7m1AnkHsopBdwAUVANrC3/LHAbqAoCAqDoTgYSoLNOO0W3CFW/w05w0MxRURii4ojJC6JiKTmxKe0oUmrLkTGJNaJZVL/rkmTJkZHEDkus2bNYvr06Zx88smMGzfO6DgiIiIiUodpbCkiIiIigRBS7H+MSGx6zH19dv+Hzcxub3VGqlVU+DOQx1lMzMgRZcWqv4sZOQKPx8WAqYsNSObndbmOXphMTMD63GMc3Lae3P07KcrchzMnE68jFwoKMBcWYy1yEVTsIajYR3CJj9Bi/z9KMxDi9P/BAf5y4N8LhjnAvnLn9OEvIe5MjKHz518dfZnUceNY/s7zEBxCfNP2NG7Vmei4xgH73lTWoaVI44NsmLxeXMWFtaogKXIs48ePZ/z48UbHEBEREZF6QGNLERERETlRHreb0NLCX2xK62Pubwryrw6owp/UCFtYBPGln3LMnjuv1i1XeczCpLOEZu170qx9zyq1W1JUyP4dv3Nw9yZy9u2gKHM/zuwM3Hm5mPILMBcWYS1yYSt2lxUMQ4ohrBjMPgiLjsedmXn02YgZGSS/9z0lf2wBYD+ww/rXDMPiYBPO4ENLkgbhDQ3FHB6BNSoGe2wi4YnNiG/ahiYtuxAV1xiLter/VLQUqYiIiIiIiIiIiIhI4GSm7cRaWsNr1LzDMfc324MBsKjwJzXFbLcTc8Mo4m+8EU+eA0tEJB5nca0oDFVXYdIeEkqLjifTouPJVTrO5SzhwO4tONJ2Yk1MPPpsxNhYDnqyMYVCWBFYfGB3gz0fyAf//EF36Z9i/NMODxzWVhqw23J4wdAVbMETYscXFoIpPBxLVCzBMQmEJzYlNqU1HbqeSc6bb9eZpUhFRERERERERERERGq7Azt+xwa4LBCTcOylPs32EAAsbt8x9qw/VPirBQ4VgMyxcf7H0ptN1ga1qTBpC7LTtHUXaN0FV0He0Wcjet2cvmQ54J/6e3DvnxzYkUr23q0UpO+hJDsdtyMHX34e5oJirMVObEVugoq92Iv/mmFo9UKQB4IKgAIoXzAswV8wTAO2lp3fEhOD9avzj74U6T//Sb4jh/DI6EB+i0RERERERERERERE6q2sfTtIwj9ZpzIr9VlCwwGwuqo5WC2iwp8cU20sTFZlNqLFaqVx8w40rsS037/zuN1kpu3kwI7fydy9hYL0vRRnp+PJzcKXn4+poAhLUUnpkqRegot9BBdDXFIC7qysoy9Fmp7OgZtuYt/BLeTGWCiKDcGblEh4yw40O+k8Op7cD1uQ8bM+RURERERERERERERqi/yDu0gCiip5+dwWEgaA1aMZfyK1XnXPRrRYrSQmtyYx+dg3CP07j9uNyes9+lKkMTF40zNIyIGEHA9sP7QG6TZgKb/ZIDMaHNE2SuLDMTdJJrptdzr0uYRmbbsH4N2JiIiIiIiIiIiIiNQtJVkH/Y/BpkrtbwuL8j9qxp9I3VAbZyNarNZjL0Xq8+B9dTLbVn1J/tbf4MABQrIKiczxEJcDdhc0SYcm6S7Ykg1kAxspYD6rQiArxkR+TBCuhGiCmrYgqUtvuvS9hOi4xlXK6irIwxIUjMeRiyUyCo+zWPceFBEREREREREREZFaye3IAcAZbK7U/vawSACs7upKVPuo8CdSDSqzFGmHk86jw0nnHXZsUYGDDSs+Yt+6HyjeuRVLeiZhWSVEZ3uJyYeIIogo8sG+Evz3F0wD/steXmRjJOTEmCmMCcHbKJ7QFu1p2vMcOp3aH3tIaLnzeEtKyJ49h+x584+6VKrUTosWLeKpp57il19+MTqKiIiIiNRxGluKiIiISF3hy88DwBVcufJWSFQsAEEq/InIiTrepUhDwiI59cIRcOGIw147uHcrv//wMRmpP+Pes4ugTAfhWS7isn2EOiHOAXEOL+wsAAqAncAXbLJMIDMGHNFWiuPCGfTI2xR+8imZr71e1rbX4SiboRhzwyjN/AugkSNH0qFDByZOnGh0FBERERGp4zS2FBEREZGGzFRQCIAnpHKr/4VE+lcLtKnwJyKBYAuLwOPxkFHiJDHKfMLFtMTk1iRedcdh2z1uN9t++y9bV32G48+N+PbvIzizkMgcN3E5EOSBxhnQOMONJdNEaKNk9s6bX+E5sufOI/6f/zyhnLWdq6QYs8VCSUEB9rAwvB4PNnuw0bFqnMvlwlYLlscVERERqas0rvyLxpYiIiIiUhPMRU4AvCGVG3eHRydQAtg8UFJUeNjKePWRCn8iNWDfvn0kJiZWW/sWq5W23fvQtnufw14rKSpk449L2LP2e4p2/kFydHNa5OTgdTgqbMvrcOBOTyf1gdvYnv0HxY2isbfpSMs+A+l6xkAs1rr9Y8PtdLLqw4Ws+eyjsgs0vS6+lFMvG4o1KCjg53vggQdYtWoVq1at4u233wbgyy+/ZPr06fz0009kZGTQuHFjrrnmGq677joAfv75Z0aNGsW3335LQkJCWVtPPvkkv/32G++8806F53rnnXd44403OHDgAMnJydx0001cdtllZa+3b9+eRx55hO+//56ffvqJ0aNHM378+IC/ZxEREZGGoKbHlWD82PLyyy8ve11jSxERERExgq3Y5X8SHlap/aPjGpNW+jw3az+Jya2rJ1gtUrev4IvIMdlDQjnp/KGcdP7Qsm1elwtzZGSFxT9zZCTWmBhC/txP+2w3/JkBPyyHN5ezxv4A6fEm8hJD8TVNIb57b3pdOJyYhOSafEvl+Hw+3CUlldrX6/Pyy8eL+Wnhu2XbSgoKWPm+/+uTBg7GbD76TWGtdjsmk6nS+SZOnMiOHTto27Ytt912GwBRUVE0atSIl156iejoaNasWcOkSZNISEhgwIABnHLKKaSkpPDhhx8yZswYwP8J6o8//ph77723wvN8+eWXPPXUUzz44IP07t2bb7/9lgkTJpCUlES3bt3K9nv11Ve5++67mThxIhaLpdLvQ0RERKS+q+lxJdS9sWWjRo04/fTTy/bT2FJEREREapqt2AuAOSK6UvtHxCSWFf7ystJU+BOR+snjLCZm5Iiye/r9XczIEbhKith89ekUbF6HbV8G0RlOEjMhrATC9vpgbwGs2QwfbWbf5DmsjYGcBBvFjWMJbduZdmcNpl3Pc6p9dqDP5+O9Sfex74/UY+4bEhHJ2FffYM1nH1X4+upPP+KUS4Yw89YbKMqreDYkQJP2nRj22LOVvkATERGBzWYjODi43CesD12oAWjatClr167ls88+Y8CAAQBceeWVLFq0qOzizDfffENJSQkXX3xxheeZPXs2gwcPZvjw4QC0bNmStWvXMmfOHF588cWy/QYNGsSQIUMqlV1ERESkoTBiXAl1b2z5xhtvlCv8aWwpIiIiIjXNXuIv/NmjE46xp58tyI7TCkFuyMtJr85otYYKfyINkC0sgvhx4wD/Pf28DgfmyEhiRo4gftw4zHY7l9z2f+WOyc/N4pcv5nNwzXf4du4iPL2AhHQvEUXQKAsaZblgcxp8mwYzv+bXEEiPN5OfGIqpWTMSe5zJyRddS3hUbGDfTCUvkoRFx1DoyKGkoKDC10sKCih05BIWHXPMCzSBMH/+fBYuXMi+ffsoKSnB5XLRoUOHstevuOIKXnrpJdauXUuPHj1YtGgRF198MaGhFa9BvW3bNq666qpy23r16lW2BNQhXbp0CfybEREREakP6ui4EjS2FBEREZGGI7jY/xgSl1TpY1ylhb9CR1Y1papdVPgTaaDMdjsxN4wi/sYb8eQ5sERE4nEWY7bbK9w/PCqWc4aOh6F/3bfD43bzx5pv+eP7xRRu+Y3g/VlEp7tIzIaIIojY7YXd+fDr77D4d7Y/9i/SYyEnIQhn43jC2nelwzlDK7w3IYCrIA9LUDAeRy7eIDs+r7fc6yaTiWGPPVvpJZnMVgv2sLAKL9LYw8IIj43lmskvHLWNqi7HVJElS5bw7LPPcv/999OzZ0/CwsKYPXs269atK9snLi6Oc889l0WLFpGSksLy5csPu9ByPI50cUdERESkITNiXAkaW4qIiIiIVFVoaeEvukmLSh/jtEIYUKzCn4jUd7awCADMsXH+R5utSsdbrFY6ntKPjqf0K7c9O30vq7+YT8a6HzHt3kPEwUISM3yElkDjDGic4YTUffD1Ptyvf85PYZARb6YgKRxT85Z0v/ha2vY6j+zZc8ieN99/L8K2bfE+NLHC4p8tOLhSeV0lxfS6+NKye6/8Xa+LL8Xr8VS6raqw2Wx4/5Z79erV9OzZs2zpJIBdu3YddtyVV17J3XffTVJSEk2bNuWkk0464jlatWrF6tWrGTx4cLnztG5d/9esFhEREQmEujCuBGPHlm3atAnQuxARERERqbqCvFxCnP7niU07HH3nv3GXVsJcBTWzIofRVPgTkYCLSUjm/OH3wV/XHvC43WxYuYTtK5ZQ8mcqwQdyiM1wE5cDUQUQVeCFnQ5YtY4m59jJ+NcMMl9/vex4b34+3vx83FlZeO12zBZLlXPZ7MGcetlQwH/vlZKCAuxhYfS6+FJOvWwo1qCgE33rFUpOTmbdunXs2bOH0NBQmjdvzgcffMDy5ctJSUnhww8/ZMOGDaSkpJQ77swzzyQ8PJzXX3+93H1bKjJmzBjuuOMOOnbsSO/evfnmm2/48ssveeONN6rlPYmIiIg0ZEaNK8HYseWcOXOq7X2JiIiIiBzLgR2/lT1v3KJTpY9zlc53cRbkBjpSraTCn4jUCIvVSo8zL6PHmZeV235w71bWfD6f7I3/xbJ3P4nuCNqfcQb7HpxQYTve3FxMTZocdw5rUBCnXDqE0wb/g5LCQuyhoXg9nmq9OHPDDTfwwAMPMHDgQIqLi/n0009JTU3lzjvvxGQyMXDgQK655hq+//77cseZzWYGDx7Mv/71Ly6//PKjnqNfv35MmDCBN954g6eeeork5GSeeuopTjvtNAoLC6vtvYmIiIg0VEaMK8H4saWIiIiIiFHS92whCii0gz2k8svOuy0mwIe7qGFcJ1XhT0QMlZjcmv43TCq3zZWZ4V/eswI+jwefx4OzuAB7aMRxndNm9y+7FBoZBYDFWrUlTquqZcuW/Pvf/y637emnn+bpp58ut+3uu+8+7Ni0tDTOOussEhMTy22/4ooruOKKK8ptu+aaa7jmmmvKbfP5fGXPN2/efFz5RURERKRiNT2uBGPHln+nsaWIiIiI1DRH2i5/4a+Kq+p7rIcKf/nVEavWUeFPRGodS2QU5sjICot/JosFk9mMd+cesq1ezNExRCUc/wzA2iovL4/NmzfzySef8PrfljwVEREREakqjS1FREREpD4ozNgPQLG9asd5bCYAvCVFgY5UK6nwJyK1jsdZTMzIEWROe+2w18xRUXiLi8DjIdgDpGWRm5WFJyKM6KRmx3Xvv9ro5ptvZv369QwbNow+ffoYHUdERERE6jCNLUVERESkPnBmZ/gfg81VOs5j8Rf+fCUlAc9UG6nwJyK1ji0sgvhx4wDInjsPr8OBOTwcX3g41thYLKGhuFOScGWmE1zkJcgFZBVQkJuKKyyIiEbNsAVVcb53LTN37lyjI4iIiIhIPaGxpYiIiIjUB558/wpxzuCqTf7w2EoLhSr8iYgYx2y3E3PDKOJvvBFPngOXLYgdu3ZhMvt/SEdEJ0B0AsWFeRSm7cVe5MbqAavDiTPvT/JCLYQkNCEkPMrgdyIiIiIiIiIiIiIiJ8qUXwCAO7hqpS2vtbRQ6HQGOlKtpMKfiNRatrAIAMyxcXiKi8uKfn8XHBpBcMsOuF1OHAd2YssvweqBkAIPvoLdZAfvwRobT0RsUk3HFxEREREREREREZEAMRf679HnCanaTf58QaWFP5c70JFqpaothCoiYjCv11vhdqstiNimbQlr34niuHCcNjABwcU+rPvSyfljIzkHduLz+Wo2sIiIiMgJqmvjl59//pkbb7yRvn370r59e5YtW1budZ/Px0svvUTfvn3p1q0bo0aNYseOHeX2ycnJ4e6776ZXr16cfPLJTJgwgYKCgoBnPdLYUkRERKS+0vhH6jJLscv/JCy0Ssd5rf45cOYGUvjTjD8RqROCgoIwm83s27ePhIQEgoKCMJlMFe4bEtMIYiA/+yAeRy72Eh8UA8W5ZGZtwB1qJzy+MVZrUM2+CQP4fD5KSkowm81H/H6JVAf1PTGK+p4YqTr6n8/nIz09HZPJhM1mC0ib1a2wsJD27dszZMgQbr311sNenzlzJnPnzuWZZ54hJSWFl156idGjR7N06VLsdv8nd++55x7S09OZM2cOLpeLCRMmMGnSJF544YWAZKzK2FKksvR/kBhFfU+MpP5Xd/h8PpxOJ+np6ZjNZoKC6v91Mal/bMUeAEzhEVU7MMj/u5QKfyIitYjZbKZly5bs37+fffv2VeFIK05PIa78XGxOH4eGoAe27cEdZMYeGYPVVrWp4XWJz+fD5XJhs9k0AJcapb4nRlHfEyNVV/8zmUykpKRgsVTtBvZGOfvsszn77LMrfM3n8/H2229z00030a9fPwCmTJlC7969WbZsGQMHDmTr1q0sX76c999/n65duwLw0EMPMW7cOO677z6Skk58CffjH1uKHJn+DxKjqO+JkdT/6p7Q0FCaNWuGuYJb6ojUdvZi/4xVa1Rs1Q48VPhzN4wZryr8iUidERQURLNmzXC73Xg8niofn7bnT36ZPZkmGw4SWQA2wGmFrW2CaXzFtXQ/8/KAZzaax+Nh06ZNtGnTps5cLJT6QX1PjKK+J0aqrv5ns9nqTX/es2cP6enp9O7du2xbREQE3bt3Z82aNQwcOJA1a9YQGRlZVvQD6N27N2azmfXr13PBBRcEJMuJji1F/pf+DxKjqO+JkdT/6haLxYLValWRVuose4n/NgjBsVX8MKA9GACzS4U/EZFa59AyV8ez1FXzNl1o/vR7FOTlsmTKP4n5bh0p+6H9bvB+8xift5qM7dJL6T/mcSzW+vHj8dBFrODgYA3ApUap74lR1PfESOp/x5aeng5AXFxcue1xcXFkZGQAkJGRQWxs+U/wWq1WoqKiyo6vimMV9cxmsz7xLgFxqB/Vp2K91A3qe2Ik9b+6p77c4+/QGE8f4GpYQor9j2HxTar0d28K8q/4ZnF7A9JnjOh/VTlX/biyLSJSBWERUfzjiffwuN18NmMino+X0Ha7h3bbPDB1MV+/+wE55/Rg0H3TCQmLNDquiIiIiJyADRs2GB1BGhj1OTGK+p4YSf1PjKK+13B43W5CSwt/+R4ba9eurfSxJV7/TEGL21el446ltvY/Ff5EpMGyWK0MvPlZuPlZflwyh71vvkK71CJS0nyk/HsNqz89jV2nJnPOva/SuHkHo+OKiIiI1HkJCQkAZGZmkpiYWLY9MzOTDh384634+HiysrLKHed2u8nNzS07viq6du2qGQhSIzweDxs2bFCfkxqnvidGUv8To6jvNTwH924lp3TC6il9+xHfuGWlj931ZTwAVjf06NHjhLMY0f8OnbMyVPgTEQF6D7weBl7P1g0/8vPU+2m5JoNYB8Qu28u+7wfzfbcIOt78CN16Dyw7xlWQhyUoGI8jF0tkFB5nMbawCAPfhYiIiEjtlpKSQkJCAitXrqRjx44A5Ofns27dOq6++moAevbsicPhYOPGjXTp0gWAn376Ca/XS7du3ap8TovFootBUqPU58Qo6ntiJPU/MYr6XsORvnszNsBlgfhGLar0924N9V+ztbp9Ae0vtbX/6UYGIiJ/07prb4bNXk67z5exflBb0mIh1AndfsnDPPoeFl3WjY3fvI+3pITs2XPY0qcvf/Y5ky19+pL9xpt4S0qMfgsiIiIihiooKCA1NZXU1FQA9uzZQ2pqKvv27cNkMnHttdfy+uuv89VXX7F582buu+8+EhMT6devHwCtW7fmzDPP5OGHH2b9+vX8+uuvPPHEEwwcOJCkpCQj35qIiIiIiBgka89WAAqD/Su5VUVQ6e2crO6Ax6qVNONPRKQCMQnJXPX8R7icJSx56XaCvlhOy91eOm52keKNIWP6v8h8/fWy/b0OB5nTXvMfe8MozfwTERGRBmvjxo1ce+21ZV8//fTTAAwePJhnnnmGsWPHUlRUxKRJk3A4HJx00knMmjULu91edszzzz/PE088wXXXXYfZbObCCy/koYceqvH3IiIiIiIitUN+xl6SgCL7MXc9zKHCn02FPxERsQXZufze6XAvfP3uC7iWfU77M85g34MTKtw/e+484v/5T7Zt+JGkFp0Ji4iq4cSHa9KkidERREREpAE57bTT2Lx58xFfN5lM3H777dx+++1H3Cc6OpoXXnihOuKJiIiIiEgdVJJ10P8YbKrysSFRcQDYXAGNVGup8CciUknnXX03XH03rox0vA5Hhft4HQ7c6ekw8Rl2/bGFvBAoCIWiUDMloRZcYXa8EWGYo2MIik8iokkrElp2pkXHU4iMSQxo3kP3IIwPsmHyenEVF2omooiIiIiIiIiIiNQ57txsAJzBVb+DXWhkLKAZfyIicgSWqGjMkZEVFv/MkZFYY2NxZmQAEFHk/0OmF/ACLiAfSAM2Ad8BsBf4ww75YVAUYqI41IorLAhvRCimqGiC4hsR1qiZv0jY6VRiEpKPmvHQPQiz583H63BgjowkZuQI4seNw2w/jvnwIiIiIiIiIiIiIgbxFeQD4AquelkrPCaJEiDIAyVFhdhDQgOcrnZR4U9EpIo8zmJiRo4ou6ff38WMHIHH66blV8vY9ccaDmxdS86erRQf3Is7JxOzIw9rfglBhS5CCr2EFUJ4IVi9EFbi/wM+/AVCF1AApANbys5xANgeBPmhUBhqojjUgivMhic8DKIiueT2Vyj48BMyX9M9CEVERERERERERKTuMxUUAuAJsVX52MiYBNJLn+flpGEPaRnAZLWPCn8iIlVkC4sgftw4wH9PvyPNqGvbvQ9tu/c5ZnsuZwl7t25g35Y1ZO3+g6KDe/FkZYAjD2t+EUGFbkIKvYQW+ogoBJsHQpz+P+T4AHfpnyIsMR5CJjdiz7z5FZ4re+484m+8MTDfCBEREREREREREZEaYCkqAcAbElzlY6PimpQV/nIz9hPfWIU/ERH5H2a7nZgbRhF/44148hxYIiLxOIuPaxlNW5CdFh1PpkXHk4+5r8ftZt/OTez941eydm6m8OAe3FkHITcPa34hycmtaJGTc9R7EHryHJhj46qcU0RERERERERERMQI1uLSG/SFh1X5WFuQHafFv9RngSMzwMlqHxX+RESO06HlMg8V0cy2qk8zryqL1UrT1l1o2rrLEffxulxHvQehJSKyOiOKiIiIiIiIiIiIBJSt2AuAOSL6uI53Wf2Fv0JHVgBT1U5mowOIiEhgHboHYUViRg7H4yyu4UQiIiIiIiIiIiIix89eWvizRycc1/Gu0jkbRbma8SciInXMke5BGDt8OLHXjcJSOlNRREREREREREREpC4I9t/ij5C4pOM63lVaDXMWVHyLpPpEhT8RkXrosHsQhoeT//1ydgy7ml+7BvGPZxcbHVFERERERERERESkUkJLFzGLbtLiuI53N6DCn5b6FBGpp2xhEfjMZjJKnPgsVpYseALntm20X7KJb//zitHxRERERERERERERI6pIC+XEKf/eWLTDsfVhttq8j8WFQQqVq2lwp+ISD23b98+AK589Qv+bGEmyA2m/3uN/Ts3GZxMRERERERERERE5OgO7Pit7HnjFp2Oq41DhT9PUX5AMtVmKvyJiDQQtiA7XV98i6wISMyG/946FI/bbXQsERERERERERERkSNK37MFgEI72ENCj6sNT+lSn57iokDFqrVU+BMRaUBadDyZgluuwW2G9lvcLLh7kNGRRERERERERERERI4od/9OAAqDj78Nj9VfDvOVFAciUq2mwp+ISANz4aiH+e38pgB0+XInX7w52eBEIiIiIiIiIiIiIhUryjoAQLH9+Nvw2krLYc6SACSq3VT4ExFpgIa+uJTNbaxYvRD22nx2pP5idCQRERERERERERGRwzizM/yPwcdf0vKWzvijxBmISLWaCn8iIg2QxWrllFf+TXo0xDpgw12jcDWAT7uIiIiIiIiIiIhI3eLNywXAGWw5/jZspTf5c7kDEalWU+FPRKSBSm7ZCe/dN+O0QJvtHhaO7290JBEREREREREREZHyCgoBcAdbj7uJQ4U/s1uFPxERqcfOGTqeTQPaAdD1+zSWvP6gwYlERERERERERERE/mIuLALAE3ICN/krLfyZnJ5ARKrVVPgTEWngrnx6Ib93smP2QdysD9i85lujI4mIiIiIiIiIiIgAYC12+Z+EhR5/I0FBAJjdKvyJiEg9Z7FaOXvaYg7EQVQBbL3vFkqKCo2OJSIiIiIiIiIiIoK12F+sM4VHHH8jdv9sQYvLG4hItZoKfyIiQnzjltgn3EdRELTc7WXxzf2MjiQiIiIiIiIiIiKCvdhfrLNGxR53G6ZDhT+PLyCZajMV/kREBIDeA69n6+CeAHRfmc2HL9xicCIRERERERERERFp6Owl/mJdcGzScbdhCfYvE2pxqfAnIiINyNDH3mFjd/9/gk3mfs36FR8bnEhEREREREREREQaspBi/2NEQvJxt2EJDvM/ulX4ExGRBqb/9E/Zk2QivBj2P3Q/+blZRkcSERERERERERGRBsjjdhNaWviLTWl93O1YQ/yTHWwq/ImISEMTGZNI/KNPUGCHZvt9LL25v9GRREREREREREREpAFK37cdq/8WfzRp2eW42wkKiwLA6g5EqtpNhT8RETlMz3OHsPvqMwHo+ms+C5+4zuBEIiIiIiIiIiIi0tAc3L0JAJcFouIaH3c7trBIQIU/ERFpwAY/MIP1p/j/Q2zxn1X8/MW7BicSERERERERERGRhiRrz1YACoPBYrUedzvBkbEABKnwJyIiDdmg175gZ7KJUCc4nnic7PS9RkcSERERERERERGRBiI/fQ8ARcEn1k5oaeHPpsKfiIg0ZGERUTR9aiqOUGiSDl/dNMjoSCIiIiIiIiIiItJAlGSn+x/tphNqJyI6AfDP+HM5S044V22mwp+IiBxV59Mu5OCoi/ECnTcWs2DClUZHEhERERERERERkQbAnZsNgDP4xMpZEbFJZc/zsg+eUFu1nQp/IiJyTJfc9n9s6BMPQLuPf+OHD2cYnEhERERERERERETqO29+HgCu4OO/vx9AVGzjsuc5mftPqK3azvDC3/z58znvvPPo2rUrQ4cOZf369Ufd3+Fw8Nhjj9G3b1+6dOlC//79+e6772oorYhIwzX41c/Z2syM3QXuZ1/k4N6tRkcSERERERERERGResxcWASAJ8R2Qu3YQ0JxWfzP83PSTzRWrWZo4W/p0qU8/fTT3HLLLSxevJgOHTowevRoMjMzK9zf6XRy/fXXs3fvXl566SU+++wznnjiCZKSkircX0REAsceEkrHF2aSHQ5JWfDDzYPxuBvA3XBFRERERERERETEEJYi//34vCHBJ9yWq3TSYGFuxgm3VZsZWvibM2cO//jHPxgyZAht2rThscceIzg4mIULF1a4/8KFC8nNzWXatGmcdNJJpKSkcOqpp9KhQ4caTi4i0jC17tqbvBv/gccEHTe7+M99lxkdSUREREREREREROopa1HpxIPw8BNuy1la+Ct2ZJ9wW7XZiS2KegKcTie//fYb//znP8u2mc1mevfuzZo1ayo85uuvv6ZHjx48/vjjfPXVV8TGxjJo0CDGjh2LxWKp0vk9Hs8J5ReprEN9TX1OjFAd/a/f9ZNYsOYHeny1j06fb2PZ/CmcO+zugLUv9YN+9olR1PfESEb0P/V1ERERERGpz2wlXgDMEVEn3Ja7tCJWkp9zwm3VZoYV/rKzs/F4PMTFxZXbHhcXx7Zt2yo8Zvfu3fz0009ccsklzJgxg127dvHYY4/hdru59dZbq3T+DRs2HHd2keOhPidGCnT/a3PtU2zePpr22zzYX57D11EtiG3cOqDnkPpBP/vEKOp7YiT1PxERERERkcCwF/sLf/bohBNuy1V6m0BXYd4Jt1WbGVb4Ox4+n4+4uDieeOIJLBYLXbp0IS0tjdmzZ1e58Ne1a9cqzxIUOR4ej4cNGzaoz4khqrP/Jb00n50jhhGfC3+8/jhnL/oVi7VO/bci1Ug/+8Qo6ntiJCP636FzioiIiIiI1Ech/lv8EZbQ+ITbcltMgA9XUcEJt1WbGXaFNiYmBovFQmZmZrntmZmZxMfHV3hMQkICVqu13C/RrVq1Ij09HafTSVBQUKXPb7FYdDFIapT6nBipOvpfs7bd+fP20bienE27rR4W3TmAYa99FdBzSN2nn31iFPU9MZL6n4iIiIiISGCEFPsfIxs1O+G23KUz/jyF+SfcVm1mNurEQUFBdO7cmZUrV5Zt83q9rFy5kp49e1Z4TK9evdi1axder7ds244dO0hISKhS0U9ERALjvGvu4ff+rQDo8s0+Ppv5sMGJREREREREREREpD4oyMslxOl/nti0wwm357GaAPCWFJ1wW7VZlQt/L7/8Mnv37g3Iya+//noWLFjA4sWL2bp1K48++ihFRUVcccUVANx333288MILZftfffXV5OTk8OSTT7J9+3a+/fZb/vWvfzF8+PCA5BERkaobOuVDUjsEYfFB1L/eZ8u6FUZHEhERERERERERkTpu37aNZc8bt+h0wu15rP6SmLek+ITbqs2qvNTnV199xfTp0znllFO48sor6d+//3HPthswYABZWVm8/PLLpKen07FjR2bNmlW21Of+/fsxm/+qTTZu3JjZs2fz9NNPc+mll5KUlMS1117L2LFjj+v8IiJy4ixWK32nLeL3oYNIyoI/7h1Hsw9/xh4SanQ0ERERERERERERqaMy9/1JFFBoJyDXGr2lhT+fs+SE26rNqlz4+/DDD/n9999ZtGgRTz75JI8//jgDBgxgyJAhdOvWrcoBRowYwYgRIyp8be7cuYdt69mzJwsWLKjyeUREpPokJrfmj/vvpOShF2m1y8viW/szbPZyo2OJiIiIiIiIiIhIHZW7f6e/8BccmPa8Nn/hz1TiDEyDtdRx3eOvU6dOPPTQQyxfvpwnn3yStLQ0rrnmGi655BLeeust8vLyAp1TRERqub6XjeOPS7sA0HVFBh+/dIexgURERERERERERKTOKsrcB0BxsCkg7XmtFv8Tlysg7dVWx1X4O8Tn8+F2u3G5XPh8PqKiopg/fz5nn302S5cuDVRGERGpI/7x5H/4rWswZiDxrc/57b9fGB1JRERERERERERE6iBnTpb/0R6Ywp/P5l8E0+xyB6S92qrKS30CbNy4kUWLFrFkyRJsNhuXX345kyZNonnz5oB/ic7JkyczYMCAgIYVEZHar9/0pfxy+Xkkp8POB++gxYcrCYuIMjqWiIiIiIiIiIiI1CHevFwAnMGWgLTnC7IBYHJ5AtJebVXlGX+XXHIJV111FXv27OHJJ5/ku+++45577ikr+gEMHDiQrKysgAYVEZG6ITquMdGPPkJhEDTf5+OTmy80OpKIiIiIiIiIiIjUNQUFALhDjmsO2+FKC39mtzcw7dVSVS78XXTRRXz99dfMmDGDfv36YbEcXmmNjY1l06ZNAQkoIiJ1z8nnD2PHVacB0O1nB4ufGWdwIhEREREREREREalLzIXFAHiC7YFpMMjfjkUz/sq75ZZbSEpKqo4sIiJSjwyZ+CYbTgoHoOm7y1nzzUKDE4mIiIiIiIiIiEhdYS12+Z+EhQakPbM92P/o9gWkvdqqyoW/8ePHM2PGjMO2z5w5k9tuuy0goUREpH4Y8Nrn7GpsIqwEMh59mJzM/UZHEhERERERERERkTrAVuyfmWeOiAxIe6bgEACsKvyV9/PPP3P22Wcftv2ss87il19+CUgoERGpH8KjYmn85HPkhUBKmo8vbx5odCQRERERERERERGpA4KK/ffis0TGBKQ9S7B/5qBFhb/yCgsLsdlsh223Wq3k5+cHJJSIiNQf3XoPZP+IfniBLuuKWPDwMKMjiYiIiIiIiIiISC1nL/EX6IJjA3P7OVuo/7ZEmvH3P9q1a8fSpUsP27506VLatGkTkFAiIlK/XHb3K2w4IxaAth+s48clcwxOJCIiIiIiIiIiIrVZSLH/MTIpJSDt2UIjALC6A9JcrWWt6gE333wz48ePZ/fu3Zx++ukArFy5kiVLlvDSSy8FPKCIiNQPg1/7kq8uPYWWu7143/sI55mDsYaE4XHkYomMwuMsxhYWYXRMERERERERERERMZjH7Sa0tPAX06RVQNoMCosCwOYKSHO1VpULf+eddx7Tpk1j+vTpfP7559jtdtq3b8+cOXM49dRTqyOjiIjUA/aQUFpPmYbzqf+j+0uzyZrzNtnz5+N1ODBHRhIzcgTx48ZhttuNjioiIiIiIiIiIiIGSt+3Hav/Fn80adklIG2GlN4r0KYZf4c755xzOOeccwIcRURE6rv2Pc+h8KXWZM2dR+brr5dt9zocZE57DYCYG0Zp5p+IiEg95/F4eOWVV/joo4/IyMggMTGRwYMHc/PNN2MymQDw+Xy8/PLL/Oc//8HhcNCrVy8effRRWrRoYWx4ERERERGpdgd2/o4dcFkgKq5xQNoMjvDfiiionhf+qnyPPxERkRMRnNCI7PnzK3wte+48LEHBNZxIRERE/pfH4yE1NZXc3NxqaX/mzJm8++67TJo0iaVLl3LPPfcwa9Ys5s6dW26fuXPn8uijj7JgwQJCQkIYPXo0JSUl1ZJJRERERERqj5x92wEoDAaL9bjmsB0mPDoR8Bf+PO76W/2r8nfL4/Hw5ptv8umnn7J//35crvKLoa5atSpg4UREpP7xOHLxOhwVvuZ1OPDkOTDHxtVwKhERkYbtySefpF27dgwdOhSPx8OIESNYs2YNISEhTJ8+ndNOOy2g51uzZg3nn39+2UoyKSkpLFmyhPXr1wP+2X5vv/02N910E/369QNgypQp9O7dm2XLljFw4MCA5hERERERkdolP30PSUBRAOcIRMQkkl363JGdRkxCcuAar0WqPOPv1VdfZc6cOQwYMIC8vDxGjRrFBRdcgMlk4tZbb62OjCIiUo9YIqMwR0ZW+Jo5MhJLRMWviYiISPX5/PPP6dChAwDffPMNe/bs4dNPP+W6667jxRdfDPj5evbsyU8//cT27f5P8W7atIlff/2Vs846C4A9e/aQnp5O7969y46JiIige/furFmzJuB5RERERESkdinJOuh/tJsC1mZUXFLZ89yM/QFrt7ap8oy/jz/+mMmTJ3POOefwyiuvMGjQIJo1a0b79u1Zt25ddWQUEZF6xOMsJmbkiLJ7+v1dzIjheEqKMdtsBiQTERFpuLKzs0lISADgu+++46KLLqJly5YMGTKEt99+O+DnGzduHPn5+Vx88cVYLBY8Hg933nknl156KQDp6ekAxMWVXwUgLi6OjIyMKp3L4/EEJrTIMRzqa+pzUtPU98RI6n9iFPW9+s/lyAHAGWwO2N9zUHAYbjNYveDISjvudo3of1U5V5ULfxkZGbRr1w6AsLAw8vLyADj33HN56aWXqtqciIg0MLawCOLHjQP89/TzOhyYIyOJHT6cmBEj+GnSjXR7+EUiYxINTioiItJwxMfH8+eff5KQkMDy5ct59NFHASguLsZisQT8fJ9++ikff/wxL7zwAm3atCE1NZWnn36axMREBg8eHNBzbdiwIaDtiRyL+pwYRX1PjKT+J0ZR36u/XDn+RTmddgtr164NXMM2sJbAn5s24ApudEJN1db+V+XCX1JSEunp6TRp0oSmTZuyYsUKOnfuzIYNGwgKCqqOjCIiUs+Y7XZibhhF/I034slzYImIpHDXdrZfO5K4P7exYt05dH5tHs3a9zI6qoiISINwxRVXcMcdd5CQkIDJZCpbYnPdunW0atUq4OebMmUK48aNK7tXX/v27dm3bx//+te/GDx4cNnsw8zMTBIT//owUGZmZtmSpJXVtWvXaileivwvj8fDhg0b1OekxqnviZHU/8Qo6nv1358lTgC8oUH06NEjYO3+bAVKIDYi5LjbNaL/HTpnZVS58HfBBRewcuVKunfvzsiRI7n33nt5//332bdvH6NGjapqcyIi0kDZwiIAMMf6l/AKb92Ozf070HjXNlrs9bHt+uGkP/E4J50/1MiYIiIiDcL48eNp27YtBw4c4KKLLir7UKfFYmHs2LEBP19xcTEmU/l7dVgsFnw+HwApKSkkJCSwcuVKOnbsCEB+fj7r1q3j6quvrtK5LBaLLgZJjVKfE6Oo74mR1P/EKOp79ZeluLTwFxIc0L9jV2lVrCQ/94Tbra39r8qFv3vuuafs+YABA2jSpAlr1qyhefPmnHfeeQENJyIiDcug8S+wPLkVxU+/SlIWOO6dxLI7t9Jv5ANGRxMREan3LrroonJfOxyOgC+7eci5557L9OnTadKkSdlSn3PmzGHIkCEAmEwmrr32Wl5//XWaN29OSkoKL730EomJifTr169aMomIiIiISO1hLXL7n4SHB7Rdd2lVzFXoCGi7tUmVCn8ul4tJkyZx880307RpUwB69OgR0GmWIiLSsJ15xS2kNm3P3rvGk5wO9mffYtGeP7niwVlGRxMREam3ZsyYQUpKCgMGDADg9ttv54svviAhIYEZM2ZUeXnNY3nooYd46aWXeOyxx8qW87zqqqu45ZZbyvYZO3YsRUVFTJo0CYfDwUknncSsWbOw2+0BzSIiIiIiIrVPUIkXAHNEVEDbdVtNgA9XYX5A261NzFXZ2Waz8cUXX1RXFhEREQA6ntKPLvM/4s8WZuxuaP/WCt6746JjHygiIiLH5b333qNRI/+N7VesWMGPP/7IzJkzOfPMM5kyZUrAzxceHs7EiRP55ptvWL9+PcuWLePOO+8sd994k8nE7bffzooVK9iwYQNvvvkmLVu2DHgWERERERGpfYKK/YU/e3RCQNs9NOPPU1wY0HZrkyoV/gD69evHV199VR1ZREREyjRq1pZ+C1fyW9dgzED3z3by7+Gn4nKWGB1NRESk3snIyKBx48YAfPPNN1x88cX07duXMWPGVPoG8iIiIiIiIoESUnoJMCyhcUDb9Vj99xr3FhUEtN3apMr3+GvevDnTpk1j9erVdO7cmZCQkHKvX3vttQELJyIiDVtIWCSD3/2Z/9zSj+7fpdHt1zyWXHEyZ8/5jJiEZKPjiYiI1BuRkZHs37+fxo0bs3z5cu644w4AfD4fHo/H2HAiIiIiItLghBT7HyMbNQtou26bGfDiLSkOaLu1SZULf++//z4RERFs3LiRjRs3lnvt0A3YRUREAsVitTLsX9+y4OFhdFi0jvZ/uln1jwto8/IsWnftbXQ8ERGReuHCCy/knnvuoXnz5uTk5HDWWWcBkJqaSvPmzQ1OJyIiIiIiDUlBXi4hTv/zxKaBvd+411q6EKbTGdB2a5MqF/6+/vrr6sghIiJyVP944j2WJk8g8fXFNNvvY+/Y0WQ+PpFTLxxhdDQREZE678EHHyQ5OZn9+/dz7733EhYWBkB6ejrXXHONwelERERERKQh2bftr0lnjVt0Cmjbhwp/vnp8O6EqF/5ERESMMuDGp1iZ0prMx58nIQfy7n+Sz3f/Sf/RjxodTUREpE6z2WyMHj36sO2jRo2q+TAiIiIiItKgZe77kyig0A72kNCAtu21WQAwudwBbbc2qXLh78EHHzzq608//fRxhxERETmWMwaNZnNKW3bdfiMpaT7s//dvFu7dxpBJbxsdTUREpE7btWsXb731Flu3bgWgTZs2XHfddTRt2tTgZCIiIiIi0pDk7t/pL/wFB75tn81fFjM5XYFvvJYwV/UAh8NR7k9WVhb//e9/+fLLL8nLy6uOjCIiIuW073EWPd9bwpZWFoI80OGdn3n31n543PX3kzoiIiLVafny5QwYMID169fTvn172rdvz7p16xgwYAArVqwwOp6IiIiIiDQgRZn7ACgONgW8bZ/NBoDZ7Ql427VFlWf8TZs27bBtXq+XRx99VJ8EFRGRGhPfuCX9F67iw+vPpOvaQnos28vCEadz2ZzvA74EgIiISH33wgsvMGrUKO65555y259//nmef/55+vTpY1AyERERERFpaJw5Wf5He+ALfwSVFv5c3sC3XUtUecZfhY2YzYwaNYq33norEM2JiIhUij0klCHz/su685sA0HVtAZ8NOZWM/dsNTiYiIlK3bN26lSuvvPKw7UOGDOHPP/80IJGIiIiIiDRU3rxcAJzBlsA3bg8CVPirlN27d+PWEmsiIlLDLFYrw6Z9xe/XnILTAu22eVg9bCCb135vdDQREZE6IzY2ltTU1MO2p6amEhcXZ0AiERERERFpsAoKAHCHVHnRymMyBflvHGhx19/CX5W/a08//XS5r30+H+np6Xz77bcMHjw4YMFERESqYsikt/m8ySPETFtA0zQf6eP+SeYj99F74PVGRxMREan1hg4dyqRJk9i9eze9evUCYPXq1cycOZNRo0YZG05ERERERBoUc2ExAJ5ge+DbDg4BwOL2Bbzt2qLKhb/ff/+93Ndms5nY2FgeeOABhgwZErBgIiIiVdV/zGOsataW9ElPkpADBROnsHT3Fgbc+JTR0URERGq1W265hfDwcN544w3+7//+D4DExERuvfVWrrvuOoPTiYiIiIhIQ2ItdvmfhIUGvG1LsL9Nqwp/f5k7d2515BAREQmIUy8cwdbGrfjztjE02+8j5ZXF/GffdoY+/q7R0URERGotk8nEqFGjGDVqFPn5+QCEh4dTVFTE6tWry2YBioiIiIiIVDdbsQcAc0RkwNu2hob7H+tx4a/K9/jbvXs3O3bsOGz7jh072LNnTyAyiYiInJDWXXtz6oIv2dzGis0DXRas5d0bz8Gje9GKiIgcU3h4OOHh/l+Gd+7cyfDhww1OJCIiIiIiDUlQsf/+e5bImIC3bSsr/AW86VqjyoW/Bx98kDVr1hy2fd26dTz44IMBCSUiInKiYhKSGbjoF9afFAFAj2/TWHT1qRQVOAxOJiIiIiIiIiIiIkdiL/HPxguOTQp82+H+YqJNhb+//P777xUu89KjRw9SU1MDEkpERCQQbEF2rpq/irX9m+MFumwoYtmQMziwa4vR0URERERERERERKQCIcX+x8iklIC3HRweBWjGXzkmk4mCgoLDtufl5eHxeAISSkREJJCufukzNl/XhxIrtNnhZeOIS0n9eZnRsURERERERERERORvPG43oaWFv5gmrQLefkhUPABBroA3XWtYq3rAKaecwr/+9S/+7//+D4vFAoDH42HGjBmcdNJJAQ8oIiISCFc8OItlyU8RMXUuyQch6+bxLJ9wG2cOvsnoaCIiIob56quvjvq67uMuIiIiIiI1KX3fduxRMVgT4olt3SPg7YdGxeEF7G5/kdFirXKZrNar8ju65557GD58OBdddBEnn3wyAL/88gv5+fm89dZbAQ8oIiISKP2uncCvyW1Je3gSSVlQ+MjLfLLnTwaNf8HoaCIiIoa45ZZbjrmPyWSqgSQiIiIiIiIQHRZN4lfLcGdlYY2Ox1WQhy0sImDtR8YkklP63JGdRkxCcsDari2qXPhr06YNH330EfPnz2fTpk0EBwdz2WWXMWLECKKjo6shooiISOCcdP5QdqW05rebR9Bir4/mry9lwb4d/OPphUZHExERqXGbNm0yOoKIiIiIiAgA3pISHPPfJXvefLwOB+bISGJGjiB+3DjMdntAzhEZ27is8JeXpcJfmaSkJO66665AZxEREakRzdr3InLBV3xzQ386bHbRdfHvvJd+JkOnf1Mvp/eLiIiIiIiIiIjUZq6CPLJnzyHztdfLtnkdDjKnvQZAzA2jAjLzLywiCrcZrF7/jL/6yFzVAxYuXMinn3562PZPP/2UxYsXBySUiIhIdYuOa8yg//zMutOiAej+QwYfXHUy+blZxgYTERERERERERFpYCxBwWTPm1/ha9lz52EJCg7YuZyln/svdGQGrM3apMqFvxkzZhATE3PY9ri4OKZPnx6QUCIiIjXBFmRn2FsrWT+wNV4TdPqthA33/5OS3Gy8LheuzAz/Y0Ge0VFFRERERERERETqLY8jF6/DUeFrXocDT17Frx0PV2nhr9iRE7A2a5MqF/727dtHSkrKYdubNGnC/v37AxJKRESkJl31widsGX0utG/NKU/9i9w357KlT1/+7HMmW/r0JfuNN/GWlBgdU0REREREREREpF6yREZhjoys8DVzZCSWiIpfOx4um/+xOD87YG3WJlUu/MXFxbF58+bDtm/atIno6OhAZBIREalxl9/zGi3+NYOsufPIfP31sk8YHVpLPGPGDM38ExERERERERERqQYeZzExI0dU+FrMyBF4nMUBO5e7dMafsyBwswhrkyoX/gYOHMiTTz7JTz/9hMfjwePxsHLlSp566ikGDhxYHRlFRERqhD0ugez5NbOWuIiISG1y/vnnk519+KddHQ4H559/vgGJRERERESkIbGFRRA/dizxN91UNvPPHBlJ3C03Ez9uHLawiICdy201+R8L6+eH/K1VPeD2229n7969jBo1CqvVf7jX6+Wyyy7jzjvvDHhAERGRmlKZtcTNsXE1nEpERKT67d27F6/Xe9h2p9NJWlqaAYlERERERKSh+XXJG3To3Im2332Lp6gQS0QkHmcxZrs9oOc5NOPPU1QY0HZriyoX/oKCgpg6dSo7duwgNTWV4OBg2rVrR3JycnXkExERqTGH1hKvqPhnjozEEhaOx+3GYq3yf58iIiK10ldffVX2fPny5URE/PUpWq/Xy8qVK/W7noiIiIiI1Ig/P3+P8O/T2XVGCv3nfAmA2WYL+Hk8VhPgw1Oswl85LVq0oEWLFgDk5+fzzjvv8P7777No0aJAZRMREalRh9YSz5z22mGvxQ4fTsHy5Xz94h10mPwq7XueU+P5REREAu2WW24BwGQy8cADD5R7zWq1kpycfNh2ERERERGR6hC613/7gawwX7Wex2M1A158JYG7b2BtckJTFn766ScWLlzIl19+SXh4OBdccEGgcomIiNQ4W1gE8ePGAf57+nkdDsyRkcSMHEHsqFFsvWYY7bZ6cFx/E/+5tAdXTJqr2X8iIlKnbdq0CYDzzjuP999/n9jYWIMTiYiIiIhIQ5WQ5gYguttp1XoeT+k9/nwlJdV6HqNU+WplWloaixYtYtGiRTgcDhwOBy+88AIXX3wxJpOpOjKKiIjUGLPdTswNo4i/8UY8eY6ytcQtYREcuO5inC9NIzkduixYy8ere9Jx8jTa9zjL6NgiIiIn5Ouvvz5sm8PhIDIy0oA0IiIiIiLS0Gz69Wti8sFrgpMG3lCt5/LaLIALn9NZrecxirmyO37++eeMHTuWiy66iNTUVO6//36WL1+O2WymXbt2KvqJiEi9YQuLwGyzYYuN8z+G+e93dNaVt3LG0p9Y1zcetxna/+km9/p/8v6jIwxOLCIicmJmzJjB0qVLy76+7bbbOPXUUznzzDPLZgWKiIiIiIhUl9Rl/wbgYCwkJreu1nN5rRYATC53tZ7HKJUu/N1555106tSJ5cuX8/LLL9OvXz+CgoKqM5uIiEitExYRxbBZy8l85J/sS4CIIuj83q98cElXtm740eh4IiIix+W9996jUaNGAKxYsYKVK1cya9YszjrrLKZMmWJwOhERERERqe9KNm0EIKuRvdrP5QsqLfw5XdV+LiNUuvB35ZVXMn/+fMaMGcO7775Lbm5udeYSERGp1c656g5O+2QF63rH+mf/bXGTee1o3n/8WqOjiYiIVFlGRgaNGzcG4JtvvuHiiy+mb9++jBkzhg0bNhicTkRERERE6ruw/TkAuJo1qfZz+Ww2AMzuBj7j7/HHH+eHH37gqquuYsmSJfTt25ebbroJn8+H1+utzowiIiK1UnhULMPeWEH6w2PYH186+++dn1l8aTe2/b7K6HgiIiKVFhkZyf79+wFYvnw5Z5xxBgA+nw+Px2NkNBERERERqec8bjdJaf46U3zPvtV/wtLVLM2u+lnbqnThDyA4OJjBgwczb948Pv74Y9q2bUtcXBxXX301d999N1988UV15RQREam1zrv6bk5dsoJ1Z8TiMUGHP1ykj7iOhU+OMjqaiIhIpVx44YXcc889XH/99eTk5HDWWWcBkJqaSvPmzQ1OJyIiIiIi9dn6FR8TUQRuM5x6yZhqP5/vUOHPrcJfOS1atOCuu+7iu+++47nnnqOoqIi77rorkNlERETqjPCoWIbNWUHaQ9dzIA4iC6HT3P+y+LJu7Ej9xeh4IiIiR/Xggw8yfPhwWrduzZw5cwgLCwMgPT2da665xuB0IiIiIiJSn239bjEABxJMRMYkVvv5zPZgACz1dMaf9UQbMJvNnHfeeZx33nlkZmYGIpOIiEiddf7w+3AMGMXSOy6j66ocOmx2cWD4SFZf2ZsrJsw2Op6IiEiFbDYbo0ePPmz7qFGjaj6MiIiIiIg0KJ4tmwHIaRRcI+czHSr8eXw1cr6adtwz/ioSFxcXyOZERETqpMiYRIa9tZL9E67jQBxEFULHt39k0eXd2bV5tdHxREREKvTBBx9w9dVX07dvX/bu3QvAm2++ybJlywxOJiIiIiIi9VnE/nwAvM2b1cj5LMGhAFhdKvyJiIhIFfQb+QA9P/qa9adG4TVBx01O9l0znMXPjDM6moiISDnvvPMOzzzzDGeddRZ5eXl4vf4lbyIjI3nrrbcMTiciIiIiIvWVy1lCo4P+3z8an9qvRs5pCw0HwOJW4U9ERESqKDquMVe9/RN77x9OWixEFUCHN5ezcHB3dm1ZZ3Q8ERERAObNm8fkyZO56aabMJv/+jWxS5cu/PHHHwYmExERERGR+uznL98hxAklVjjl4mtr5JzWEH/hz+aukdPVOBX+REREasCFox6ix8dfs/6USLwm6JTqZO+wYSyecqPR0URERNizZw8dO3Y8bHtQUBBFRUUGJBIRERERkYZgz49LATiQaCIkLLJGzmmPiAbAqsKf3/nnn092dvZh2x0OB+eff35AQomIiNRH0XGNuWruf9lz79UcjIHoAujwxncsvKIHu7duNDqeiIg0YCkpKaSmph62ffny5bRu3dqARCIiIiIi0iBs2w6Ao1FYjZ3SHh4NaMZfmb1795bd7+HvnE4naWlpAQklIiJSn/W/YRLdPlrG+pMj8AKdfi9h91VD+fCFW4yOJiIiDcyrr75KUVER119/PY8//jhLl/o/bbt+/Xpef/11/u///o8xY8YYnFJEREREROqrqAOFAJhataqxc4ZFxgEQVE8Lf9bK7vjVV1+VPV++fDkRERFlX3u9XlauXElycnJg04mIiNRTMQnJXDVvFZ/PeoTg2QtIzIaYmV/z/o89OOP5d0hu2cnoiCIi0gBMmzaNq6++mqFDh2K325k6dSpFRUXcfffdJCYmMmHCBAYOHGh0TBERERERqYcK8nJplO4DoFnfmvu9IzQ6Di9gc4HH7cZirXSprE6o9Lu55Rb/LASTycQDDzxQvhGrleTk5MO2i4iIyNH1H/MYWZeM4cu7rqDLr/l0/q2Enf8Ywq/XXMCld75sdDwREannfD5f2fNLL72USy+9lKKiIgoLC4mLizMwmYiIiIiI1Hc/L51DkhsKg6DXeVfV2HkjYxuRg39JzLzcdKLjGtfYuWtCpQt/mzZtAuC8887j/fffJzY2ttpCiYiINCSxSU25av7PfDpjImFvLCIhB2L+9SXvr+hJnxfepXHzDkZHFBGResxkMpX7OiQkhJCQEIPSiIiIiIhIQ5H2yzckAQeSzJwUZK+x80bGNian9Lkj62DDLfwd8vXXXx+2zeFwEBkZGZBAIiIiDdXF454k45IxfH3XULquKaDzxmK2XTmYX0b055LbpxodT0RE6qn+/fsfVvz7X6tWraqhNCIiIiIi0lCYd+wGIL9xxDH2DKywiCg8JrD4IC8rrUbPXROqXPibMWMGKSkpDBgwAIDbbruNL774goSEBGbOnEmHDpqVICIicrziG7fkH+/+wpLXHyTyzQ+Iz4XY1z/nPz/04swX/k2jZm2NjigiIvXM+PHjy93DXUREREREpCbEpBUBYG3TrsbP7bRBiBMKczNr/NzVrcqFv/fee4/nn38egBUrVrBy5UpmzZrFp59+ypQpU3jjjTcCHlJERKShGXjT02RcPo6v7xxK17UFdNlQxJ9XXsqeB++k+4VXYwkKxuPIxRIZhcdZjC1MF2xFROT4DBw4UPfzExERERGRGpWTuZ+kDP/z1ucOqfHzu6z+wl+Ro/4V/sxVPSAjI4PGjf3rnX7zzTdcfPHF9O3blzFjxrBhw4aABxQREWmo4hu35B/v/cLW8YPIjITG8a3ocfaVZM+ew5Y+ffmzz5ls6dOX7DfexFtSYnRcERGpg461xKeIiIiIiEh1WPXxbKxecIRC1zMG1vj5XaXT4orzc2r83NWtyoW/yMhI9u/fD8Dy5cs544wzAPD5fHg8nsCmExEREQbd8hztFn9C4jOTyZ47j8zXXsfrcADgdTjInPYaGTNm4CrIMzipiIjUNT6fz+gIIiIiIiLSAGWt/QGAg4lmLNYqL055wg4V/lyF9e96WpW/mxdeeCH33HMPzZs3Jycnh7POOguA1NRUmjdvHvCAIiIiAonJrfEmutg35sYKX8+eO4/4Gyt+TURE5Eg2bdpkdAQREREREWmAbLsOAFDQJNqQ87ttJsCnwh/Agw8+SHJyMvv37+fee+8lLCwMgPT0dK655pqABxQRERE/jyO3bKbf//I6HHjyHJhjdY8mERERERERERGp3WLT/LetsXfoasj5PaXVMU9RgSHnr05VLvzZbDZGjx592PZRo0YFIo+IiIgcgSUyCnNkZIXFP3NkJJaISANSiYiIHJ+0tDSee+45li9fTlFREc2bN+epp56ia1f/L/4+n4+XX36Z//znPzgcDnr16sWjjz5KixYtjA0uIiIiIiIn5MCuLSRm+p93umCYIRncVv+MP09xoSHnr05VvscfwAcffMDVV19N37592bt3LwBvvvkmy5YtC2g4ERER+YvHWUzMyBEVvhYzfDgl2Rk1nEhEROT45ObmcvXVV2Oz2Zg5cyZLlizh/vvvJyoqqmyfmTNnMnfuXB599FEWLFhASEgIo0ePpqSkxMDkIiIiIiJyon5d8gZmIDsC2vc8x5AMHqsJAG9JsSHnr05VLvy98847PPPMM5x11lnk5eXh9XoBiIyM5K233gp4QBEREfGzhUUQP24ccbfcjDnSP7vPHBlJ3E03ETtyBFtu/SdrvltscEoREZFjmzlzJo0aNeLpp5+mW7duNG3alL59+9KsWTPAP9vv7bff5qabbqJfv3506NCBKVOmcPDgQX3gVERERESkjsvbuAqAg0lVXpQyYDzW0vJYPfxgYZULf/PmzWPy5MncdNNNmM1/Hd6lSxf++OOPgIYTERGR8sx2OzE3jKLtih9o8+MPtF3xA1HXjWDd7aOxrd9C3v0T2Lz2e6NjioiIHNXXX39Nly5duO222zjjjDO4/PLLWbBgQdnre/bsIT09nd69e5dti4iIoHv37qxZs8aIyCIiIiIiEiD2PekAFCfHGZbBe6jw53QalqG6VLmcumfPHjp27HjY9qCgIIqKigISSkRERI7MFhYBgDnWPziyR8cSfOtNZI6/nYQc2H3bPwl/ayHJLTsZmFJEROTIdu/ezbvvvsv111/PjTfeyIYNG5g8eTI2m43BgweTnu6/EBAXV/5CQFxcHBkZVVva2uPxBCy3yNEc6mvqc1LT1PfESOp/YhT1vbot/oALgNCOPQz7O/TaLP4nTmeVMxjR/6pyrioX/lJSUkhNTSU5Obnc9uXLl9O6deuqNiciIiIB0Pm0C1k1eSKOB54k+SCsG3slYf/+iui4xkZHExEROYzP56NLly7cddddAHTq1IktW7bw3nvvMXjw4ICea8OGDQFtT+RY1OfEKOp7YiT1PzGK+l7dk7lnM01z/c9D2vRm7dq1huTwWPwz/nwlzuPOUFv7X6ULf6+++iqjR4/m+uuv5/HHH8dZOv1x/fr1fPLJJ8yYMYPJkydXW1ARERE5ulMvHME3WQexPjWTlnt8fHftBVy44EdCwiKNjiYiIlJOQkLCYR8cbdWqFZ9//nnZ6wCZmZkkJiaW7ZOZmUmHDh2qdK6uXbtisVhOMLHIsXk8HjZs2KA+JzVOfU+MpP4nRlHfq7s++f4tANKj4byLhxiWY4s9CACrx0uPHj2qdKwR/e/QOSuj0oW/adOmcfXVVzN06FDsdjtTp06lqKiIu+++m8TERCZMmMDAgQOPO7SIiIicuHOH3cWS7HRSXv2Adls9LL32LC7/9y9YrMbdLFlEROR/9erVi+3bt5fbtmPHjrKVZVJSUkhISGDlypVlt5rIz89n3bp1XH311VU6l8Vi0cUgqVHqc2IU9T0xkvqfGEV9r+4pTF0LQEaSzdC/O1+Qv/BndnuPO0dt7X+Vvgro8/nKnl966aVceumlFBUVUVhYeNh9F0RERMQ4A296mkU56bR/ewWdfivh/VF9uGref42OJSIiUua6667j6quvZvr06Vx88cWsX7+eBQsW8PjjjwNgMpm49tpref3112nevDkpKSm89NJLJCYm0q9fP4PTi4iIiIjI8QrdmwVASdPEY+xZvUx/K/zVN+aq7Gwymcp9HRISoqKfiIhILXTFg7PYeJl/KbRuvzh476bzDE4kIiLyl27duvHqq6+yZMkSBg0axGuvvcaECRO49NJLy/YZO3YsI0aMYNKkSVx55ZUUFhYya9Ys7Ha7gclFREREROREJKR5AIjudpqhOUz2YAAsrvpX+KvSul/9+/c/rPj3v1atWnVCgURERCQwrnpmMe85zqf71/vo/s1+/n3vZVz13IdGxxIREQHg3HPP5dxzzz3i6yaTidtvv53bb7+9BlOJiIiIiEh12fTr18Tkg9cEJw28wdAs5kOFP7fvGHvWPVUq/I0fP56IiIjqyiIiIiIBNuy1r/j3tafTbVUuXT75g4XRoxgy8U2jY4mIiIiIiIiISAOT+uV7dAAOxkLn5NaGZjGHhAEq/DFw4MBqWdpz/vz5zJ49m/T0dDp06MDDDz9Mt27djnnckiVLuOuuuzj//PN57bXXAp5LRESkPrjyjR9YPOwUOm8spu07/+WTqHsYdOvzRscSEREREREREZEGpGTzbwBkNTJ++X5rsL/wZ62Hhb9K3+PvWEt8Hq+lS5fy9NNPc8stt7B48WI6dOjA6NGjyczMPOpxe/bs4dlnn+Xkk0+ullwiIiL1hcVqZeBb37G5jRWbB5rMWMJX86cYHUtERERERERERBqQsP05ALiaNTE2CGANDfc/ug0OUg0qXfjz+aqn6jlnzhz+8Y9/MGTIENq0acNjjz1GcHAwCxcuPOIxHo+He+65h/Hjx9O0adNqySUiIlKfhIRFcs5bX7C9qZkQJ0Q8P4efPn3L6FgiIiIiIiIiItIAeNxuktK8AMT3OsvgNGAPjwLA1pALf5s2bQr4Mp9Op5PffvuN3r17/xXIbKZ3796sWbPmiMdNmzaNuLg4hg4dGtA8IiIi9Vl0XGN6zlrIniQTEUXgmfQMG3/61OhYIiIiIiIiIiJSz61f/iERReA2w6mDbjA6zl+FP5fBQapBle7xF2jZ2dl4PJ7DCopxcXFs27atwmN++eUX3n//fT744IMTOrfH4zmh40Uq61BfU58TI6j/yf9KTGlL81dmsv+fY0jMhgN33cXWmXG06HBSQM+jvidGUd8TIxnR/9TXRURERESkLti6/EM6AwcSTHSNSTQ6DqGR8UD9nPFnaOGvqvLz87nvvvt44okniI2NPaG2NmzYEKBUIpWjPidGUv+T8oLJHX8rtv97lUZZsPWWUeya+CyRsYFfX119T4yividGUv8TEREREREpz7NlMwA5jYINTuIXFh2HDwhy+5chtVjrVLnsqAx9JzExMVgsFjIzM8ttz8zMJD4+/rD9d+/ezd69e7npppvKtnm9/jVhO3XqxGeffUazZs0qde6uXbtisVhOIL1I5Xg8HjZs2KA+J4ZQ/5Mj6dGjBz+GWcl/ZCpND/jY+twD9HjnKyJjEgLSvvqeGEV9T4xkRP87dE4REREREZHaLGJ/PgDe5pWr4VS3iNhGOACzDwrysoisBbMQA8XQwl9QUBCdO3dm5cqV9OvXD/AX8lauXMmIESMO279Vq1Z8/PHH5bZNnTqVgoICJk6cSKNGjSp9bovFootBUqPU58RI6n9SkTMv+ydfZKdjfWE+rXd5+fb6Cxmw4CfsIaEBO4f6nhhFfU+MpP4nIiIiIiLyF5ezhEYH/ZO4Gp/az+A0flGlhT+A3KyD9arwZzY6wPXXX8+CBQtYvHgxW7du5dFHH6WoqIgrrrgCgPvuu48XXngBALvdTrt27cr9iYyMJCwsjHbt2hEUFGTkWxEREalzLhz1EHvG9Mdthg5bXHx0XV887nq4uLmIiIiIiIiIiBji5y/fIcQJJVY45eJrjY4DQEhYJB6T/3l+1gFjwwSY4YuWDhgwgKysLF5++WXS09Pp2LEjs2bNKlvqc//+/ZjNhtcnRURE6q1Lbp/K+7nX0vGdn+myvoj/jD2bYXNWGB1LRERERERERETqgT0/LiUGOJBookdYpNFxALBYrThtEOKEAkeG0XECyvDCH8CIESMqXNoTYO7cuUc99plnnqmOSCIiIg3KlZPe5r2cgXRfuo3uK7N4b/yFDHvlC6NjiYiIiIiIiIhIXbdtOwCORmEGBynPZfUX/opyM42OElCaSiciIiIADPu/Jaw9KwGA7l/uZsHEfxicSERERERERERE6rqoA4UAmFq1MjhJea7SqXEl+bnGBgkwFf5ERESkzD9e+5oNvcIB6LRoAx88d6PBiUREREREREREpK4qyMulUboPgGZ9Bxqcpjy3Cn8iIiJS31msVga/+QOpHYOw+KDlW9/x6YyJRscSEREREREREZE66OelcwhyQ2EQ9DrvKqPjlHNoxp+7KN/YIAGmwp+IiIiUYwuy0//t79nS0kKQGxKnLeLb/7xidCwREREREREREalj0n7+GoADSWZsQXaD05TnsZoAcBeq8CciIiL1XFhEFH3f/JSdySZCSyD4mdf45av3jI4lIiIiIiIiIiJ1iHnnHgDyG0cYnORw7tLCn6e4yOAkgaXCn4iIiFQoNqkpnWcsYF8CRBVA0YTH2PTr10bHEhERERERERGROiImzV9Us7Ztb3CSwx2a8ecrKTY4SWCp8CciIiJH1LR1F5KnTiMjCuJzYe8dt7B760ajY4mIiIiIiIiISC2Xk7mfpAz/89bnXGFsmAp4bf4SmQp/IiIi0qB0OOk8Qp96nNwwaJIOv437B1lpu42OJSIiIiIiIiIitdiqj2dj9YIjFLqeMdDoOIfxWEsLfy6nwUkCS4U/EREROaaTzh+K88FbKLRD870+fhh1MQV5uUbHEhERERERERGRWipr7Q8AHEyyYLFaDU5zOK/NAoCpxGVwksBS4U9EREQq5awrbyX91itxWqHtdg+fX3sWLmeJ0bFERERERERERKQWsu06AEBB4yiDk1TMV1qMNLncBicJLBX+REREpNIuGvsE2687G48JOqY6WTyqLx53/RociYiIiIiIiIjIiYs94P/AuL1DV4OTVMxn8xf+zCr8iYiISEN2+b3T+f0K/4Ct6+p8Ftx8nsGJRERERERERESkNjmwawuJWf7nnS68xtgwR+Cz2wAwuT0GJwksFf5ERESkyv7x5ALWXdAUgB7fp/PeXbXvBs0iIiIiIiIiImKMX5e8gRnIioD2Pc4yOk6FTLYgACxur8FJAkuFPxERETkuw175gnVnxALQdek23n90hMGJRERERERERESkNsjbsAqA9CSrwUmOzGQPBsDsUuFPREREBIChM79jY/cQzED7Bb/y8Ut3lL3WpEkTw3KJiIiIiIiIiIhx7HvTAShOjjM4yZEdKvxZ3D6DkwSWCn8iIiJy3CxWK5e++QOb2tmweqHN139StH8PJq+X+CAbJq8XV0Ge0TFFRERERERERKQGxR9wARDWuZfBSY7MEhoKgFWFPxEREZG/2ENCuWDut2Sf05W2c94mb8EitvTpy599zmRLn75kv/Em3pISo2OKiIiIiIiIiEgN2JH6C/G5/ufdL77O2DBHYQ0OAzTjT0REROQw4VGxnDxlBllz55H5+ut4HQ4AvA4HmdNeI2PGDM38ExERERERERFpANZ/9jYA6dHQrG13Y8MchTU0EgCb2+AgAabCn4iIiASENSSM7PnzK3wte+48LEHBNZxIRERERERERERqWkHqWgAykmzGBjkGe1gEAFYV/kREREQO53Hkls30+19ehwNPXsWviYiIiIiIiIhI/RG6NwsAZ9Mkg5McnT0iBtCMPxEREZEKWSKjMEdGVviaOTISS0TFr4mIiIiIiIiISP2RmOYBIKrbqQYnObqQSBX+RERERI7I4ywmZuSICl+LGTkCj7O4hhOJiIiIiIiIiEhNSv15GdH54DXBKZeMMTrOUYVFxgNgd4HHXX+qfyr8iYiISEDYwiKIHzeOuFtuLpv5Z46MJO6mm4gbMwZb6brpIiIiIiIiIiJSP236agEAaXEQ37ilwWmOLjy2EQBmHxQV1J9b1KjwJyIiIgFjttuJuWEUbVf8QJsff6Dt998R0qkTP913vdHRRERERERERESkmpVs/g2A7CS7wUmOLSo2sex5dsY+A5MElgp/IiIiElC2sAh8ZjMZJU7W//gJO28bT+yX61jy2v1GRxMRERERERERkWoUvi8HAFezJsYGqYSwiFi8Jv/zgpyDxoYJIBX+REREpFrs27ePrmcPJrVbKABB//4Yl7PE4FQiIiIiIiIiIlIdPG43iQe9AMT3OsvgNMdmsVpxWv3PC3IyjQ0TQCr8iYiISLXqMeFlioIgJc3HB49fa3QcERERERERERGpBuuXf0hEEbjNcOqgG4yOUylOm/+x0JFhbJAAUuFPREREqlXb7n3444wkAJp8vp7s9L0GJxIRERERERERkUDb+t1iAA4kmIiMSTzG3rWDu3TGX0l+rrFBAkiFPxEREal2/SfPJzsCYvPgs4eHGx1HREREREREREQCzLN1CwA5jYINTlJ5LhX+RERERKouJiGZPRd2AaD9j2ls3fCjwYlERERERERERCSQIvbnA+Br0dzgJJV3aMafuzDf2CABpMKfiIiI1IjBj8xjTyKEOOHXJ8cbHUdERERERERERALE5Syh0UEvAI1OOd/gNJXntpr8j8V5BicJHBX+REREpEbYguyUXDUIgE7rCvnp07cMTiQiIiIiIiIiIoHwyxfzCHFCiRVOufhao+NUmqe08OctKjI4SeCo8CciIiI1ZtAtz7GllQWLD9Jefc7oOCIiIiIiIiIiEgC7V34GwIFEEyFhkQanqbyywl+xCn8iIiIixyXh5jvxmKDdVg9LXn/Q6DgiIiIiIiIiInKitm0HwNE4zOAgVeOx+ctkPmeJwUkCR4U/ERERqVFnDBpNarcQAILe+xBXPRpYiYiIiIiIiIg0RFEHCgEwtWxlcJKq8VoPFf6cBicJHBX+REREpMb1mPAKRUGQkubjg8frzrrvIiIiIiIiIiJSXkFeLo3SfQA06zvQ4DRV47VaADCp8CciIiJy/Np278Pm0xMBaPLFerLT9xqcSEREREREREREjsfPS+cQ5IZCO/Q67yqj41SJ11Za+HO5DU4SOCr8iYiIiCH6PzGP7AiIdcBnD48wOo6IiIiIiIiIiByHtJ+/BuBAohlbkN3gNFXjs9kAFf5ERERETlhsUlP2XNgFgHY/HmDrhh8NTiQiIiIiIiIiIlVl2bkbgPzGEQYnOQ5B/sKf2eUxOEjgqPAnIiIihhn8yDz2JkKoE3598jaj44iIiIiIiIiISBVFpxUDYG3b3uAkxyEoCACL22twkMBR4U9EREQMYwuyU/wP/02fO64v4KdP3zI4kYiIiIiIiIiIVFZO5n6SMvzP2553pbFhjofdvzSpWYU/ERERkcAYdOvzbGlpweqFtFefNzqOiIiIiIiIiIhU0qqPZ2H1giMUOp92sdFxqsxsDwbA4vIZnCRwVPgTERERwyXcciceE7Tb6mbp9AlGxxERkRo0Y8YM2rdvz5NPPlm2raSkhMcee4zTTjuNnj17Mn78eDIyMgxMKSIiIiIiFclcswKAg0kWLFarwWmqzhIcCoDVo8KfiIiISMCcMWg0qd1CALC9+wEuZ4nBiUREpCasX7+e9957j/bty98L5KmnnuKbb75h6tSpzJ07l4MHD3LrrbcalFJERERERI4kaPcBAAqaRBsb5DhZQsL8j5rxJyIiIhJY3R6cSlEQpKT5WPzEdUbHERGRalZQUMC9997L5MmTiYqKKtuel5fHwoULeeCBBzjjjDPo0qULTz31FGvWrGHt2rXGBRYRERERkcPEHvB/eNvevovBSY6PLTQCAKvb4CABpMKfiIiI1Arte5zF5tMTAEj+fB05mfsNTiQiItXp8ccf5+yzz6Z3797ltm/cuBGXy2mHQ1oAAFBFSURBVFVue+vWrWnSpIkKfyIiIiIitciBXVtIzPI/73ThNcaGOU6HCn+2elT4q3sLroqIiEi91f+J+WwadCGxDvjsoeEMe/1royOJiEg1WLJkCb///jvvv//+Ya9lZGRgs9mIjIwstz0uLo709PQqn8vj8Rx3TpGqONTX1OekpqnviZHU/8Qo6nu1w69LZtMKyIqA07v2qZN/H/aIaMBf+KtsfiP6X1XOpcKfiIiI1BqxSU3Zc0FnYhb9RtsV+9n2+ypadTrV6FgiIhJA+/fv58knn+SNN97AbrdX+/k2bNhQ7ecQ+Tv1OTGK+p4YSf1PjKK+Z6ys1T/SCkhPstTZ1TmyHIUk4i/8VfU91Nb+p8KfiIiI1CqDH53Pd8t7kJwOvzxxM63e/cXoSCIiEkC//fYbmZmZXHHFFWXbPB4PP//8M/Pnz2f27Nm4XC4cDke5WX+ZmZkkJCRU+Xxdu3bFYrEEJLvI0Xg8HjZs2KA+JzVOfU+MpP4nRlHfqx12peUAUJwcT48ePQzNcrzs7kwAglzQsZLvwYj+d+iclaHCn4iIiNQqtiA7xVcNhFeX0HFdAf/97G1Ou+hao2OJiEiAnH766Xz88cfltj344IO0atWKsWPH0rhxY2w2GytXrqR///4AbNu2jX379h3XxQSLxaKLQVKj1OfEKOp7YiT1PzGK+p6x4tNcAIR17lVn/x4iY5MoACw+KC7MJywiqtLH1tb+p8KfiIiI1DqDbn2ej5Z8RtvtHva/+hyo8CciUm+Eh4fTrl27cttCQ0OJjo4u2z5kyBCeeeYZoqKiCA8PZ/LkyfTs2bPOfopYRERERKS+2ZH6C/G5/ufdL77O2DAnIDI2kYLS5zkZe6tU+KutzEYHEBEREalI/E234zVB+z/dLJ0+weg4IiJSgyZMmMA555zDbbfdxogRI4iPj+eVV14xOpaIiIiIiJRa/9nbAKRHQ7O23Y0NcwIiohLwlj7PzzloaJZA0Yw/ERERqZV6XzqW/8x7nS7ri7C++wGuGx7BFmQ3OpaIiFSDuXPnlvvabrfzyCOP8MgjjxiUSEREREREjqbw97UAZCTZjA1ygixWKy4b2F2Qn5VudJyA0Iw/ERERqbW6TZhKURA0TfPxweRRRscREREREREREREgZF8WAM6mSQYnOXHO0ilyRfnZxgYJEBX+REREpNZq3+MsNp+eAEDjz9aSk7nf4EQiIiIiIiIiIpKY5gEguvvpBic5ca7Swl9Jfo6hOQJFhT8RERGp1fo/MZ/scIhzwGcPDzc6joj8f3v3HSZVdbhx/J2+fdkKC6IowoJ0bBEh1kiCHcQYAaUogjWWn5oYC6KiiRhFUVERpSixgVHQKBpbxI4K2BBUOmyD2Trl3vn9Mbsr6MICW87s7PfzPPPMzJ07M+8MR/Dsu+deAAAAAECr9vXHS9SmTLId0mGnjDUdp8FC1UcrDZRuM5qjsVD8AQCAmJbZtqPW/+4QSVKX/23Smq8+MpwIAAAAAACg9fp6yXxJ0pYsKTvvQMNpGi7sqr6uLDMbpJFQ/AEAgJh3+k1ztCFHSgpIn0y+2HQcAAAAAACAViv43deSpOK2PsNJGkfY45AkhSoo/gAAAJqFLzFJVWcPkSR1/6JcH70213AiAAAAAACA1ill4zZJUnj/9maDNBLLHS3+rKoKw0kaB8UfAABoEU65bKq+P9Alty1tnHaX6TgAAAAAAACtjhUOq+0WW5KU3f+3htM0jpriL1JVaThJ46D4AwAALUbGhMtlO6T878N65ZEbTMcBAAAAAABoVb5890WlVElhp3TEKWNNx2kUljtaldnBgOEkjYPiDwAAtBgDTx+vr3omSpJc8xYoFCf/QwYAAAAAANASrH57gSRpc45DaRm5htM0Dru6+FMgPn7ORPEHAABalN5/vUeVXqnjlogW3j7GdBwAAAAAAIBWw/p+lSRpW7sEw0kaj+2JVmWOUMhwksZB8QcAAFqU/H7H6rsjcyRJea8s07aiTWYDAQAAAAAAtBKpm0slSZFOBxhO0nhst1uS5AhS/AEAABhx0m3ztC1FyvJLr9440nQcAAAAAACAuBcKBtRua0SSlHfkiYbTNJ6I1yNJcoQsw0kaB8UfAABocTLbdtTaE7tLkrr8b6N+/PoTw4kAAAAAAADi2yevzVViUAq4pcMGjzIdp9FEPNEVf84wxR8AAIAxZ9w8VxtzpKSA9NHkiabjAAAAAAAAxLV1778iSdqc61BicprhNI3H4fNJkpys+AMAADDHl5ikiuF/kCR1/7xMH70213AiAAAAAACAOPbDj5Ikf16y2RyNzOH1SpJc4YjhJI2D4g8AALRYp15+j77v5JTbljZOu8t0HAAAAAAAgLiVvrkieuOgzmaDNDJHQqIkyRW2DSdpHBR/AACgRcuYeIVsh5T/fVivPnqj6TgAAAAAAABxp7x0u9oVRFfEHXD0EMNpGpfLV1P8seIPAADAuIGnj9dXPaP/g+ac97yscNhwIgAAAAAAgPjy0aKZ8oalCp/U//g/mo7TqFyJSZIkd5z8SIniDwAAtHi9/3qPqjxSx80RLbjtfNNxAAAAAAAA4srWT96SJG1u65TH6zMbppG5k1Kj16z4AwAAiA35/Y7Vt0dmS5LaLf5M/pKtZgMBAAAAAADEEddP6yRJZe1SDSdpfN7kNEmSJ2Q4SCOh+AMAAHHhxMmztS1FyvJLi//2J9NxAAAAAAAA4kabzVWSJHeXfMNJGl9CSoYkDvUJAAAQU7LzDtTaE7tJkg5+b6N+/PoTw4kAAAAAAABavpKCDWpbFL3d5fizzIZpAglpbSRJHoo/AACA2HLGzfO0MUdKDkgf3TbRdBwAAAAAAIAW7+NFj8ttS/4kqceRfzAdp9Elp+dKkrwUfwAAALHFl5ikirMGS5K6LyvTx689bTgRAAAAAABAy1a07H+SpK1tXXK53YbTNL7UNtmSJLctlZduN5ym4Sj+AABAXDn1inv1fSen3La0YdodpuMAAAAAAAC0aN61myRJ5e3bmA3SRNKy82pv+4s3GUzSOCj+AABA3Glz0aWyHVL+92H957GbTccBAAAAAABosTK3BCVJCd16Gk7SNFLTc2RX3/aXbDWapTFQ/AEAgLgz6MyJ+rpnQvTO3GdlhePkIO0AAAAAAADNaPPaVcotjt7u/rtzzYZpIi63WyFP9HZFSYHZMI2A4g8AAMSlntdPVZVH2n9zRAtuG206DgAAAAAAQIvz6cuPySmpOFXK7/tb03GaTLD61IWVpcVmgzQCij8AABCXuh16vL49Mnpy5ravfBoXh2oAAAAAAABoTqUrPpEkFbR1G07StMK1xV+J2SCNgOIPAADErRMnz9a2ZCl7u7Toxvg8HAUAAAAAAEBT8a2PHvqycr8sw0maVqi6+AuWbzcbpBFQ/AEAgLiVnXeg1p3YTZLU5d0NWvvtZ4YTAQAAAAAAtBzZW0KSpJRD+htO0rRqVvyFykvNBmkEFH8AACCunX7LPG3MkZID0ge3XmQ6DgAAAAAAQIuw5quPlF29AK7vyWPNhmliYbcjel1ZbjhJw1H8AQCAuOZLTFLFWYMlSd2Xlenj1542nAgAAAAAACD2rfjPXEnS1gypY+eehtM0rZriz6qqMJyk4Sj+AABA3Dv1inv1fSen3La04f47TMcBAAAAAACIeRVffS5JKsr1mA3SDKzq4s+uqjScpOEo/gAAQKvQ5qJL5e58kE74872yg0GFigplh0Jxcex2AAAAAACAxpa4oViSFOzY1nCSpme5o3VZJFhlOEnDUfwBAIBWYdCZE3XQ00+rasVKrRo4SN8fPUirjh6oksefkB0ImI4HAAAAAAAQU3K3WpKkNn1+YzhJ07M91XVZIGg2SCNwmw4AAADQHELlpSp54gkVPfRQ7Tbb71fR9AclSRljR8uTnGoqHgAAAAAAQMz4+uMlalMm2Q7psFPGmo7T5GqLv2DIbJBGEBMr/ubNm6fjjz9evXr10vDhw/Xll1/uct9nnnlG5557rg4//HAdfvjhGj169G73BwAAkCSXN0Elc+fV+VjJnLlyeROaOREAAAAAAEBs+nrJfEnSliwpO+9Aw2manu12SZIcoZa/4s948bd48WJNmTJFl1xyiRYsWKBu3bpp3LhxKioqqnP/Dz/8UCeffLJmz56t+fPnKy8vT2PHjtWWLVuaOTkAAGhJLP922X5/nY/Zfr+s0rofAwAAAAAAaG2C330lSSpu6zOcpHlEvB5JkiNkGU7ScMaLv1mzZunss8/WsGHDdPDBB2vSpElKSEjQ888/X+f+U6dO1YgRI9S9e3d17txZt912m2zb1tKlS5s5OQAAaElcaelypqXV+ZgzLU2u1LofAwAAAAAAaG1SNm6XJIUP6GA4SfOIeKJnxnNS/DVMMBjUypUrNWDAgNptTqdTAwYM0LJly/boNSorKxUOh5Went5UMQEAQBywglXKGDWyzscyRoxQqIwVfwAAAAAAAFY4rLZbbElSdr9BhtM0D4c3urLRGbYNJ2k4t8k3LykpkWVZysrK2ml7VlaW1qxZs0evcffddys3N3en8nBPWFbLb23RMtSMNcYcTGD8wZRYHHvOhCRljx8vKXpOP9vvlzMtTRkjRihz1Eh9fsU4ZVz3F3XqdqjhpGiIWBx7aD1MjD/GOgAAAIDG9vnbC5RSJYWd0hGnjDUdp3l4vZIo/ox75JFHtHjxYs2ePVs+394dZ3b58uVNlAqoG2MOJjH+YEqsjb127dopd+xoZU+YIKvUL1dqmgLbivTVRaOVvHyV1kw8Xz9cN1kZ7eL/pNXxLtbGHloXxh8AAACAluyHd19UD0mbcxzqlZFrOk6zcCYkSpLcIYq/BsnIyJDL5VJRUdFO24uKipSdnb3b586cOVOPPPKIZs2apW7duu31e/fq1Usul2uvnwfsLcuytHz5csYcjGD8wZRYH3sRSc70NopI8mbmqHTUqXLfco/22yqt+8dNavv4c8o7IN90TOyDWB97iG8mxl/NewIAAABAY7G+XyVJ2tYuwXCS5uNMSJIkucIRw0kazmjx5/V61aNHDy1dulQnnniiJMm2bS1dulQjR9Z9Dh5JevTRR/Xwww9r5syZ6tWr1z69t8vl4odBaFaMOZjE+IMpLWXsHX3ahXo3HFTp5AfUcXNEKy44S945Lym3Q2fT0bCPWsrYQ3xi/AEAAABoyVI3l0qSIp0OMJyk+bgTo8WfO2w4SCNwmg4wZswYPfPMM1qwYIFWr16tW265RZWVlRo6dKgk6dprr9XUqVNr93/kkUd033336Y477lCHDh1UUFCggoIClZeXm/oIAAAgDgwaeonKr7tQ5T7pgI0RfXL+aSress50LAAAAAAAgGYTqKxQu63RVW95R55oOE3zcSemRK9Z8ddwQ4YMUXFxsaZNm6aCggJ1795djz32WO2hPjdt2iSn8+d+cv78+QqFQrr88st3ep1LL71Ul112WbNmBwAA8eW4c67SkmCVnHfP0YHrbb0/6vca+PQStcnKMx0NAAAAAACgyX32xtNqE5QCHumwwaNMx2k23pR0SfGx4s948SdJI0eO3OWhPefMmbPT/TfffLM5IgEAgFbqxPP+qv+Egsq991/qvNbWuyN/p2OeelNpreRk1gAAAAAAoPVa9/4raiNpc45DfZPTTMdpNt7kNpIkTxwUf8YP9QkAABBrBo+7RRsvPkMBt3TwD5b+O/J4lZduNx0LAAAAAACgaa35UZLkz0s2m6OZJaZGV/x5QoaDNAKKPwAAgDqcPHGK1o3/g4IuqetqS6+dO0iV5X7TsQAAAAAAAJpM+uaK6I2DOpsN0syS2+RIkrys+AMAAIhfp15+j34Yc5zCTqnbqpBeGTFIgcoK07EAAAAAAAAaXXnpdrUriEiSOg06xXCa5pWSHi3+3LZa/C9+U/wBAADsxhnXPKhVo45W2Cl1/yaol0YOUCgYMB0LAAAAAACgUX20aKa8llThk/odO9x0nGaVnp1Xe3t70RaDSRqO4g8AAKAeQ//ymL4953DZDqnHyoAWjjpKVjgOjv0AAAAAAABQbcsn/5UkbW7rlMfrM5ymeaVltK29XVqy1WCShqP4AwAA2ANn3TRbK8/qI1tSzy8q9fyo31D+AQAAAACAuOH+ab0kqTQvzXCS5udyuxVwR2+XbaP4AwAAaBXOnjxfK87oLknqtaxcz405mvIPAPbBjBkzNGzYMPXr109HHXWULr74Yq1Zs2anfQKBgCZNmqQjjzxS/fr102WXXabCwkJDiQEAAID412ZzlSTJc3BXw0nMCFUXf1WlxWaDNBDFHwAAwF74450v6ItTDpYk9f7Yr2cvPMZwIgBoeT766CONGDFCzzzzjGbNmqVwOKxx48apoqKidp877rhD//3vf3Xvvfdqzpw52rp1qy699FKDqQEAAID4VVKwQe2qf8+uy/FnmQ1jSMgTva70l5gN0kAUfwAAAHvpnLtf0he/P0CS1GdpseZf+FvDiQCgZZk5c6aGDh2qLl26qFu3brrzzju1ceNGrVy5UpJUWlqq559/Xtdff72OOuoo9ezZU3fccYeWLVumzz//3Gx4AAAAIA599NJMuSKSP0nqceQfTMcxombFX7B8u9kgDUTxBwAAsA/OufdVfXFCe0lSn3cLNP/i4w0nAoCWq7S0VJKUnp4uSVqxYoVCoZAGDBhQu0/nzp3Vvn17ij8AAACgCRR/8b4kaUtbl1xut+E0ZoRriz+/2SAN1Dr/9AAAABrBOdPf0NPjj1Hfd7aqz5ubNP+yk3TO/a+ZjgUALYpt27rjjjvUv39/de0aPZdIYWGhPB6P0tLSdto3KytLBQUFe/X6lmU1WlZgd2rGGmMOzY2xB5MYfzCFsdf4vGs3SZIq8tJb7fcadjskRRSuKNvtd2Bi/O3Ne1H8AQAANMCfHnlb88cerT7vF6vP6+s0/6qTdc49i0zHAoAWY9KkSVq1apWeeuqpJnn95cuXN8nrArvCmIMpjD2YxPiDKYy9xpO1JShJCu13YKs9yoZV3ZiVlRTt0XcQq+OP4g8AAKCBhj/ytp4bO1C9P9quPovX6BnPmTr7rgWmYwFAzLv11lv11ltvae7cuWrXrl3t9uzsbIVCIfn9/p1W/RUVFSknJ2ev3qNXr15yuVyNlhnYFcuytHz5csYcmh1jDyYx/mAKY69xbfrpWyUUR28ffsYF6tKnr9E8pixyOyVZ8jkc6tu37y73MzH+at5zT1D8AQAANJDL7dZZj7+n588/Sr0+LVOPF7/RM+6zdfbtz5iOBgAxKRKJaPLkyXr99dc1Z84cdezYcafHe/bsKY/Ho6VLl2rw4MGSpDVr1mjjxo27nYDXxeVy8cMgNCvGHExh7MEkxh9MYew1ji9efVIHSSpOlY7uf6zpOMbYbockKRKs3KNxFavjz2k6AAAAQDxwud0a9uRSreiTJKekHi8s13O3jDQdCwBi0qRJk/Tvf/9bU6dOVXJysgoKClRQUKCqqipJUmpqqoYNG6Y777xTH3zwgVasWKG//vWv6tev314XfwAAAAB2r3TFx5Kkgrate62Y5aku8YJBs0EaqHX/KQIAADQil9utM+a8rxfP/Y16rKhSt399quc9YzXshsdNRwOAmPL0009LkkaNGrXT9ilTpmjo0KGSpL/+9a9yOp26/PLLFQwGNXDgQN18883NnhUAAACId751hZKkyv2yDCcxy3ZXr5ULhswGaSCKPwAAgEbk8fp0+lMf6N9/PEKHfB1U13lLtcAzQWde+7DpaAAQM7799tt69/H5fLr55psp+wAAAIAmlr01WnSl9jjUcBKz7OoVf44WXvxxqE8AAIBG5vH6NGTuu/om3yO3LXV+8m39+94rTMcCAAAAAADYyZqvPlL29ujtPkPGmA1jWMQTXSvnCIUNJ2kYij8AAIAmkJicppPmvqtvD3bLY0kHPPaaXp7+f6ZjAQAAAAAA1Fr+yhxJ0tYMqWPnnobTmBXxeiRJzpBlOEnDUPwBAAA0keTUdJ04722tOtAlb1ja7+GX9eqjN5qOBQAAAAAAIEmq/OYLSVJRW6/hJDHAG/0OnGHbcJCGofgDAABoQinpmfrt3Ne1+gCnfCEp9/7n9NoTt5mOBQAAAAAAoMQNxZKk4H65hpOY5/D6JFH8AQAAoB5tsvJ09NzXtKajU4lBKeuf8/TmU3ebjgUAAAAAAFq53C3Rw1q26fMbw0nMc/oSJEkuij8AAADUJyOng46c/bJ+7OBQUkBK/ftMvfXs/aZjAQAAAACAVmrlh6+pTblkO6TDThlrOo5xTl+iJMkdihhO0jAUfwAAAM0kO+9A9Zv1otbmOZRSJSXd8aDe//ejpmMBAAAAAIBW6Ns3n5EkbcmK/syitXMlpUSvw4aDNBDFHwAAQDNqt38XHfLYM1rf1qHUSsk56R59+Ops07EAAAAAAEArE/zuK0lScVuf4SSxwZOYLElyW6z4AwAAwF7o2Lmn8h+Zq405Unq5ZP1tij55Y77pWAAAAAAAoBVJ2bhdkhQ+oIPhJLHBk5wevQ4ZDtJAFH8AAAAG7J/fX50enKnNWVJGmRT4yyR9/u6LpmMBAAAAAIBWwAqH1XaLLUnKOfQYw2ligy85TZLk5lCfAAAA2Bedew1Q+wce0tYMKdMv6cHHVVVcIDsUUqioMHpdXmo6JgAAAAAAiDOfv71AKVVS2CkdPmS06TgxITE9U5LkpfgDAADAvsrvd6yy771PVf26quf0WfLPeVqrjh6o748epFVHD1TJ40/IDgRMxwQAAAAAAHHkh3cXSpI25ziUlpFrNkyMSEzLkiR5Wnjx5zYdAAAAoLXrceRJqnqgn4rnzFXRQw/Vbrf9fhVNf1CSlDF2tDzJqaYiAgAAAACAOGJ9v0qStK1dguEksSOlTY4CkjyWFKiskC8xyXSkfcKKPwAAgBjgTWujknnz6nysZM5cubz8jzgAAAAAAGgcqZvLJEn2gQcYThI72mTl1d7eXrzJYJKGofgDAACIAZZ/u2y/v87HbL9fVmndjwEAAAAAAOyNQGWF2m2NSJLaH3Gi4TSxI3WHQ55uL9xsMEnDUPwBAADEAFdaupxpaXU+5kxLkyu17scAAAAAAAD2xqevz1NiUAp4pMMGjzIdJ2Z4vD4Fq0+QV+4vNBumASj+AAAAYoAVrFLGqJF1PpYxcoSsYFUzJwIAAAAAAPHI/+0X8nXtom35bZWYzC8a7yhUXfxVbm+5xZ/bdAAAAABInuRUZY8fLyl6Tj/b75czLU2ZI0YoY+RILX/refU7ebTZkAAAAAAAoEULlZfqpMvuVvhPheqUna1Qeak8yammY8WMoFtKllRZus10lH1G8QcAABAjnD6fMsaOVvaECbJK/XKlpqnsi2X6aeQohTat0ScJCTrshHNMxwQAAAAAAC2QHQioZOYslcydV/sLxxmjRip7/Hg5fT7T8WJCuLo1C5X7zQZpAA71CQAAEEM8yalyejzyZGbJ6fEo3DFPa8p/UGqlVHrjJK1bvcJ0RAAAAAAA0MKEyktVOGOGih58SLY/WmrZfr+Kpj+owkceUai81HDC2BDyRK+D5dvNBmkAij8AAIAYltm2ozre84BKUqV2xdIXE/+oyhb8W2cAAAAAAKD5ubwJKpk7r87HSubMlcub0MyJYlPY5YheV1YYTrLvKP4AAABiXLdDj1fwmotU5ZE6r7X18thjZYXDpmMBAAAAAIAWYOWbzylcUFC70u+XbL9fVim/ZCxJlqem+CsznGTfUfwBAAC0AMf+8c/64dyjZUvq+UWlnrn6FNORAAAAAABADPvv/Hv07yE95bnhHrnbtJEzLa3O/ZxpaXKl1v1Ya2O5o8WfHag0nGTfUfwBAAC0EEP/8piWH58nSer92k96ceolhhMBAAAAAIBY885zD2jhKb3U7pZH1WWNpdC2EpV9940yRo6oc/+MUSNlBauaOWVsslwOuTIytF/HHqaj7DO36QAAAADYc8OnvaYFfzxMPVYG1PHJN/XuQQ9p0JkTTccCAAAAAACGvffiIyp47AF1WxVSjiRb0rfdvGp/0VVK69tfKd17SA6HSubMle33y5mWpoxRI5U9frycPp/p+DHh+MlPKCW/uw4sLZUdCskKVsmTnGo61l6h+AMAAGhBXG63fv/4m3pn2EAduD6iiinTtOqg3urS52jT0QAAAAAAgAEfvPKkNj48VfnfhpRVve2brh61veAyDT3twtr9nD6fMsaOVvaECbJK/XKlpskKVlH6VbMDAVW9+z9tuujiFl2MUvwBAAC0MCnpmTrk/tlaO2aUcrZJq6+8UDnPvqE2WXmmowEAAAAAgGby8WtPa+3Dd6nb1wF1j0S3fXuwW1ljJ+jMoXWfHqRm9ZozM1oROj2eZska60LlpSqZOUtFDz5Uu832+1U0/UFJUsbY0S1m5R/n+AMAAGiBOnU/TN6br1e5TzpgY0Rvjvu9rHDYdCwAAAAAANDElv33eT03rK8Sr7hVh3wVkDMifXeQS5tvuVBnvLxcg3ZR+mHXXN4ElcydV+djJXPmyuVNaOZE+47iDwAAoIX6zR/O1+YLhshySN2/CerZicebjgQAAAAAAJrIl/97Sc8O7y/3JX9Tj5UBuSLS9we6tPHGMTp98Qodd85VpiO2WJZ/u2y/v87HbL9fVmndj8Uiij8AAIAW7JTLpmrFyQdLkvq8W6DnbhlpOBEAAAAAAGhMKz98Tc/+8VA5LrxWPZdXym1Lqw9wat315+rUV1bohBHXmo7Y4rnS0uVMS6vzMWdamlypdT8Wiyj+AAAAWrhz7n5JXx6aIknq8tynWjL7DsOJAAAAAABAQ33z6Zt65k+HKTz2CvX8okJuW1rT0amfrj5Lp/xnpU4afaPpiHHDClYpY1Tdv0ydMWqkrGBVMyfad27TAQAAANBwZzz2tv4z9Ah1+cFSyn1z9OXBfdR7wMmmYwEAAAAAgL206ov/6bO/X6luX5SqVzi67ccODtnDTtXJF99lNlyc8iSnKnv8eEnRc/rZfr+caWnKGDVS2ePHy+nzGU645yj+AAAA4oAvMUlHzFigr849TXmF0sbrrtHW+d2U26Gz6WgAAAAAAGAPrPnqI31852XKX+ZX71B029o8h0JD/6A/TLxLLjeVTlNy+nzKGDta2RMmyCr1y5WaJitY1aJKP4lDfQIAAMSNdvt3UeaUO7U9SWpfIC298HQFKitMxwIAAAAAALux9tvPNH/0Udp2zvnq/ZFfvpC0rq1Dqy76nU58/UudctlUSr9m4klOldPjkSczK3qdnGo60l6j+AMAAIgjfQedLv/l5yrokrqusbTwwuNMRwIAAAAAAHVYt3qF5o89WgXDR6jPB9uUGJQ25ErfjD1GJ7zxpU67chqFH/YaxR8AAECcOWn0jfrurH6SpN6f+PWv/zvdcCIAAAAAAFBj00/faP6Fv9WWocPV5/1iJQWljTnS1+cN0HFvLteZ1z5M4Yd9RvEHAAAQh4ZPekpfHJ0tSTpk0Xd6efr/GU4EAAAAAEDrtnXDaj094VitP/1M9Xm3QMkBaXOW9NW5h+u3b3yuoX+dSeGHBmMEAQAAxKnhM/6rF4f1V/dvQ2r36Mv6qEsfHXHSSNOxAAAAAABosPbt25uOUKdQealc3gRZ/u1ypaXLClapKlilRTeco4OWblTfyuh+WzKlgt/102nXPyZfYpLZ0IgrrPgDAACIUy63W8c//h+tzXMouUqqvPl2rf32M9OxAAAAAADYZ6HyUjlsW9lejxy2rVB5qelItexAQCUzZ2nV0QP1/dGDtOrogSqZOUuJcuvwHxOUWikVtJGWD+ulo974VMMnPUXph0ZH8QcAABDH2mTlqdM/H1ZxmpRbIq24dJTKS7ebjgUAAAAAiFGh8lLZoZBCRYXR61gv1h5/QnYgYDqaQuWlKpwxQ0UPPiTb75ck2X6/ih58SCWz5yjn+uv05RnddfgbH+rs25+h8EOT4VCfAAAAcS6/72+15dpLVHnrdB24ztYrY47VmfM/5rwBAAAAAICd1BRrJXPnyfb75UxLU8aokcoeP15On89otlB5qUpmzlLRgw/VbrP9fhVNf1CSlDF2tDzJqQpUVqjcX6SybQUq9xerqmy7qsq3qapsu4IVZQpVlsqqqpIVqJAVqJQdqJJCQUWCQUVCITlCYSkcliMcljNsyRG25QzbcloRuayInOHotSsckduSXJaUlJql/v9+XSVz59WZvXjePHWZOEF//O0LzfJdoXXjpz0AAACtwG/PulQLf1ihLo+/rR4rqvTMn3+vPz2wxHQsAAAAAGh16joHnCc51XSs3RdrkYhSzx6md199XHYoqHCwSpFwSFYoqEg4LNuKlmaRcFh2OCRZliJWWBHLksOyqu9bcli2ZEcvDsuSw45Ub4vIYdvR+3ZETjsih63a20kZuTp21uJdFmslc+Yqe9w4fX30AKmoZKfHEqovTcmXm6lwUVHtSr9fsv1+WWWlcmZmNXESgOIPAACg1Tjj/x7W/LUnqc/r69R7yQYtuOsinXndDNOxAAAAAKDRxWq51hgr6spLt6t46zr5CzeqrGSzKrYVKlBaomDZdoUq/LIqymVXVkiBKikQlCMYlCMYlitkyRm05Q7bcoVseUKSJxSRJyQlp2aq98u7XrFWMneesi+4QJ1mvCKrpKTOfZqSz5dab7EWLimRLytbgR2KP9shBd1SyC1ZLinsksLu6LXldsh2OWS5JMvtlO1yKOJ2ynY7FXG7ZLvdktutiMclh8creTxyeH1yen1y+hLlTkiUKyFJnsRUZeTsp065uXKmpdWZ0ZmWJldqWpN9P8COKP4AAABakeH/XKwXzj1CPb+sVKe57+itg+7XscMvMx0LAAAAQAvVvn170xF+xfThKivL/dpWuFH+4s0qKy5Qhb9QlduLNPB356ns+YW7XFGXcMwgvT5pjFxBS+6QLXc4Ind1MecJSd6Q5AtJbjv6XKektOpLQ/nyshQuLt59sbZtmyo752nb6hLZDsl2RYs12+lQxCnZTinidOxw7VCkzotTckWvo7edkjNatjmcLskVvThcLjncHsnlUnrOfuqUk7PbYs2dkyN78rVK9CYopU2WktOylJjcfGVbqLxUGaNG1h56dEcZo0bKClbJ6fE0Wx60XhR/AAAArYjL7daQmf/VW8MG6KC1tir+/qC+PbiX8vsdazoaAAAAgBakZkVdttcjh20rVFUREyvq6j0P3JjztbVgvUqLN6ts21ZVbi9Spb9EwbJtClWUyaook1VVoUigqnrFXEiOUFCOoCVnKCxX9Wo5V3Up5w5XF3Ph6nIuLHmsn/MkVV9cGRlKOPsqrdvNirouF1ygXpuS9nhFneWQAh4p6JFCtReHwm6HLI+z+uKS7XUp4vUq4vNIvgS5EhLlTEySKylVnuRUJaRmKKtD53pXrLlzctRn7vN7/GfR2Oot1kIBHdxnoIFkUZ7kVGWPHy8peujRWDtHIloPij8AAIBWJjk1Xb2mP6UfzjtHuSXST1derNxnX1dGTgfT0QAAAAC0AM29om5b0SYVrF+jbVvWqrRwgypKtii4vVjhsu2yK8oUqaiQsyqgtKQsDb73hXrPA1d+7gWKlJQoWVJyo6f9ma1oKRf0SAmd8xQuKdn9OeBKS7Vl+NHasHaFHAmJciWlyJ2UIk9yuhLTs5SQnqWUjFy1ycpTm9z9lJyaKZe78X7EH+sr1lpCseb0+ZQxdrSyJ0yQVeqXKzUt+r3FQDa0HhR/AAAArdD+Xfpo86QbVHbt7eq4OaK3xv1Bpzz3sTxeJiMAAABALIjVc9TVu6Ju7OjanOWl21WwfpVKNv+k7YUbVVG0SQF/scL+ElkVZYpUlMtRGZAzEJQ7EJYrYMkTtOUNRuQNSgkBKSH48wq6lOrLrvi6tlW4sLDe88C5c7IV2lYSXS3njq6UC7ulkNuhsMchy+OQ5XZEV8t5nLLdbkW8HkV80XO8OXwJciQkRku5pBR5k9PlTYmWcykZuUrLaKu07DylpufsVMzZodDuzwGXmalBV/1j7/5AGhHFWuOoGf/OzKzoNYf3RDOj+AMAAGiljjhppBatXqmE+xeq23chPT/heJ3z+P9MxwIAAACaRawWa1Lzr6gLBQMqLdmqkq3rVLqtQBUlW1VZWqJA6TaFyrcrXFEmq7JcqUnpGvx/D+5+Rd0FF+jTEwfKtblIvnB0e0L1paEqvVKVTwp4paDXoZDXoZDXJcvnkuXzKKVDwh6dBy539uNKTGnTqKvl9oQVrIrpFXVSyynWLMtSYSCo3HRnzPx3C8QKij8AAIBW7OSJU/Svn75R74XfqM/7xXr2b3/U8Nv+ZToWAAAA4gDF2r7Z7Yq6SESp5/5RH77xtAJl2xQs9ytU7pdVWS67MnpeukgwIEcgJEcwJGcoLGfIqj4nnV19TjrJHYpEz0VXfU46X+jn9/dVXzLqyObr2kXhgoLdr6grLlZGUqYC4aLa7QH3z4VdwOtQyOdQyOuMFnZet+wEn5SYIEdSslwpqfKkZiqhTZaSMtsqve3+yso7ULkdOu/REUr25DxwKW2y632dptASVtRJLWfF2saNG5Wbm2s6BhBzKP4AAABauT/euUD/2niken/kV9cFX+o/B92qwWNvMh0LAAAAe6h9+/amI/xKiy7WzhmupW88pXCgUlawUlagSlYwKDsUkB2skh0OKRIKKhIKKWKFpfAOF8uSLFuOsCWHbcthRS9OOyKHFZHTishhR6+ddkROW3JZit62pMT0bB3xwmu7XlE3d56yL7hA7e99RlZJSZN8P0F39cXz8yEwwx6Hwm6HfOnl6pSdXe+KuuKJw+VKSFJW+wOV1b6zklPTmyRrXWK9XGsJK+oAtGwUfwAAANDQx97R4jMPVdfVlto88LSWde6lfsecaToWAAAtSiyWLy1BLK8Kk2I7X022bK9HDttWqKoiJrLVdw64Nuefp81bflRlWYkq/CUKVPhVVeZXuLJUwYoyWYEKhasqZQcqZQcDsoOB6qItKEcoLIVC0WItbMlhWXKGbTnCtpxWRC4rImc4eu2yJJcVXeHmsiS3JSWmZarn4tfrLdb2m/Z8kxVru+PLzlC4qKjec9QFD2in4khJ9Lx01aWc5XbI8jpluZ2yPW5FvC5FPF7J55V8PjkTEuVMTK4+L12qfCnpSkjPVnJallIy2yk9u53SM/PkS0zabcY9WVF32B9GNcr3sa9ivVxrKSvqALRMFH8AAACQx+vTUY+8qC/POUUdCqRNN/xVm+Z1V94B3UxHA4C4EMvFgUS+horV8qVGLH9/sbwqLNbzNUW28tLt2la4Qf7CjSot2ayK7SUK+IvqPJykAgEpGJIjGKw+lKQlZ8hWamqOjnt80e7PATdunKrOmyi7pKT23G/NtR7Mt1+WwsXF9RZrgU7ttM0uke2ULJdkOSXb6ZDtkmyXQ3b1/YjLIdvlVGSH23I5FXE5FXG5o7fdLsntllxuOd1uyeORw+WR0+OV0+uT0+ORy5sgpzdRGdl56pSbu/sVdbm56jn/hab+qnYp1lfU1aBcA9BaUfwBAABAkpTbobPa/eMebbvkKuUVSp9cNEwnPr9UiclppqMBQIsWy8UB+chnUn2rwjLGjm7UgtIKh1VVWa5QsELhYJVCwYBCVRUKBasUDgcVDgZlhQMKVVXJsgLq1e9ElT79zK4PB3n2MP3v9TnRByJ29HHbrt4zUr05stPjilRvr75fe23XbN95/8gvnldzffyZl6vshRd3mS1x8IladPcEKRiSMxiSM2TVnufNHY5Un+ctIk9I8oQlbyh6cdu1L6fU6sve8nVN2aMVa+6cbIVKShRyS2FX9eEkq6+jRZtDljtaslkuR7RscztluxyKuF2y3U5F3G7J7VbE45bD44kWal6vnN4EOX0+uXxJcvsS5UlMkTsxWd6kVGXktJd7D4q1Xk+bK9bqXVEXrDJeYsX6ijoAaM0o/gAAAFCr52/+oDeuWq7EKbN08I+2XrrwBJ391MemYwFAi1XfeaRShp2h/744fbevUfuD/71RUxLU4/gzL1fZ8wt3nW/o6Xrj+X/u8LI7vO4Ot2vLjegr7GL/miJjFzl3fL3qfQePuEHlC1/aZb7k00/Rq3NvlexdvM6OH9be8Xvc8b12leHn245dbB9ywe2qeOmVXeZLOvn3WvTw/0mRiCKKyBGJ7FDsRKrfL6JIpPo9diiFovers0Zq3jcixy9u/1wI1ewbqf14J//fw6pc8t9d5ks47hi9cucFctQ8/xev5YjU3ql+XHLo59ev2d+xw26KRHZ6PceOX/kOz0/OzNUJj9SzKuyCC/TWWccqULRVDltyRqKZnLbkiKj2tjOi6sd3fd9pS8463+lnzuqLR5IrI0O+N4ZqbT2Hg9z/wX83++EgXRkZShg3WevqydZ/RVhWybZ9eg/bIQU81ed4c0fP8xZyOxT27OJwkl6v5PVKCQlq066jOuXk1HsOuLZPzVVCQpJc7ub/8WSsF2staUWdZVkqDASVm+6MmZXEANDaUfwBAABgJyeMuFbPr16hQ576WL0+K9P8q07WOfcsMh0LQCs0b948zZw5UwUFBerWrZtuvPFG9e7d23SsveLyJtR7Hqkuc941ch4pV0aGEsbeWm95kD//I2P5Eq9oq/X15Ov18nfG8iXd0EEb6snX770CY/mSO3XWxrkX1Pl4ydx56nLBBeq3xmPmPGZdkxUuLNz9qrDiYu0fTFNg05Ymz2NL1YdujJZeCe1z6z8c5LZtqjooT/5VO39/EYcadr/mxi72a9Olg8IlJbvNZvn92nr8Idr44xfV5ZxXDp9PDl+CnAmJciWlyJ2UIl9KG3lT0pXUJlvJbXKUltVebbLzlJya2aBCbk/OAZecYu6oEi2hWGtJK+o2btyo3Nxc0zEAANUo/gAAAPArw26arafXH6O+72xVz1fW6KUD/qxTr7jXdCwArcjixYs1ZcoUTZo0SX369NGTTz6pcePG6dVXX1VWVpbpeHvM8m+vtzgoy2+v7asav3j5ZZnwS3tSHoS3bVNptw7a9v3P+XZaRbfDe+y8ve43r6vQ2NXrZXbpqPC2bbsvN7Zv17Y+HVW8atvOr7Pje+4UxVHnzV1lqDN79T7ZXTrtUb6iIw7S1lWfV7+uY+c8DikiR/QxR01exw6P6efv0rHj9+fY5fNr3iOva091qmf8WaWl2nJyP21Y9dnPn92xw4vteL/m9R01n8Ehxy9u13xGOaKfw+Fw7vAcR3Vsh+RwKC0zp/5VYdnZWj24u0qPypXD5ZbT5ZHT7ZLD5ZbD5ZHb45XDXbPdI7fHFz1nmtsjt8cjjydRLq9XLrdPbq9XHl+S3B6fPN7oxe1NkC8hWV5f3avO7FCo3lVrvec9X+f329Tqy+bKytLA2x8zkCyKYq1xcI46AMC+oPgDAABAnc5+8A0tHH6oDvk6qPaP/0fvHzxLA04eYzoWgFZi1qxZOvvsszVs2DBJ0qRJk/TWW2/p+eef1/jqHya3BK609HqLg/5PPGcgWdSeFBuHznrWQLKoesuN7Gwd8fC/DCSL2pN8R90310CyqHrzZWZq0N92f6jZplTvqrBwUL+75C4DyaKsYFXMHg4ylrPVoFgDAMAMij8AAADUyeV263ePv673hx2rAzZG5Jy7UIGjT4+ey8O/Xa60dFnBKs7lAaDRBYNBrVy5UhdddFHtNqfTqQEDBmjZsmV79VqWZTV2vL1i78EP5yPO+s481nTI1zDkaxhnQtJuV4VF3G6j/w3Hcr5YzrYjZ0KSIpKc6W2i1wlJMZELjafmz5M/VzQ3xh5MMjH+9ua9KP4AAACwS2kZuepy32Mqvf0u9Zo+S8VPzlXJvHkxebgmAPGjpKRElmX96pCeWVlZWrNmzV691vLlyxsz2l5r166d8nbzw/lNhYXavPk78pGvVearyZj7y1VhgarqbJuNZov1fLGcDa2P6X9v0Xox9mBSrI4/ij8AAADsVudeA1Q57WEVz5mrooceqt1u+/21Kwgyxo5m5R+AmNSrVy+5XC6jGSLSrw93F6hSxO1Wu3bt1K5dO/KRr9Xmq7HTqrDEJLVLTIqZbFJs5wtblooCQWWlROSKsWyIf5Zlafny5THx7y1aF8YeTDIx/mrec09Q/AEAAKBevoxsrZ03r87HSubMVfaECc2cCEA8y8jIkMvlUlFR0U7bi4qKlJ2dvVev5XK5YuKHQa4YP48U+RrGlZwqy7JUGAgqN90pT0ps/TJMrH9/aLiNGzcqNzc3Jv6+Q+sUK//eovVh7MGkWB1/5g7kDgAAgBbD8m+X7ffX+Zjt98sqLNT/Hvir3l80S1Y43MzpAMQbr9erHj16aOnSpbXbbNvW0qVL1a9fP4PJgN3buHGj6QgAAABo5VjxBwAAgHq50tLlTEurs/xzpqXJlZ6unHlvySpZoA9v/rs27e+TfUi+eg+bqPx+xzZ7XgAt35gxY3TdddepZ8+e6t27t5588klVVlZq6NChpqMBAAAAQMyi+AMAAEC9rGCVMkaNrD2n344yRo5Q+doftCp1u/bzSxllUsZXAemrL2U/N1FvZksFB6TI1+9wDfjT1crt0NnAJwDQ0gwZMkTFxcWaNm2aCgoK1L17dz322GN7fahPAAAAAGhNKP4AAABQL09yqrLHj5cUPaef7ffLmZamjFEjlT1+vJw+n05+baVKCjbovaenquKT95X543a13yrlFUp5hWXSp//Vlsf/q0/zHNp+YJYyB56kQcOvUGJymuFPByBWjRw5UiNHjjQdAwAAAABaDIo/AAAA7BGnz6eMsaOVPWGCrFK/XKlpsoJVcvp8tftk5HTQqZffU3t/zVcf6bPnHpCWL1e7n6qU5Zc6bYhIGwql957SV/98Suv3c6uyy37qNPgcHf67EXK5+V9UAAAAAACAfcFPVQAAALDHPMmpkiRnZlb02uPZ7f4HHXKEDrppdu39T96Yr9WL58r37U/ab31YyVVS19VhafWP0qt36oPUO6vPD9hNfYZdrPy+v22yzwIAAAAAABBvKP4AAADQbA474RwddsI5kqRAZYXee+EBFbyzWKlrCtRxo63MUilzZUBa+YXsZy/SGzlS4f6pSuh/pI4+9ypl5x1Y73u0b9++qT/GPgmVl8rlTZDl3y5XWrqsYFVtkQoAAAAAANAYKP4AAABghC8xSSeMuFYaca0kqXjLOr339N2q/PQDZf3oV4cCqX2B1L6gVPp0iTbNXKKP2ju0/aBsZQ04Scf88Sr5EpNqX6+mWMv2euSwbYWqKmKmWLMDAZXMnKWSufPqPD9iLKCYBAAAAACg5aP4AwAAQEzIbNtRp/35vtr7q5e/r2XPT5dWfKW8tVXK9EsHro9I6wukd+Zp+T/naX1HtzJPOllHX3hzzBZrofJSlcycpaIHH6rdZvv9Kpr+oCQpY+xo4wVbaygmTT8fAAAAAIDmQPEHAACAmNS51wB17jVAkmSFw/r0zfla8+rTSvhurfZbF1ZyQMr/Pqz9rvydih95VEUP1VGsRSJKGjJYLz94lSKWJYdlSbYlWbZkVV/bthx2zXVEDtuWw4rIEYlU34/scF9y2hE5IpLTqr62JYcdkTMiOS1Fr+3oJSE9S/1eel0lc+fV+RlL5sxV9gUX6LMTjlZge7Fsp2Q7VHsdqb121N6POCTb6VCk9r5DkZr7LsdO9+Wsue2Uqh+T06mIyylV3z75modV+dobdReTkYiSzzhFS56dKpcvUZ6EZLkTk+VNTJYnMUW+pDQlpqQrKaWNktIylJyWJa8vSS53404zGlpMmn4+AAAAAADNheIPAAAAMc/lduuIk0bqiJNGSoqeH/DdZ+9V4KsvlT9ggDb+5a91Pq9k7jxlX3CB+i8tkVVS0pyRJUm+vEyFi4pk+/11Pm77/QoXF6tNcpYCG4p380qReu7vG1dGhpL3P1Abd1VMVn9/3Z9btsvvLyKpvPpSIMmWFHZLYdfPF2uni0OWK1pe2q7qi9sp2+VQxBUtJSNulyJut+R2RYvJ/yzZZTGZdMoftHhm3X/+kjTkgjtU8dIrTfN8xcaKTQAAAAAAalD8AQAAoMXxJSbpxPOiZU2oqHC3xZq1fbuKjuqszd9/EV399ouLnM7obZdLcjqrV8Q5JKdLcrvkcLokV/TicLvlcFVf3B45XG45PV45XR45PR453R65vQlyVF+nZbRVp9xcOdPS6szoTEuTOydHJZf+Uf7iLbKtsKxQQHYoKCscViQclG2FZYdDilhW9XU4egmHJdtSxLIUsS3JshSx7dpVjdHbtmRHJLuulY0R5XXpoU7bt+/++9u2Tf5D9lPpNyVyWZLLktzVF5cleapXOdZ+JknecPRSt8gvrnfNlZGh5I6d6i0m+765oc5i0pWRoaQbOmhDUz1/zlxlT5hQ7+cAAAAAAKC5xETxN2/ePM2cOVMFBQXq1q2bbrzxRvXu3XuX+7/yyiu67777tGHDBnXq1EnXXHONjjnmmGZMDAAAgFjhSkvfbbHmys7WUffMMZAsKlReqoxRI2tXiO0oY9RIWaGADj1phIFkUXYotPvvLydHh818ZrevUVnuV7m/SBX+ElVW+FVZtl2B8u0KVpQqWFmucFWFQlXlsqqqZAUrZQeqZIeCilRfFAopEg7JEbakcFiOsCVH2Fb7zofUX0xu367iwzqp8Lvtv3o8u2snhbdta9rnl/rlzMza7fcDAAAAAEBzMV78LV68WFOmTNGkSZPUp08fPfnkkxo3bpxeffVVZWX9egL92Wef6eqrr9ZVV12l4447Ti+99JIuueQSvfDCC+ratauBTwAAAACTrGDV7ou1YJWcHo+BZFGe5FRljx8vKbpCLNbOEdcY319icpoSk9OkvAMbPV+9xWR2tn5z/1Pmnp+atoefBAAAAACApuc0HWDWrFk6++yzNWzYMB188MGaNGmSEhIS9Pzzz9e5/+zZszVo0CBdcMEF6ty5s/785z/rkEMO0dy5c5s5OQAAAGJBTbGWdcnFcqZFSxhnWpqyLrlY2ePHx8T515w+nzLGjlaX/72ng99/T13+954yxo42XvpJsf/91RSTdakpJmP5+QAAAAAANCejK/6CwaBWrlypiy66qHab0+nUgAEDtGzZsjqf8/nnn2v06NE7bRs4cKCWLFnSlFEBAAAQw2qKtewJE2SV+uVKTYuuVIuBYq1GTYFWc1hIk6sQfymWv7+Grpg0/XwAAAAAAJqT0eKvpKRElmX96pCeWVlZWrNmTZ3PKSwsVHZ29q/2Lyws3KP3jEQikqKlo8vl2ofUwN6xLEsSYw5mMP5gCmMPRnh8CoTDKq6sUmZiklwen4LBoOlULYfHp3AkIqWkRq9j6ftzOJR2/ihlXHihrLJSuVJSZQWqFHY4pD3J2EzPN/F3X8171sxz8LOa76TmOwKaWs1YY8yhuTH2YBLjD6Yw9mCSifG3N3M/4+f4a262bUuSvvrqK8NJ0Now5mAS4w+mMPZgyubNm01HQCtm4u++mnkOflbznSxfvtxwErQ2jDmYwtiDSYw/mMLYg0kmxt+ezP2MFn8ZGRlyuVwqKiraaXtRUdGvVvXVyM7O/tXqvt3t/0tut1u9evWS0+mUw+HYt+AAAAAAEAMikYhs25bb3ep+p7NezP0AAAAAxIu9mfsZnR16vV716NFDS5cu1Yknnigp2lYuXbpUI0eOrPM5ffv21QcffLDTef7ef/999e3bd4/e0+l0yuv1NjQ6AAAAACCGMfcDAAAA0Bo5TQcYM2aMnnnmGS1YsECrV6/WLbfcosrKSg0dOlSSdO2112rq1Km1+5933nl699139fjjj2v16tW6//77tWLFil0WhQAAAAAAAAAAAEBrYPx4MEOGDFFxcbGmTZumgoICde/eXY899ljtoTs3bdokp/PnfrJ///66++67de+99+qee+5Rp06dNH36dHXt2tXURwAAAAAAAAAAAACMc0QikYjpEAAAAAAAAAAAAAAaxvihPgEAAAAAAAAAAAA0HMUfAAAAAAAAAAAAEAco/gAAAAAAAAAAAIA4QPEHAAAAAAAAAAAAxAGKP6ARzJs3T8cff7x69eql4cOH68svv9zlvs8884zOPfdcHX744Tr88MM1evTo3e4P1Gdvxt+OFi1apPz8fF188cVNnBDxam/Hnt/v16RJkzRw4ED17NlTgwcP1ttvv91MaRFP9nbsPfHEExo8eLB69+6tY445RnfccYcCgUAzpUW8+PjjjzVhwgQNHDhQ+fn5WrJkSb3P+fDDD3XmmWeqZ8+e+t3vfqcXXnihGZICaErM/WAK8z6YxNwPpjD3gwnxMPej+AMaaPHixZoyZYouueQSLViwQN26ddO4ceNUVFRU5/4ffvihTj75ZM2ePVvz589XXl6exo4dqy1btjRzcsSDvR1/NdavX6+77rpLhx12WDMlRbzZ27EXDAY1ZswYbdiwQffdd59effVVTZ48WW3btm3m5Gjp9nbsvfTSS5o6daouvfRSLV68WLfffrsWL16se+65p5mTo6WrqKhQfn6+br755j3af926dbrooot05JFH6sUXX9T555+vv/3tb3r33XebOCmApsLcD6Yw74NJzP1gCnM/mBIPcz9HJBKJGHt3IA4MHz5cvXr10k033SRJsm1bxxxzjEaNGqXx48fX+3zLsnT44Yfrpptu0hlnnNHEaRFv9mX8WZalESNGaNiwYfr000/l9/v14IMPNmdsxIG9HXtPP/20Zs6cqVdeeUUej6e54yKO7O3Yu/XWW7V69Wo9+eSTtdvuvPNOffHFF3r66aebLTfiS35+vqZPn64TTzxxl/v84x//0Ntvv62XX365dtuVV14pv9+vmTNnNkdMAI2MuR9MYd4Hk5j7wRTmfogFLXXux4o/oAGCwaBWrlypAQMG1G5zOp0aMGCAli1btkevUVlZqXA4rPT09KaKiTi1r+Nv+vTpysrK0vDhw5sjJuLQvoy9N998U3379tWtt96qAQMG6JRTTtHDDz8sy7KaKzbiwL6MvX79+mnlypW1h4RZt26d3n77bR1zzDHNkhmt1+eff66jjjpqp20DBw7U559/biYQgAZh7gdTmPfBJOZ+MIW5H1qSWJz7uY29MxAHSkpKZFmWsrKydtqelZWlNWvW7NFr3H333crNzd3pHzJgT+zL+Pvkk0/03HPPaeHChc2QEPFqX8beunXr9MEHH+jUU0/VI488orVr12rSpEkKh8O69NJLmyM24sC+jL1TTz1VJSUlOvfccxWJRBQOh3XOOedowoQJzREZrVhhYaGys7N32padna2ysjJVVVUpISHBUDIA+4K5H0xh3geTmPvBFOZ+aElice7Hij/AoEceeUSLFy/WAw88IJ/PZzoO4lxZWZmuvfZaTZ48WZmZmabjoJWJRCLKysrS5MmT1bNnTw0ZMkQTJkzQ/PnzTUdDnPvwww81Y8YM3XzzzXrhhRf0wAMP6O2339b06dNNRwMAtCLM/dBcmPfBNOZ+MIW5H/AzVvwBDZCRkSGXy/Wrk8oWFRX9quX/pZkzZ+qRRx7RrFmz1K1bt6aMiTi1t+Nv3bp12rBhgyZOnFi7zbZtSdIhhxyiV199Vfvvv3/ThkZc2Je/+3JycuR2u+VyuWq3HXTQQSooKFAwGJTX623SzIgP+zL27rvvPp122mm1h7nKz89XRUWFbrrpJk2cOFFOJ78Hh6aRnZ2twsLCnbYVFhYqJSWF1X5AC8TcD6Yw74NJzP1gCnM/tCSxOPdjtAMN4PV61aNHDy1durR2m23bWrp0qfr167fL5z366KN68MEH9dhjj6lXr17NERVxaG/H30EHHaSXXnpJCxcurL0cf/zxOvLII7Vw4UK1a9euOeOjBduXv/v69++vtWvX1v7QQZJ+/PFH5eTkMPHDHtuXsVdVVfWrCV7NDyEikUjThUWr17dvX33wwQc7bXv//ffVt29fM4EANAhzP5jCvA8mMfeDKcz90JLE4tyPFX9AA40ZM0bXXXedevbsqd69e+vJJ59UZWWlhg4dKkm69tpr1bZtW1199dWSood4mTZtmqZOnaoOHTqooKBAkpSUlKTk5GRjnwMt096MP5/Pp65du+70/LS0NEn61XagPnv7d9+f/vQnzZ07V7fffrtGjhypn376STNmzNCoUaNMfgy0QHs79o477jjNmjVLhxxyiHr37q21a9fqvvvu03HHHbfTbyED9SkvL9fatWtr769fv15ff/210tPT1b59e02dOlVbtmzR3//+d0nSOeeco3nz5unvf/+7hg0bpg8++ECvvPKKZsyYYeojAGgg5n4whXkfTGLuB1OY+8GUeJj7UfwBDTRkyBAVFxdr2rRpKigoUPfu3fXYY4/VLjvftGnTTr9tMn/+fIVCIV1++eU7vc6ll16qyy67rFmzo+Xb2/EHNJa9HXt5eXmaOXOmpkyZotNOO01t27bVeeedpwsvvNDUR0ALtbdjb+LEiXI4HLr33nu1ZcsWZWZm6rjjjtOVV15p6iOghVqxYoXOO++82vtTpkyRJJ155pm68847VVBQoE2bNtU+3rFjR82YMUNTpkzR7Nmz1a5dO912220aNGhQs2cH0DiY+8EU5n0wibkfTGHuB1PiYe7niLDOFQAAAAAAAAAAAGjx+HUgAAAAAAAAAAAAIA5Q/AEAAAAAAAAAAABxgOIPAAAAAAAAAAAAiAMUfwAAAAAAAAAAAEAcoPgDAAAAAAAAAAAA4gDFHwAAAAAAAAAAABAHKP4AAAAAAAAAAACAOEDxBwAAAAAAAAAAAMQBij8AAOrxwgsv6LDDDjMdo8Hy8/O1ZMkS0zEAAAAAIOYw7wMAxAtHJBKJmA4BAEB9rr/+ei1YsECS5PF4lJeXp9NPP10TJkyQ2+1u0veuqqpSeXm5srKymvR9XnjhBf3lL3+RJDkcDmVnZ+uwww7Ttddeq/bt2+/x69x///1asmSJXnzxxZ22FxQUKD09XV6vt1FzAwAAAEBjYN7HvA8A0HCs+AMAtBiDBg3Se++9p//85z8aM2aMHnjgAc2cObPOfYPBYKO9b0JCQpNP/mqkpKTovffe0zvvvKNp06bphx9+0BVXXNEor52Tk8PkDwAAAEBMY97XMMz7AAAUfwCAFsPr9SonJ0cdOnTQueeeqwEDBujNN9+UFP3N0IsvvlgPPfSQBg4cqN///veS6j7MyWGHHaYXXnhBkrR+/Xrl5+frtdde06hRo9SnTx+ddtppWrZsWe3+vzzky/3336/TTz9dCxcu1PHHH69DDz1UV155pcrKymr3KSsr09VXX62+fftq4MCBeuKJJzRq1Cjdfvvtu/2MDodDOTk5ys3NVf/+/XXWWWfpyy+/3Om1//GPf2jw4MHq06ePTjjhBN17770KhUK1WR944AF98803ys/PV35+fu1n/eV38e233+q8885T7969deSRR+rGG29UeXn5nv+BAAAAAEAjY94XxbwPALCvKP4AAC2Wz+ernfhI0tKlS/XDDz9o1qxZmjFjxl691j//+U+NGzdOCxcuVKdOnXT11VcrHA7vcv+1a9fqjTfe0MMPP6wZM2bo448/1qOPPlr7+J133qlly5bpoYce0uOPP65PPvlEK1eu3KtMRUVFev311+VyueR0/vxPdnJysqZMmaJFixbphhtu0LPPPqsnnnhCkjRkyBCNHTtWXbp00Xvvvaf33ntPQ4YM+dVrV1RUaNy4cUpPT9dzzz2ne++9V++//74mT568VxkBAAAAoCkx72PeBwDYO017cGwAAJpAJBLR0qVL9d5772nkyJG125OSknTbbbft02FNxo4dq2OPPVaSdPnll+vkk0/WTz/9pM6dO+8yw5QpU5SSkiJJOu2007R06dLa3wBduHCh7r77bh111FGSpClTpmjQoEH15igtLVW/fv0UiURUWVkpSRo1apSSkpJq97n44otrb++333764YcftGjRIl144YVKSEhQUlKSXC6XcnJydvk+L7/8soLBoO66667a177ppps0YcIEXXPNNcrOzq43KwAAAAA0FeZ9zPsAAPuG4g8A0GK89dZb6tevn0KhkCKRiE455RRddtlltY937dp1n89lkJ+fX3u7ZuJUXFy8ywlghw4daid/kpSbm6uioiJJ0cPIhEIh9e7du/bx1NRUHXjggfXmSE5O1oIFCxQOh/XOO+/opZde0pVXXrnTPosXL9bs2bO1bt06VVRUKBwO75RlT6xevVr5+fk7TSz79+8v27b1ww8/MAEEAAAAYATzvijmfQCAfUXxBwBoMY488kjdcsst8ng8ys3Nldu98z9jiYmJv3qOw+FQJBLZaVtdh3LxeDw7PUeSbNveZZZfvrekX73PvnA6nTrggAMkSZ07d9batWt1yy236B//+IckadmyZbrmmmt02WWXaeDAgUpNTdWiRYs0a9asBr83AAAAAJjGvI95HwCgYTjHHwCgxUhMTNQBBxyg9u3b1zkBq0tmZqa2bt1ae//HH3+sPZRKU9lvv/3k8Xi0fPny2m2lpaX68ccf9/q1xo8fr1deeaX2PBHLli1T+/btNXHiRPXq1UudOnXSxo0bd3qOx+PZ7eRVik4uv/32W1VUVNRu++yzz+R0OvfoN1QBAAAAoCkw72PeBwBoGIo/AEBc+81vfqN58+bpq6++0vLly3XzzTfv9FueTSElJUVnnHGG/v73v+uDDz7QqlWrdMMNN8jhcNT+VumeysvL04knnqhp06ZJkg444ABt2rRJixYt0tq1azV79mwtWbJkp+d06NBB69ev19dff63i4mIFg8Ffve6pp54qr9er66+/Xt99950++OADTZ48WaeffjqHewEAAADQojDvY94HAPgZxR8AIK5dd911ysvL04gRI3TNNddo7NixSkhIaPL3vf7669W3b19NmDBBY8aMUf/+/dW5c2f5fL69fq3Ro0frrbfe0pdffqkTTjhB559/vm699VadfvrpWrZsmSZOnLjT/oMHD9agQYN03nnn6aijjtLLL7/8q9dMTEzUzJkztW3bNp111lm64oordNRRR+nGG2/c588MAAAAACYw72PeBwD4mSPSGAemBgAAu1VRUaHf/va3uu666zR8+HDTcQAAAAAAjYx5HwAgFuzZgbIBAMBe+eqrr7RmzRr17t1bpaWlmj59uiTphBNOMJwMAAAAANAYmPcBAGIRxR8AAE3k8ccf1w8//CCPx6MePXpo3rx5yszMNB0LAAAAANBImPcBAGINh/oEAAAAAAAAAAAA4oDTdAAAAAAAAAAAAAAADUfxBwAAAAAAAAAAAMQBij8AAAAAAAAAAAAgDlD8AQAAAAAAAAAAAHGA4g8AAAAAAAAAAACIAxR/AAAAAAAAAAAAQByg+AMAAAAAAAAAAADiAMUfAAAAAAAAAAAAEAco/gAAAAAAAAAAAIA48P/yZPLnjxAPwgAAAABJRU5ErkJggg==\n" - }, - "metadata": {} - } - ] - } - ] -} \ No newline at end of file From 837506d3eba2867ec583fca9f9e55efb4f735590 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 11:04:19 +0530 Subject: [PATCH 023/115] Add LiteRT (TFLite) export support to Keras Introduces a custom LiteRTExporter for exporting models to TFLite format, bypassing the standard TFLiteConverter. Updates the export API and documentation to support the new 'lite_rt' format, and adds relevant options for custom ops, select TF ops, and optimizations. --- keras/src/export/__init__.py | 1 + keras/src/export/export_utils.py | 25 ++++++++++ keras/src/export/lite_rt_exporter.py | 72 ++++++++++++++++++++++++++++ keras/src/models/model.py | 35 ++++++++++++-- 4 files changed, 130 insertions(+), 3 deletions(-) create mode 100644 keras/src/export/lite_rt_exporter.py diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index 7adfd18513f6..1e5979264e10 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -3,3 +3,4 @@ from keras.src.export.saved_model import ExportArchive from keras.src.export.saved_model import export_saved_model from keras.src.export.tfsm_layer import TFSMLayer +from keras.src.export.lite_rt_exporter import LiteRTExporter diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 4b76f68fe4a6..008ab946f32e 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -105,3 +105,28 @@ def convert_spec_to_tensor(spec, replace_none_number=None): s if s is not None else replace_none_number for s in shape ) return ops.ones(shape, spec.dtype) + + +# Import exporters here to avoid circular imports +from keras.src.export.saved_model import export_saved_model +from keras.src.export.lite_rt_exporter import LiteRTExporter + +# Registry for export formats +EXPORT_FORMATS = { + "tf_saved_model": export_saved_model, + "lite_rt": LiteRTExporter, + # Add other formats as needed +} + + +def export_model(model, filepath, format="tf_saved_model", **kwargs): + """Export a model to the specified format.""" + exporter_cls = EXPORT_FORMATS.get(format) + if exporter_cls is None: + raise ValueError(f"Unknown export format: {format}") + if format == "tf_saved_model": + # Handle tf_saved_model differently if it's a function + exporter_cls(model, filepath, **kwargs) + else: + exporter = exporter_cls(model, **kwargs) + exporter.export(filepath) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py new file mode 100644 index 000000000000..5b4ea2e2cf20 --- /dev/null +++ b/keras/src/export/lite_rt_exporter.py @@ -0,0 +1,72 @@ +import tensorflow as tf +from keras.src.export.saved_model import ExportArchive # Adjusted import based on available modules +from keras.src.utils import io_utils + +class LiteRTExporter: + """Custom Keras exporter for LiteRT (TFLite) format, bypassing tf.lite.TFLiteConverter.""" + + def __init__(self, model, input_signature=None, verbose=None, **kwargs): + self.model = model + self.input_signature = input_signature + self.verbose = verbose or 0 + self.kwargs = kwargs # e.g., allow_custom_ops, enable_select_tf_ops, optimizations + + def export(self, filepath): + if self.verbose: + print("Starting custom LiteRT export...") + + # Step 1: Get input signature (use _get_save_spec if not provided) + if self.input_signature is None: + input_signature = self.model._get_save_spec(dynamic_batch=True) + if input_signature is None: + raise ValueError("Unable to infer input signature. Provide `input_signature`.") + else: + input_signature = self.input_signature + + # Step 2: Trace the model to create a concrete function + @tf.function(input_signature=[input_signature]) + def model_fn(*inputs): + return self.model(*inputs) + concrete_fn = model_fn.get_concrete_function() + + # Step 3: Convert to MLIR and apply TFLite passes (bypass high-level converter) + # Use TensorFlow's internal MLIR conversion (inspired by tf_tfl_translate.cc) + from tensorflow.compiler.mlir import tf2tfl # Internal module for MLIR conversion + mlir_module = tf2tfl.convert_function(concrete_fn, enable_select_tf_ops=self.kwargs.get("enable_select_tf_ops", False)) + + # Step 4: Export to FlatBuffer (inspired by ExportFlatbufferOrMlir and TfLiteExporter) + from tensorflow.lite.python import tflite_convert # Use internal conversion + converter_flags = tf.lite.experimental.ConverterFlags() + converter_flags.allow_custom_ops = self.kwargs.get("allow_custom_ops", False) + converter_flags.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", False) + if "optimizations" in self.kwargs: + converter_flags.optimizations = self.kwargs["optimizations"] + + # Perform the conversion using MLIR-to-FlatBuffer (custom logic) + tflite_model = self._mlir_to_flatbuffer(mlir_module, converter_flags, concrete_fn) + + if self.verbose: + print(f"LiteRT model converted. Size: {len(tflite_model)} bytes") + + # Step 5: Save to file + with open(filepath, "wb") as f: + f.write(tflite_model) + + if self.verbose: + print(f"Exported to {filepath}") + + def _mlir_to_flatbuffer(self, mlir_module, converter_flags, concrete_fn): + """Custom MLIR-to-FlatBuffer conversion (inspired by attachments).""" + # Use the standard TFLite converter with our concrete function + try: + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) + converter.allow_custom_ops = converter_flags.allow_custom_ops + converter.enable_select_tf_ops = converter_flags.enable_select_tf_ops + if hasattr(converter_flags, 'optimizations') and converter_flags.optimizations: + converter.optimizations = converter_flags.optimizations + result = converter.convert() + return result + except Exception as e: + if "custom op" in str(e).lower(): + raise ValueError(f"Custom ops detected. Enable allow_custom_ops=True. Details: {e}") + raise diff --git a/keras/src/models/model.py b/keras/src/models/model.py index f75fc2efba9c..a061e6c222d4 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -134,7 +134,6 @@ def call(self, inputs, training=False): keras.Input(shape=(None, None, 3)), keras.layers.Conv2D(filters=32, kernel_size=3), ]) - ``` """ def __new__(cls, *args, **kwargs): @@ -538,7 +537,7 @@ def export( filepath: `str` or `pathlib.Path` object. The path to save the artifact. format: `str`. The export format. Supported values: - `"tf_saved_model"` and `"onnx"`. Defaults to + `"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"lite_rt"`. Defaults to `"tf_saved_model"`. verbose: `bool`. Whether to print a message during export. Defaults to `None`, which uses the default value set by different @@ -562,6 +561,12 @@ def export( provided, they will be automatically computed. - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. + - `allow_custom_ops`: Optional `bool`. Specific to `format="lite_rt"`. + Whether to allow custom operations during conversion. Defaults to `False`. + - `enable_select_tf_ops`: Optional `bool`. Specific to `format="lite_rt"`. + Whether to enable TensorFlow Select ops for unsupported operations. Defaults to `False`. + - `optimizations`: Optional `list`. Specific to `format="lite_rt"`. + List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. @@ -596,12 +601,26 @@ def export( } predictions = ort_session.run(None, ort_inputs) ``` + + Here's how to export a LiteRT (TFLite) for inference. + + ```python + # Export the model as a LiteRT artifact + model.export("path/to/location", format="lite_rt") + + # Load the artifact in a different process/environment + interpreter = tf.lite.Interpreter(model_path="path/to/location") + interpreter.allocate_tensors() + interpreter.set_tensor(interpreter.get_input_details()[0]['index'], input_data) + interpreter.invoke() + output_data = interpreter.get_tensor(interpreter.get_output_details()[0]['index']) + ``` """ from keras.src.export import export_onnx from keras.src.export import export_openvino from keras.src.export import export_saved_model - available_formats = ("tf_saved_model", "onnx", "openvino") + available_formats = ("tf_saved_model", "onnx", "openvino", "lite_rt") if format not in available_formats: raise ValueError( f"Unrecognized format={format}. Supported formats are: " @@ -632,6 +651,16 @@ def export( input_signature=input_signature, **kwargs, ) + elif format == "lite_rt": + from keras.src.export.export_utils import export_model + export_model( + self, + filepath, + format="lite_rt", + verbose=verbose, + input_signature=input_signature, + **kwargs, + ) @classmethod def from_config(cls, config, custom_objects=None): From 631850e6bb39b72d0a814abe28078230d419d5dc Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 13:29:34 +0530 Subject: [PATCH 024/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 5b4ea2e2cf20..c4586f39ab60 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -1,5 +1,6 @@ import tensorflow as tf from keras.src.export.saved_model import ExportArchive # Adjusted import based on available modules +from keras.src.export.export_utils import get_input_signature from keras.src.utils import io_utils class LiteRTExporter: @@ -15,11 +16,9 @@ def export(self, filepath): if self.verbose: print("Starting custom LiteRT export...") - # Step 1: Get input signature (use _get_save_spec if not provided) + # Step 1: Get input signature (use get_input_signature if not provided) if self.input_signature is None: - input_signature = self.model._get_save_spec(dynamic_batch=True) - if input_signature is None: - raise ValueError("Unable to infer input signature. Provide `input_signature`.") + input_signature = get_input_signature(self.model) else: input_signature = self.input_signature From f5aa72e8d27558bc5ebb8138416e609d32477d3b Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 13:32:25 +0530 Subject: [PATCH 025/115] Update export_utils.py --- keras/src/export/export_utils.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 008ab946f32e..04e03ed17415 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -4,6 +4,8 @@ from keras.src import ops from keras.src import tree from keras.src.utils.module_utils import tensorflow as tf +# Import exporters here to avoid circular imports +from keras.src.export.saved_model import export_saved_model def get_input_signature(model): @@ -107,23 +109,33 @@ def convert_spec_to_tensor(spec, replace_none_number=None): return ops.ones(shape, spec.dtype) -# Import exporters here to avoid circular imports -from keras.src.export.saved_model import export_saved_model -from keras.src.export.lite_rt_exporter import LiteRTExporter - # Registry for export formats EXPORT_FORMATS = { - "tf_saved_model": export_saved_model, - "lite_rt": LiteRTExporter, + "tf_saved_model": export_saved_model, # Direct import since it's already imported + "lite_rt": "keras.src.export.lite_rt_exporter:LiteRTExporter", # Add other formats as needed } +def _get_exporter(format_name): + """Lazy import exporter to avoid circular imports.""" + if format_name not in EXPORT_FORMATS: + raise ValueError(f"Unknown export format: {format_name}") + + exporter = EXPORT_FORMATS[format_name] + if isinstance(exporter, str): + # Lazy import for string references + module_path, attr_name = exporter.split(":") + module = __import__(module_path, fromlist=[attr_name]) + return getattr(module, attr_name) + else: + # Direct reference + return exporter + + def export_model(model, filepath, format="tf_saved_model", **kwargs): """Export a model to the specified format.""" - exporter_cls = EXPORT_FORMATS.get(format) - if exporter_cls is None: - raise ValueError(f"Unknown export format: {format}") + exporter_cls = _get_exporter(format) if format == "tf_saved_model": # Handle tf_saved_model differently if it's a function exporter_cls(model, filepath, **kwargs) From 2b952d68511897c4a7d1dbef20c0b8562cd9ca7c Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 13:36:35 +0530 Subject: [PATCH 026/115] Refactor LiteRTExporter to simplify TFLite conversion Replaces the custom MLIR-based TFLite conversion logic in LiteRTExporter with direct use of the standard TFLiteConverter. Also improves input signature handling for tf.function tracing and updates imports accordingly. --- keras/src/export/export_utils.py | 4 +- keras/src/export/lite_rt_exporter.py | 60 +++++++++++++++------------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 04e03ed17415..c8c3f9f7aff2 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -20,9 +20,7 @@ def get_input_signature(model): "before export." ) if isinstance(model, models.Functional): - input_signature = [ - tree.map_structure(make_input_spec, model._inputs_struct) - ] + input_signature = tree.map_structure(make_input_spec, model._inputs_struct) elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index c4586f39ab60..12423ae946d9 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -1,7 +1,6 @@ import tensorflow as tf -from keras.src.export.saved_model import ExportArchive # Adjusted import based on available modules -from keras.src.export.export_utils import get_input_signature -from keras.src.utils import io_utils +from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec +from keras.src import tree class LiteRTExporter: """Custom Keras exporter for LiteRT (TFLite) format, bypassing tf.lite.TFLiteConverter.""" @@ -22,47 +21,54 @@ def export(self, filepath): else: input_signature = self.input_signature + # Convert to TensorFlow TensorSpecs for tf.function + # Handle different input signature structures + if isinstance(input_signature, list): + # List of specs + tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature] + elif hasattr(input_signature, 'shape'): + # Single spec + tf_signature = [make_tf_tensor_spec(input_signature)] + else: + # Try to flatten and convert + tf_signature = [] + def _convert_to_spec(spec): + tf_signature.append(make_tf_tensor_spec(spec)) + + try: + tree.map_structure(_convert_to_spec, input_signature) + except: + # Fallback: assume it's a single spec + tf_signature = [make_tf_tensor_spec(input_signature)] + # Step 2: Trace the model to create a concrete function - @tf.function(input_signature=[input_signature]) + @tf.function(input_signature=tf_signature) def model_fn(*inputs): return self.model(*inputs) concrete_fn = model_fn.get_concrete_function() - # Step 3: Convert to MLIR and apply TFLite passes (bypass high-level converter) - # Use TensorFlow's internal MLIR conversion (inspired by tf_tfl_translate.cc) - from tensorflow.compiler.mlir import tf2tfl # Internal module for MLIR conversion - mlir_module = tf2tfl.convert_function(concrete_fn, enable_select_tf_ops=self.kwargs.get("enable_select_tf_ops", False)) - - # Step 4: Export to FlatBuffer (inspired by ExportFlatbufferOrMlir and TfLiteExporter) - from tensorflow.lite.python import tflite_convert # Use internal conversion - converter_flags = tf.lite.experimental.ConverterFlags() - converter_flags.allow_custom_ops = self.kwargs.get("allow_custom_ops", False) - converter_flags.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", False) - if "optimizations" in self.kwargs: - converter_flags.optimizations = self.kwargs["optimizations"] - - # Perform the conversion using MLIR-to-FlatBuffer (custom logic) - tflite_model = self._mlir_to_flatbuffer(mlir_module, converter_flags, concrete_fn) + # Step 3: Convert using TFLite converter directly (simplified approach) + # Skip the complex MLIR conversion and use the standard converter + tflite_model = self._convert_to_tflite(concrete_fn) if self.verbose: print(f"LiteRT model converted. Size: {len(tflite_model)} bytes") - # Step 5: Save to file + # Step 4: Save to file with open(filepath, "wb") as f: f.write(tflite_model) if self.verbose: print(f"Exported to {filepath}") - def _mlir_to_flatbuffer(self, mlir_module, converter_flags, concrete_fn): - """Custom MLIR-to-FlatBuffer conversion (inspired by attachments).""" - # Use the standard TFLite converter with our concrete function + def _convert_to_tflite(self, concrete_fn): + """Convert concrete function to TFLite using standard converter.""" try: converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) - converter.allow_custom_ops = converter_flags.allow_custom_ops - converter.enable_select_tf_ops = converter_flags.enable_select_tf_ops - if hasattr(converter_flags, 'optimizations') and converter_flags.optimizations: - converter.optimizations = converter_flags.optimizations + converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", False) + converter.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", False) + if "optimizations" in self.kwargs: + converter.optimizations = self.kwargs["optimizations"] result = converter.convert() return result except Exception as e: From 8f81dd58ea25aa678dd1e3eac1afb173f43fbbb8 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 13:40:57 +0530 Subject: [PATCH 027/115] Refactor import structure to avoid circular dependencies Moved imports of get_input_signature and make_tf_tensor_spec inside functions in saved_model.py to prevent circular imports. Updated EXPORT_FORMATS in export_utils.py to use string references instead of direct imports. --- keras/src/export/export_utils.py | 4 +--- keras/src/export/saved_model.py | 5 +++-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index c8c3f9f7aff2..200430ddf8a1 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -4,8 +4,6 @@ from keras.src import ops from keras.src import tree from keras.src.utils.module_utils import tensorflow as tf -# Import exporters here to avoid circular imports -from keras.src.export.saved_model import export_saved_model def get_input_signature(model): @@ -109,7 +107,7 @@ def convert_spec_to_tensor(spec, replace_none_number=None): # Registry for export formats EXPORT_FORMATS = { - "tf_saved_model": export_saved_model, # Direct import since it's already imported + "tf_saved_model": "keras.src.export.saved_model:export_saved_model", "lite_rt": "keras.src.export.lite_rt_exporter:LiteRTExporter", # Add other formats as needed } diff --git a/keras/src/export/saved_model.py b/keras/src/export/saved_model.py index d5009a7ec4a6..32e42d846729 100644 --- a/keras/src/export/saved_model.py +++ b/keras/src/export/saved_model.py @@ -4,8 +4,6 @@ from keras.src import layers from keras.src import tree from keras.src.api_export import keras_export -from keras.src.export.export_utils import get_input_signature -from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils from keras.src.utils.module_utils import tensorflow as tf @@ -358,6 +356,7 @@ def serving_fn(x): self._endpoint_names.append(name) return decorated_fn + from keras.src.export.export_utils import make_tf_tensor_spec input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -415,6 +414,7 @@ def track_and_add_endpoint(self, name, resource, input_signature, **kwargs): f"the jax backend. Current backend: {backend.backend()}" ) + from keras.src.export.export_utils import make_tf_tensor_spec input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -648,6 +648,7 @@ def export_saved_model( verbose = True # Defaults to `True` for all backends. export_archive = ExportArchive() if input_signature is None: + from keras.src.export.export_utils import get_input_signature input_signature = get_input_signature(model) export_archive.track_and_add_endpoint( From 011f1d84e76ea50429a04930293773b3c9f853a6 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 14:00:19 +0530 Subject: [PATCH 028/115] trying kerashub --- keras/src/export/export_utils.py | 33 +++++++++++++++++---- keras/src/export/lite_rt_exporter.py | 44 ++++++++++++++++++++++++---- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 200430ddf8a1..d7e3d31ce7c7 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -22,12 +22,18 @@ def get_input_signature(model): elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: + # For subclassed models, try multiple approaches input_signature = _infer_input_signature_from_model(model) - if not input_signature or not model._called: - raise ValueError( - "The model provided has never called. " - "It must be called at least once before export." - ) + if not input_signature: + # Fallback: Try to get from model.inputs if available + if hasattr(model, 'inputs') and model.inputs: + input_signature = tree.map_structure(make_input_spec, model.inputs) + elif not model._called: + raise ValueError( + "The model provided has never been called and has no " + "detectable input structure. It must be called at least once " + "before export, or you must provide explicit input_signature." + ) return input_signature @@ -58,7 +64,22 @@ def _make_input_spec(structure): f"Unsupported type {type(structure)} for {structure}" ) - return [_make_input_spec(value) for value in shapes_dict.values()] + # Try to reconstruct the input structure from build shapes + if len(shapes_dict) == 1: + # Single input case + return _make_input_spec(list(shapes_dict.values())[0]) + else: + # Multiple inputs - try to determine if it's a dict or list structure + # For Keras-Hub models like Gemma3, inputs are typically dictionaries + input_keys = list(shapes_dict.keys()) + + # Common patterns for multi-input models + if any(key in ['token_ids', 'padding_mask', 'input_ids', 'attention_mask'] for key in input_keys): + # Dictionary input structure (common for transformers) + return {key: _make_input_spec(shape) for key, shape in shapes_dict.items()} + else: + # List input structure + return [_make_input_spec(shape) for shape in shapes_dict.values()] def make_input_spec(x): diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 12423ae946d9..04e3c28ec286 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -23,28 +23,58 @@ def export(self, filepath): # Convert to TensorFlow TensorSpecs for tf.function # Handle different input signature structures - if isinstance(input_signature, list): + tf_signature = [] + + if isinstance(input_signature, dict): + # Dictionary input (e.g., Keras-Hub models like Gemma3) + # Convert dict to ordered list of specs for tf.function + tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature.values()] + input_keys = list(input_signature.keys()) + + # Create a wrapper function that handles dict inputs + @tf.function(input_signature=tf_signature) + def model_fn(*inputs): + # Reconstruct dictionary from positional args + input_dict = {key: tensor for key, tensor in zip(input_keys, inputs)} + return self.model(input_dict) + + elif isinstance(input_signature, list): # List of specs tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature] + + @tf.function(input_signature=tf_signature) + def model_fn(*inputs): + return self.model(*inputs) + elif hasattr(input_signature, 'shape'): # Single spec tf_signature = [make_tf_tensor_spec(input_signature)] + + @tf.function(input_signature=tf_signature) + def model_fn(*inputs): + return self.model(*inputs) + else: # Try to flatten and convert - tf_signature = [] def _convert_to_spec(spec): tf_signature.append(make_tf_tensor_spec(spec)) try: tree.map_structure(_convert_to_spec, input_signature) + + @tf.function(input_signature=tf_signature) + def model_fn(*inputs): + return self.model(*inputs) + except: # Fallback: assume it's a single spec tf_signature = [make_tf_tensor_spec(input_signature)] + + @tf.function(input_signature=tf_signature) + def model_fn(*inputs): + return self.model(*inputs) # Step 2: Trace the model to create a concrete function - @tf.function(input_signature=tf_signature) - def model_fn(*inputs): - return self.model(*inputs) concrete_fn = model_fn.get_concrete_function() # Step 3: Convert using TFLite converter directly (simplified approach) @@ -55,6 +85,10 @@ def model_fn(*inputs): print(f"LiteRT model converted. Size: {len(tflite_model)} bytes") # Step 4: Save to file + # Ensure the filepath has the correct .tflite extension + if not filepath.endswith('.tflite'): + filepath = filepath + '.tflite' + with open(filepath, "wb") as f: f.write(tflite_model) From 9a99a328c51b650e204d5f03d1a49eec39888df1 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 14:21:55 +0530 Subject: [PATCH 029/115] Enhance LiteRT export for sequence models and large models Adds max_sequence_length parameter to input signature generation for sequence models, bounding sequence length for transformer-like architectures. Improves LiteRTExporter with heuristics for complex models, fallback conversion via SavedModel for large models, and exposes max_sequence_length in Model export options. Updates documentation accordingly. --- keras/src/export/export_utils.py | 82 ++++++++++++++++++++++-- keras/src/export/lite_rt_exporter.py | 96 +++++++++++++++++++++++++++- keras/src/models/model.py | 5 +- 3 files changed, 174 insertions(+), 9 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index d7e3d31ce7c7..92e6bbb7de82 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -6,7 +6,20 @@ from keras.src.utils.module_utils import tensorflow as tf -def get_input_signature(model): +def get_input_signature(model, max_sequence_length=512): + """Get input signature for model export. + + Args: + model: A Keras Model instance. + max_sequence_length: Maximum sequence length for sequence models (transformers). + Only applied when the model is detected as a sequence model based on input + names (e.g., 'token_ids', 'input_ids') or shape patterns. For non-sequence + models (e.g., image models), this parameter is ignored and dimensions remain + unbounded. Defaults to 512. + + Returns: + Input signature suitable for model export. + """ if not isinstance(model, models.Model): raise TypeError( "The model must be a `keras.Model`. " @@ -23,7 +36,7 @@ def get_input_signature(model): input_signature = tree.map_structure(make_input_spec, model.inputs) else: # For subclassed models, try multiple approaches - input_signature = _infer_input_signature_from_model(model) + input_signature = _infer_input_signature_from_model(model, max_sequence_length) if not input_signature: # Fallback: Try to get from model.inputs if available if hasattr(model, 'inputs') and model.inputs: @@ -37,11 +50,33 @@ def get_input_signature(model): return input_signature -def _infer_input_signature_from_model(model): +def _infer_input_signature_from_model(model, max_sequence_length=512): shapes_dict = getattr(model, "_build_shapes_dict", None) if not shapes_dict: return None + def _is_sequence_model(): + """Detect if this is a sequence model based on input names and shapes.""" + if not shapes_dict: + return False + + # Check input names for sequence model indicators + input_names = list(shapes_dict.keys()) + sequence_indicators = ['token_ids', 'input_ids', 'tokens', 'input_tokens', + 'padding_mask', 'attention_mask', 'segment_ids'] + + if any(indicator in name.lower() for name in input_names for indicator in sequence_indicators): + return True + + # Check if any input has shape with 2+ dimensions where second dim is None + # This is typical for sequence models: (batch_size, seq_len, ...) + for shape in shapes_dict.values(): + if isinstance(shape, (tuple, list)) and len(shape) >= 2: + if shape[0] is None and shape[1] is None: # (None, None, ...) + return True + + return False + def _make_input_spec(structure): # We need to turn wrapper structures like TrackingDict or _DictWrapper # into plain Python structures because they don't work with jax2tf/JAX. @@ -49,14 +84,51 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): + # Handle shape bounding based on model type + is_sequence_model = _is_sequence_model() + bounded_shape = [] + + for i, dim in enumerate(structure): + if dim is None: + if i == 0: + # Always keep batch dimension as None + bounded_shape.append(None) + elif is_sequence_model and i == 1: + # For sequence models, bound the sequence length dimension + bounded_shape.append(max_sequence_length) + else: + # For non-sequence models or non-sequence dimensions, keep unbounded + # This prevents breaking image models, etc. + bounded_shape.append(None) + else: + bounded_shape.append(dim) + return layers.InputSpec( - shape=(None,) + structure[1:], dtype=model.input_dtype + shape=tuple(bounded_shape), dtype=model.input_dtype ) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): + # Handle shape bounding based on model type + is_sequence_model = _is_sequence_model() + bounded_shape = [] + + for i, dim in enumerate(structure): + if dim is None: + if i == 0: + # Always keep batch dimension as None + bounded_shape.append(None) + elif is_sequence_model and i == 1: + # For sequence models, bound the sequence length dimension + bounded_shape.append(max_sequence_length) + else: + # For non-sequence models or non-sequence dimensions, keep unbounded + bounded_shape.append(None) + else: + bounded_shape.append(dim) + return layers.InputSpec( - shape=[None] + structure[1:], dtype=model.input_dtype + shape=bounded_shape, dtype=model.input_dtype ) return [_make_input_spec(v) for v in structure] else: diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 04e3c28ec286..86ec6e7ee336 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -1,23 +1,34 @@ import tensorflow as tf from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec from keras.src import tree +import tempfile +import os class LiteRTExporter: """Custom Keras exporter for LiteRT (TFLite) format, bypassing tf.lite.TFLiteConverter.""" - def __init__(self, model, input_signature=None, verbose=None, **kwargs): + def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, **kwargs): self.model = model self.input_signature = input_signature self.verbose = verbose or 0 + self.max_sequence_length = max_sequence_length self.kwargs = kwargs # e.g., allow_custom_ops, enable_select_tf_ops, optimizations def export(self, filepath): if self.verbose: print("Starting custom LiteRT export...") + # Pre-flight check for potentially problematic models + if self._is_complex_model(): + if self.verbose: + print("⚠️ Detected complex model. Using enhanced conversion path...") + # For complex models, enable more permissive settings by default + self.kwargs.setdefault("allow_custom_ops", True) + self.kwargs.setdefault("enable_select_tf_ops", True) + # Step 1: Get input signature (use get_input_signature if not provided) if self.input_signature is None: - input_signature = get_input_signature(self.model) + input_signature = get_input_signature(self.model, self.max_sequence_length) else: input_signature = self.input_signature @@ -98,14 +109,93 @@ def model_fn(*inputs): def _convert_to_tflite(self, concrete_fn): """Convert concrete function to TFLite using standard converter.""" try: - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) + # First try with trackable_obj to avoid deprecated path + if hasattr(self.model, '_get_save_spec'): + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_fn], trackable_obj=self.model + ) + else: + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) + + # Apply conversion settings converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", False) converter.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", False) + + # For large models like Keras-Hub, we might need to be more conservative + # Try without optimizations first for complex models if "optimizations" in self.kwargs: converter.optimizations = self.kwargs["optimizations"] + + # Try conversion result = converter.convert() return result + + except RuntimeError as e: + if "size too big" in str(e): + # Handle the overflow issue by trying alternative approaches + return self._convert_with_saved_model_fallback() + else: + raise e except Exception as e: if "custom op" in str(e).lower(): raise ValueError(f"Custom ops detected. Enable allow_custom_ops=True. Details: {e}") raise + + def _convert_with_saved_model_fallback(self): + """Fallback conversion using SavedModel path to avoid size overflow.""" + import tempfile + import os + + try: + # Create a temporary SavedModel + with tempfile.TemporaryDirectory() as temp_dir: + saved_model_path = os.path.join(temp_dir, "temp_saved_model") + + # Export to SavedModel first + from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec + from keras.src.export.saved_model import export_saved_model + + input_signature = get_input_signature(self.model) + export_saved_model(self.model, saved_model_path, input_signature=input_signature) + + # Convert SavedModel to TFLite + converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) + converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", True) # More permissive + converter.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", True) # More permissive + + # Skip optimizations for problematic models + if "optimizations" in self.kwargs and not self._is_complex_model(): + converter.optimizations = self.kwargs["optimizations"] + + result = converter.convert() + return result + + except Exception as e: + raise ValueError(f"Both direct and SavedModel fallback conversion failed. " + f"This model may be too complex for TFLite conversion. Details: {e}") + + def _is_complex_model(self): + """Check if this is a complex model that might have conversion issues.""" + # Heuristics to detect complex models like Keras-Hub transformers + model_name = getattr(self.model, 'name', '').lower() + model_class = self.model.__class__.__name__.lower() + + # Check for transformer/language model indicators + complex_indicators = [ + 'transformer', 'attention', 'bert', 'gpt', 'gemma', 'llama', + 'backbone', 'causal_lm', 'decoder', 'encoder' + ] + + for indicator in complex_indicators: + if indicator in model_name or indicator in model_class: + return True + + # Check for large parameter count + try: + param_count = self.model.count_params() + if param_count > 100_000_000: # 100M parameters + return True + except: + pass + + return False diff --git a/keras/src/models/model.py b/keras/src/models/model.py index a061e6c222d4..33df0897b314 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -562,11 +562,14 @@ def export( - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. - `allow_custom_ops`: Optional `bool`. Specific to `format="lite_rt"`. - Whether to allow custom operations during conversion. Defaults to `False`. + Whether to allow custom operations during conversion. Defaults to `False`.a - `enable_select_tf_ops`: Optional `bool`. Specific to `format="lite_rt"`. Whether to enable TensorFlow Select ops for unsupported operations. Defaults to `False`. - `optimizations`: Optional `list`. Specific to `format="lite_rt"`. List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). + - `max_sequence_length`: Optional `int`. Specific to `format="lite_rt"`. + Maximum sequence length for transformer models to avoid unbounded shapes. + Defaults to `512`. **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. From d0070c61ac7b68ca6bf64c3474b812f2ab971d7e Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 14:35:38 +0530 Subject: [PATCH 030/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 242 +++++++++++---------------- 1 file changed, 101 insertions(+), 141 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 86ec6e7ee336..15c87755886d 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -5,197 +5,157 @@ import os class LiteRTExporter: - """Custom Keras exporter for LiteRT (TFLite) format, bypassing tf.lite.TFLiteConverter.""" + """ + Exporter for the LiteRT (TFLite) format. + """ def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, **kwargs): self.model = model self.input_signature = input_signature self.verbose = verbose or 0 self.max_sequence_length = max_sequence_length - self.kwargs = kwargs # e.g., allow_custom_ops, enable_select_tf_ops, optimizations - + self.kwargs = kwargs + def export(self, filepath): if self.verbose: - print("Starting custom LiteRT export...") - - # Pre-flight check for potentially problematic models - if self._is_complex_model(): - if self.verbose: - print("⚠️ Detected complex model. Using enhanced conversion path...") - # For complex models, enable more permissive settings by default - self.kwargs.setdefault("allow_custom_ops", True) - self.kwargs.setdefault("enable_select_tf_ops", True) - - # Step 1: Get input signature (use get_input_signature if not provided) + print("Starting LiteRT export...") + + # Step 1: Get input signature, applying bounded shapes for sequence models. if self.input_signature is None: input_signature = get_input_signature(self.model, self.max_sequence_length) else: input_signature = self.input_signature - # Convert to TensorFlow TensorSpecs for tf.function - # Handle different input signature structures - tf_signature = [] + # Step 2: Convert to TensorFlow TensorSpecs and create a concrete function. + concrete_fn = self._get_concrete_function(input_signature) + + # Step 3: Convert the concrete function to a TFLite model. + tflite_model = self._convert_to_tflite(concrete_fn) + + if self.verbose: + print(f"LiteRT model converted successfully. Size: {len(tflite_model)} bytes") + + # Step 4: Save the model to the specified file path. + if not filepath.endswith('.tflite'): + filepath = filepath + '.tflite' + + with open(filepath, "wb") as f: + f.write(tflite_model) + + if self.verbose: + print(f"Exported model to {filepath}") + + def _get_concrete_function(self, input_signature): + """Create a tf.function and get its concrete function.""" + # Create a wrapper function that handles different input structures. if isinstance(input_signature, dict): - # Dictionary input (e.g., Keras-Hub models like Gemma3) - # Convert dict to ordered list of specs for tf.function tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature.values()] input_keys = list(input_signature.keys()) - # Create a wrapper function that handles dict inputs @tf.function(input_signature=tf_signature) def model_fn(*inputs): - # Reconstruct dictionary from positional args input_dict = {key: tensor for key, tensor in zip(input_keys, inputs)} return self.model(input_dict) - + elif isinstance(input_signature, list): - # List of specs tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature] @tf.function(input_signature=tf_signature) def model_fn(*inputs): - return self.model(*inputs) - - elif hasattr(input_signature, 'shape'): - # Single spec + return self.model(list(inputs)) + + else: # Assumes a single tensor or spec tf_signature = [make_tf_tensor_spec(input_signature)] @tf.function(input_signature=tf_signature) - def model_fn(*inputs): - return self.model(*inputs) - - else: - # Try to flatten and convert - def _convert_to_spec(spec): - tf_signature.append(make_tf_tensor_spec(spec)) - - try: - tree.map_structure(_convert_to_spec, input_signature) - - @tf.function(input_signature=tf_signature) - def model_fn(*inputs): - return self.model(*inputs) - - except: - # Fallback: assume it's a single spec - tf_signature = [make_tf_tensor_spec(input_signature)] - - @tf.function(input_signature=tf_signature) - def model_fn(*inputs): - return self.model(*inputs) - - # Step 2: Trace the model to create a concrete function - concrete_fn = model_fn.get_concrete_function() - - # Step 3: Convert using TFLite converter directly (simplified approach) - # Skip the complex MLIR conversion and use the standard converter - tflite_model = self._convert_to_tflite(concrete_fn) + def model_fn(input_tensor): + return self.model(input_tensor) - if self.verbose: - print(f"LiteRT model converted. Size: {len(tflite_model)} bytes") - - # Step 4: Save to file - # Ensure the filepath has the correct .tflite extension - if not filepath.endswith('.tflite'): - filepath = filepath + '.tflite' - - with open(filepath, "wb") as f: - f.write(tflite_model) - - if self.verbose: - print(f"Exported to {filepath}") - + return model_fn.get_concrete_function() + def _convert_to_tflite(self, concrete_fn): - """Convert concrete function to TFLite using standard converter.""" + """ + Converts a concrete function to TFLite, using the most appropriate + conversion path based on the model type. + """ + # For complex models (like Keras-Hub transformers), the SavedModel path + # is more robust and correctly handles variable tracking. + if self._is_keras_hub_model(): + if self.verbose: + print("Keras-Hub model detected. Using SavedModel conversion path for robustness.") + return self._convert_via_saved_model(concrete_fn) + + # For standard Keras models, the direct `from_concrete_functions` path is efficient. + else: + if self.verbose: + print("Standard model detected. Using direct conversion path.") + return self._convert_direct(concrete_fn) + + def _convert_direct(self, concrete_fn): + """Directly convert a concrete function to TFLite.""" try: - # First try with trackable_obj to avoid deprecated path + # Use trackable_obj if available to follow best practices. if hasattr(self.model, '_get_save_spec'): converter = tf.lite.TFLiteConverter.from_concrete_functions( [concrete_fn], trackable_obj=self.model ) else: + # This path is deprecated but serves as a fallback. converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) - - # Apply conversion settings - converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", False) - converter.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", False) - - # For large models like Keras-Hub, we might need to be more conservative - # Try without optimizations first for complex models - if "optimizations" in self.kwargs: - converter.optimizations = self.kwargs["optimizations"] - # Try conversion - result = converter.convert() - return result - - except RuntimeError as e: - if "size too big" in str(e): - # Handle the overflow issue by trying alternative approaches - return self._convert_with_saved_model_fallback() - else: - raise e + self._apply_common_converter_settings(converter) + return converter.convert() except Exception as e: - if "custom op" in str(e).lower(): - raise ValueError(f"Custom ops detected. Enable allow_custom_ops=True. Details: {e}") - raise + raise IOError(f"Direct TFLite conversion failed. Error: {e}") - def _convert_with_saved_model_fallback(self): - """Fallback conversion using SavedModel path to avoid size overflow.""" - import tempfile - import os - + def _convert_via_saved_model(self, concrete_fn): + """ + A more robust conversion path that first creates a temporary SavedModel. + This is better for complex models with intricate variable tracking. + """ try: - # Create a temporary SavedModel with tempfile.TemporaryDirectory() as temp_dir: saved_model_path = os.path.join(temp_dir, "temp_saved_model") - # Export to SavedModel first - from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec - from keras.src.export.saved_model import export_saved_model - - input_signature = get_input_signature(self.model) - export_saved_model(self.model, saved_model_path, input_signature=input_signature) + # Saving the concrete function is more reliable than saving the model directly. + tf.saved_model.save(concrete_fn, saved_model_path) - # Convert SavedModel to TFLite converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) - converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", True) # More permissive - converter.enable_select_tf_ops = self.kwargs.get("enable_select_tf_ops", True) # More permissive - - # Skip optimizations for problematic models - if "optimizations" in self.kwargs and not self._is_complex_model(): - converter.optimizations = self.kwargs["optimizations"] - - result = converter.convert() - return result + # Keras-Hub models often require these settings. + self._apply_common_converter_settings(converter, is_complex=True) + return converter.convert() except Exception as e: - raise ValueError(f"Both direct and SavedModel fallback conversion failed. " - f"This model may be too complex for TFLite conversion. Details: {e}") - - def _is_complex_model(self): - """Check if this is a complex model that might have conversion issues.""" - # Heuristics to detect complex models like Keras-Hub transformers - model_name = getattr(self.model, 'name', '').lower() - model_class = self.model.__class__.__name__.lower() - - # Check for transformer/language model indicators - complex_indicators = [ - 'transformer', 'attention', 'bert', 'gpt', 'gemma', 'llama', - 'backbone', 'causal_lm', 'decoder', 'encoder' - ] + raise IOError( + "TFLite conversion via SavedModel path failed. This can occur with " + f"models that have unsupported operations or complex graph structures. Error: {e}" + ) + + def _apply_common_converter_settings(self, converter, is_complex=False): + """Applies shared TFLite converter settings.""" + converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", is_complex) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + if self.kwargs.get("enable_select_tf_ops", is_complex): + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + + if "optimizations" in self.kwargs: + converter.optimizations = self.kwargs["optimizations"] + + def _is_keras_hub_model(self): + """ + Heuristically checks if the model is a complex model from Keras-Hub + that benefits from the SavedModel conversion path. + """ + model_module = getattr(self.model.__class__, '__module__', '').lower() + if 'keras_hub' in model_module: + return True - for indicator in complex_indicators: - if indicator in model_name or indicator in model_class: - return True - - # Check for large parameter count - try: - param_count = self.model.count_params() - if param_count > 100_000_000: # 100M parameters - return True - except: - pass + # Fallback check for models that might not be in the keras_hub module + # but follow similar patterns (e.g., custom backbones). + model_class_name = self.model.__class__.__name__.lower() + complex_indicators = ['backbone', 'causal_lm', 'gemma', 'llama', 'bert'] + if any(indicator in model_class_name for indicator in complex_indicators): + return True return False From 761793fad9ef57d16bb4dedadd3e1b14fe99e8ed Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 28 Aug 2025 14:45:36 +0530 Subject: [PATCH 031/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 15c87755886d..f94e415876e2 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -118,8 +118,9 @@ def _convert_via_saved_model(self, concrete_fn): with tempfile.TemporaryDirectory() as temp_dir: saved_model_path = os.path.join(temp_dir, "temp_saved_model") - # Saving the concrete function is more reliable than saving the model directly. - tf.saved_model.save(concrete_fn, saved_model_path) + # Saving the model with the concrete function as a signature is more + # reliable as it ensures all trackable assets of the model are found. + tf.saved_model.save(self.model, saved_model_path, signatures=concrete_fn) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) From 7bb0506e1aa143d98d3aa0d0db428c42ba68cea8 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 1 Sep 2025 09:36:17 +0530 Subject: [PATCH 032/115] Prevent tensor overflow for large vocabulary models Adds logic to dynamically reduce max_sequence_length for large vocabulary models in export_utils.py to prevent tensor size overflow. In lite_rt_exporter.py, introduces checks and workarounds for models with _DictWrapper issues, and applies memory optimizations for large models during TFLite conversion. These changes improve export reliability and prevent memory errors for models such as Gemma, Llama, and similar architectures. --- keras/src/export/export_utils.py | 49 +++++++++++- keras/src/export/lite_rt_exporter.py | 108 ++++++++++++++++++++++----- 2 files changed, 136 insertions(+), 21 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 92e6bbb7de82..0c7060155725 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -15,7 +15,8 @@ def get_input_signature(model, max_sequence_length=512): Only applied when the model is detected as a sequence model based on input names (e.g., 'token_ids', 'input_ids') or shape patterns. For non-sequence models (e.g., image models), this parameter is ignored and dimensions remain - unbounded. Defaults to 512. + unbounded. For large vocabulary models, this may be automatically reduced + to prevent tensor size overflow. Defaults to 512. Returns: Input signature suitable for model export. @@ -30,13 +31,17 @@ def get_input_signature(model, max_sequence_length=512): "The model provided has not yet been built. It must be built " "before export." ) + + # For large vocabulary models, adjust sequence length to prevent overflow + effective_max_length = _get_safe_sequence_length(model, max_sequence_length) + if isinstance(model, models.Functional): input_signature = tree.map_structure(make_input_spec, model._inputs_struct) elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: # For subclassed models, try multiple approaches - input_signature = _infer_input_signature_from_model(model, max_sequence_length) + input_signature = _infer_input_signature_from_model(model, effective_max_length) if not input_signature: # Fallback: Try to get from model.inputs if available if hasattr(model, 'inputs') and model.inputs: @@ -50,11 +55,45 @@ def get_input_signature(model, max_sequence_length=512): return input_signature +def _get_safe_sequence_length(model, max_sequence_length): + """Get a safe sequence length that won't cause tensor size overflow.""" + model_class_name = getattr(model, '__class__', type(None)).__name__.lower() + model_module = getattr(getattr(model, '__class__', type(None)), '__module__', '').lower() + + # Check if this is a large vocabulary model + large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt'] + is_large_vocab = ( + any(indicator in model_class_name for indicator in large_vocab_indicators) or + 'keras_hub' in model_module + ) + + if is_large_vocab: + # Estimate tensor size: seq_len × vocab_size × 4 bytes (float32) + # Conservative vocab size estimate for large models + estimated_vocab_size = 256000 + estimated_bytes = max_sequence_length * estimated_vocab_size * 4 + + # If estimated size > 512MB, reduce sequence length + max_safe_bytes = 512 * 1024 * 1024 # 512MB + if estimated_bytes > max_safe_bytes: + safe_length = max_safe_bytes // (estimated_vocab_size * 4) + safe_length = max(32, min(safe_length, max_sequence_length)) # At least 32, at most original + if safe_length < max_sequence_length: + print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " + f"for large vocabulary model to prevent tensor size overflow.") + return safe_length + + return max_sequence_length + + def _infer_input_signature_from_model(model, max_sequence_length=512): shapes_dict = getattr(model, "_build_shapes_dict", None) if not shapes_dict: return None + # Use the safe sequence length to prevent overflow + safe_sequence_length = _get_safe_sequence_length(model, max_sequence_length) + def _is_sequence_model(): """Detect if this is a sequence model based on input names and shapes.""" if not shapes_dict: @@ -95,7 +134,8 @@ def _make_input_spec(structure): bounded_shape.append(None) elif is_sequence_model and i == 1: # For sequence models, bound the sequence length dimension - bounded_shape.append(max_sequence_length) + # Using safe sequence length to prevent overflow + bounded_shape.append(safe_sequence_length) else: # For non-sequence models or non-sequence dimensions, keep unbounded # This prevents breaking image models, etc. @@ -120,7 +160,8 @@ def _make_input_spec(structure): bounded_shape.append(None) elif is_sequence_model and i == 1: # For sequence models, bound the sequence length dimension - bounded_shape.append(max_sequence_length) + # Using safe sequence length to prevent overflow + bounded_shape.append(safe_sequence_length) else: # For non-sequence models or non-sequence dimensions, keep unbounded bounded_shape.append(None) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index f94e415876e2..9260898bbae2 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -114,24 +114,75 @@ def _convert_via_saved_model(self, concrete_fn): A more robust conversion path that first creates a temporary SavedModel. This is better for complex models with intricate variable tracking. """ - try: - with tempfile.TemporaryDirectory() as temp_dir: - saved_model_path = os.path.join(temp_dir, "temp_saved_model") - - # Saving the model with the concrete function as a signature is more - # reliable as it ensures all trackable assets of the model are found. - tf.saved_model.save(self.model, saved_model_path, signatures=concrete_fn) - - converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) - - # Keras-Hub models often require these settings. - self._apply_common_converter_settings(converter, is_complex=True) - return converter.convert() - except Exception as e: - raise IOError( - "TFLite conversion via SavedModel path failed. This can occur with " - f"models that have unsupported operations or complex graph structures. Error: {e}" + with tempfile.TemporaryDirectory() as temp_dir: + saved_model_path = os.path.join(temp_dir, "temp_saved_model") + + # Check if we need to use a clean trackable object to avoid _DictWrapper issues + trackable_obj = self._create_clean_trackable_object_if_needed() + + # Saving the model with the concrete function as a signature is more + # reliable as it ensures all trackable assets of the model are found. + tf.saved_model.save( + trackable_obj, saved_model_path, signatures=concrete_fn ) + + converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) + + # Keras-Hub models often require these settings. + self._apply_common_converter_settings(converter, is_complex=True) + return converter.convert() + + def _create_clean_trackable_object_if_needed(self): + """ + Create a clean trackable object if the model contains _DictWrapper objects + that cause issues during TensorFlow's introspection. + """ + # Check if the model has _DictWrapper objects in its trackable children + has_dict_wrapper = self._model_has_dict_wrapper_issues() + + if not has_dict_wrapper: + return self.model + + # Create a clean trackable object to avoid _DictWrapper issues + trackable_obj = tf.__internal__.tracking.AutoTrackable() + + # Copy essential variables from the model + if hasattr(self.model, 'variables'): + trackable_obj.variables = list(self.model.variables) + if hasattr(self.model, 'trainable_variables'): + trackable_obj.trainable_variables = list(self.model.trainable_variables) + if hasattr(self.model, 'non_trainable_variables'): + trackable_obj.non_trainable_variables = list(self.model.non_trainable_variables) + + return trackable_obj + + def _model_has_dict_wrapper_issues(self): + """ + Check if the model contains _DictWrapper objects that cause introspection issues. + """ + # Import _DictWrapper safely + try: + from tensorflow.python.trackable.data_structures import _DictWrapper + except ImportError: + return False + + # Check model's direct attributes for _DictWrapper objects + for attr_name in dir(self.model): + if attr_name.startswith('_'): + continue + try: + attr_value = getattr(self.model, attr_name, None) + if isinstance(attr_value, _DictWrapper): + return True + except (AttributeError, TypeError): + continue + + # Check if model class name suggests complex structures + model_class_name = self.model.__class__.__name__.lower() + if any(indicator in model_class_name for indicator in ['backbone', 'causal_lm', 'gemma', 'llama', 'bert']): + return True + + return False def _apply_common_converter_settings(self, converter, is_complex=False): """Applies shared TFLite converter settings.""" @@ -142,6 +193,29 @@ def _apply_common_converter_settings(self, converter, is_complex=False): if "optimizations" in self.kwargs: converter.optimizations = self.kwargs["optimizations"] + + # For large models, enable memory optimization to prevent overflow + if is_complex and self._is_large_vocabulary_model(): + # Enable optimizations that reduce intermediate tensor sizes + if not converter.optimizations: + converter.optimizations = [tf.lite.Optimize.DEFAULT] + # Use representative dataset for better quantization if available + if "representative_dataset" in self.kwargs: + converter.representative_dataset = self.kwargs["representative_dataset"] + + def _is_large_vocabulary_model(self): + """Check if this is a large vocabulary model that might cause overflow.""" + model_class_name = self.model.__class__.__name__.lower() + model_module = getattr(self.model.__class__, '__module__', '').lower() + + # Models known to have large vocabularies + large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt', 'bert'] + if any(indicator in model_class_name for indicator in large_vocab_indicators): + return True + if 'keras_hub' in model_module: + return True + + return False def _is_keras_hub_model(self): """ From c219eb1349f7bf15c28a056cbf95350bb53012f7 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 1 Sep 2025 09:57:50 +0530 Subject: [PATCH 033/115] Update export_utils.py --- keras/src/export/export_utils.py | 51 ++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 0c7060155725..cb2b3e8c5ee8 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -57,31 +57,36 @@ def get_input_signature(model, max_sequence_length=512): def _get_safe_sequence_length(model, max_sequence_length): """Get a safe sequence length that won't cause tensor size overflow.""" - model_class_name = getattr(model, '__class__', type(None)).__name__.lower() - model_module = getattr(getattr(model, '__class__', type(None)), '__module__', '').lower() - # Check if this is a large vocabulary model - large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt'] - is_large_vocab = ( - any(indicator in model_class_name for indicator in large_vocab_indicators) or - 'keras_hub' in model_module - ) - - if is_large_vocab: - # Estimate tensor size: seq_len × vocab_size × 4 bytes (float32) - # Conservative vocab size estimate for large models - estimated_vocab_size = 256000 - estimated_bytes = max_sequence_length * estimated_vocab_size * 4 + # Try to detect if this model has a large output vocabulary that could cause overflow + try: + # Check if model has vocabulary size attribute (common in language models) + vocab_size = None + if hasattr(model, 'tokenizer') and hasattr(model.tokenizer, 'vocabulary_size'): + vocab_size = model.tokenizer.vocabulary_size() + elif hasattr(model, 'vocabulary_size'): + vocab_size = model.vocabulary_size + elif hasattr(model, 'backbone') and hasattr(model.backbone, 'vocabulary_size'): + vocab_size = model.backbone.vocabulary_size - # If estimated size > 512MB, reduce sequence length - max_safe_bytes = 512 * 1024 * 1024 # 512MB - if estimated_bytes > max_safe_bytes: - safe_length = max_safe_bytes // (estimated_vocab_size * 4) - safe_length = max(32, min(safe_length, max_sequence_length)) # At least 32, at most original - if safe_length < max_sequence_length: - print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " - f"for large vocabulary model to prevent tensor size overflow.") - return safe_length + # If we found a vocabulary size, check for potential overflow + if vocab_size and vocab_size > 50000: # Large vocabulary threshold + # Calculate tensor size: seq_len × vocab_size × 4 bytes (float32) + estimated_bytes = max_sequence_length * vocab_size * 4 + + # Use 10GB as safe limit (well under your 40GB peak but conservative for export) + max_safe_bytes = 10 * 1024 * 1024 * 1024 # 10GB + if estimated_bytes > max_safe_bytes: + safe_length = max_safe_bytes // (vocab_size * 4) + # Ensure at least 64 tokens for meaningful sequences + safe_length = max(64, min(safe_length, max_sequence_length)) + if safe_length < max_sequence_length: + print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " + f"for model with large vocabulary (vocab_size={vocab_size}) to prevent tensor size overflow.") + return safe_length + except Exception: + # If we can't determine vocab size, proceed with original length + pass return max_sequence_length From e26ff6b915a88844b79e0ad2aa365a8074b5dd79 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 1 Sep 2025 18:16:39 +0530 Subject: [PATCH 034/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 47 +++++----------------------- 1 file changed, 7 insertions(+), 40 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 9260898bbae2..04e9822cbc35 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -176,11 +176,6 @@ def _model_has_dict_wrapper_issues(self): return True except (AttributeError, TypeError): continue - - # Check if model class name suggests complex structures - model_class_name = self.model.__class__.__name__.lower() - if any(indicator in model_class_name for indicator in ['backbone', 'causal_lm', 'gemma', 'llama', 'bert']): - return True return False @@ -191,46 +186,18 @@ def _apply_common_converter_settings(self, converter, is_complex=False): if self.kwargs.get("enable_select_tf_ops", is_complex): converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + # Only apply user-specified optimizations, no defaults if "optimizations" in self.kwargs: converter.optimizations = self.kwargs["optimizations"] - - # For large models, enable memory optimization to prevent overflow - if is_complex and self._is_large_vocabulary_model(): - # Enable optimizations that reduce intermediate tensor sizes - if not converter.optimizations: - converter.optimizations = [tf.lite.Optimize.DEFAULT] - # Use representative dataset for better quantization if available - if "representative_dataset" in self.kwargs: - converter.representative_dataset = self.kwargs["representative_dataset"] - - def _is_large_vocabulary_model(self): - """Check if this is a large vocabulary model that might cause overflow.""" - model_class_name = self.model.__class__.__name__.lower() - model_module = getattr(self.model.__class__, '__module__', '').lower() - # Models known to have large vocabularies - large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt', 'bert'] - if any(indicator in model_class_name for indicator in large_vocab_indicators): - return True - if 'keras_hub' in model_module: - return True - - return False + # Only apply user-specified representative dataset + if "representative_dataset" in self.kwargs: + converter.representative_dataset = self.kwargs["representative_dataset"] def _is_keras_hub_model(self): """ - Heuristically checks if the model is a complex model from Keras-Hub - that benefits from the SavedModel conversion path. + Checks if the model is from Keras-Hub based on module path only. + Keras-Hub models benefit from the SavedModel conversion path. """ model_module = getattr(self.model.__class__, '__module__', '').lower() - if 'keras_hub' in model_module: - return True - - # Fallback check for models that might not be in the keras_hub module - # but follow similar patterns (e.g., custom backbones). - model_class_name = self.model.__class__.__name__.lower() - complex_indicators = ['backbone', 'causal_lm', 'gemma', 'llama', 'bert'] - if any(indicator in model_class_name for indicator in complex_indicators): - return True - - return False + return 'keras_hub' in model_module From 4a32e04f52e0d1c1dd37443a661394f968e298c0 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 1 Sep 2025 19:11:32 +0530 Subject: [PATCH 035/115] Simplify TFLite export and sequence length safety checks Removed custom trackable object logic from LiteRTExporter and now save the model directly, simplifying the export process. Also streamlined vocabulary size checks in export_utils to prevent tensor size overflow, removing verbose warnings and redundant comments. --- keras/src/export/export_utils.py | 14 ++---- keras/src/export/lite_rt_exporter.py | 72 +--------------------------- 2 files changed, 6 insertions(+), 80 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index cb2b3e8c5ee8..d15cea2446c4 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -58,9 +58,8 @@ def get_input_signature(model, max_sequence_length=512): def _get_safe_sequence_length(model, max_sequence_length): """Get a safe sequence length that won't cause tensor size overflow.""" - # Try to detect if this model has a large output vocabulary that could cause overflow + # Try to detect vocabulary size to prevent overflow try: - # Check if model has vocabulary size attribute (common in language models) vocab_size = None if hasattr(model, 'tokenizer') and hasattr(model.tokenizer, 'vocabulary_size'): vocab_size = model.tokenizer.vocabulary_size() @@ -69,23 +68,18 @@ def _get_safe_sequence_length(model, max_sequence_length): elif hasattr(model, 'backbone') and hasattr(model.backbone, 'vocabulary_size'): vocab_size = model.backbone.vocabulary_size - # If we found a vocabulary size, check for potential overflow - if vocab_size and vocab_size > 50000: # Large vocabulary threshold + # If we have a large vocabulary, check for potential overflow + if vocab_size and vocab_size > 50000: # Calculate tensor size: seq_len × vocab_size × 4 bytes (float32) estimated_bytes = max_sequence_length * vocab_size * 4 - # Use 10GB as safe limit (well under your 40GB peak but conservative for export) + # Use 10GB as safe limit for export operations max_safe_bytes = 10 * 1024 * 1024 * 1024 # 10GB if estimated_bytes > max_safe_bytes: safe_length = max_safe_bytes // (vocab_size * 4) - # Ensure at least 64 tokens for meaningful sequences safe_length = max(64, min(safe_length, max_sequence_length)) - if safe_length < max_sequence_length: - print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " - f"for model with large vocabulary (vocab_size={vocab_size}) to prevent tensor size overflow.") return safe_length except Exception: - # If we can't determine vocab size, proceed with original length pass return max_sequence_length diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 04e9822cbc35..942523fe3694 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -117,83 +117,15 @@ def _convert_via_saved_model(self, concrete_fn): with tempfile.TemporaryDirectory() as temp_dir: saved_model_path = os.path.join(temp_dir, "temp_saved_model") - # Check if we need to use a clean trackable object to avoid _DictWrapper issues - trackable_obj = self._create_clean_trackable_object_if_needed() - - # Saving the model with the concrete function as a signature is more - # reliable as it ensures all trackable assets of the model are found. + # Save the model with the concrete function as a signature tf.saved_model.save( - trackable_obj, saved_model_path, signatures=concrete_fn + self.model, saved_model_path, signatures=concrete_fn ) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) - - # Keras-Hub models often require these settings. self._apply_common_converter_settings(converter, is_complex=True) return converter.convert() - def _create_clean_trackable_object_if_needed(self): - """ - Create a clean trackable object if the model contains _DictWrapper objects - that cause issues during TensorFlow's introspection. - """ - # Check if the model has _DictWrapper objects in its trackable children - has_dict_wrapper = self._model_has_dict_wrapper_issues() - - if not has_dict_wrapper: - return self.model - - # Create a clean trackable object to avoid _DictWrapper issues - trackable_obj = tf.__internal__.tracking.AutoTrackable() - - # Copy essential variables from the model - if hasattr(self.model, 'variables'): - trackable_obj.variables = list(self.model.variables) - if hasattr(self.model, 'trainable_variables'): - trackable_obj.trainable_variables = list(self.model.trainable_variables) - if hasattr(self.model, 'non_trainable_variables'): - trackable_obj.non_trainable_variables = list(self.model.non_trainable_variables) - - return trackable_obj - - def _model_has_dict_wrapper_issues(self): - """ - Check if the model contains _DictWrapper objects that cause introspection issues. - """ - # Import _DictWrapper safely - try: - from tensorflow.python.trackable.data_structures import _DictWrapper - except ImportError: - return False - - # Check model's direct attributes for _DictWrapper objects - for attr_name in dir(self.model): - if attr_name.startswith('_'): - continue - try: - attr_value = getattr(self.model, attr_name, None) - if isinstance(attr_value, _DictWrapper): - return True - except (AttributeError, TypeError): - continue - - return False - - def _apply_common_converter_settings(self, converter, is_complex=False): - """Applies shared TFLite converter settings.""" - converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", is_complex) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - if self.kwargs.get("enable_select_tf_ops", is_complex): - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - # Only apply user-specified optimizations, no defaults - if "optimizations" in self.kwargs: - converter.optimizations = self.kwargs["optimizations"] - - # Only apply user-specified representative dataset - if "representative_dataset" in self.kwargs: - converter.representative_dataset = self.kwargs["representative_dataset"] - def _is_keras_hub_model(self): """ Checks if the model is from Keras-Hub based on module path only. From 926b0a84985c74152fc448c5fb96fcfa10aa12bb Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 09:29:23 +0530 Subject: [PATCH 036/115] Refactor TFLite export logic and add simple exporter Refactors the TFLite conversion logic in lite_rt_exporter.py to attempt direct conversion first and only fall back to SavedModel if necessary, improving robustness and clarity. Adds a new lite_rt_exporter_simple.py file with a streamlined LiteRTExporter class for direct TFLite export, bypassing complex MLIR conversion paths. --- keras/src/export/lite_rt_exporter.py | 73 ++++++++++++++++------------ 1 file changed, 43 insertions(+), 30 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 942523fe3694..0d16bc2b1199 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -3,6 +3,7 @@ from keras.src import tree import tempfile import os +from pathlib import Path class LiteRTExporter: """ @@ -76,38 +77,37 @@ def model_fn(input_tensor): def _convert_to_tflite(self, concrete_fn): """ - Converts a concrete function to TFLite, using the most appropriate - conversion path based on the model type. + Converts a concrete function to TFLite using direct conversion first. + Falls back to SavedModel only if direct conversion fails. """ - # For complex models (like Keras-Hub transformers), the SavedModel path - # is more robust and correctly handles variable tracking. - if self._is_keras_hub_model(): + # Try direct conversion first - this avoids _DictWrapper issues entirely + try: if self.verbose: - print("Keras-Hub model detected. Using SavedModel conversion path for robustness.") - return self._convert_via_saved_model(concrete_fn) - - # For standard Keras models, the direct `from_concrete_functions` path is efficient. - else: + print("Attempting direct TFLite conversion...") + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) + self._apply_common_converter_settings(converter, is_complex=False) + return converter.convert() + except Exception as e: + # Only fall back to SavedModel if direct conversion fails if self.verbose: - print("Standard model detected. Using direct conversion path.") - return self._convert_direct(concrete_fn) + print(f"Direct conversion failed: {e}") + print("Falling back to SavedModel conversion path...") + return self._convert_via_saved_model(concrete_fn) - def _convert_direct(self, concrete_fn): - """Directly convert a concrete function to TFLite.""" - try: - # Use trackable_obj if available to follow best practices. - if hasattr(self.model, '_get_save_spec'): - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_fn], trackable_obj=self.model - ) - else: - # This path is deprecated but serves as a fallback. - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) + def _convert_via_saved_model(self, concrete_fn): + """Fallback conversion via SavedModel for edge cases.""" + with tempfile.TemporaryDirectory() as temp_dir: + saved_model_dir = Path(temp_dir) / "saved_model" - self._apply_common_converter_settings(converter) + tf.saved_model.save( + self.model, + str(saved_model_dir), + signatures={"serving_default": concrete_fn} + ) + + converter = tf.lite.TFLiteConverter.from_saved_model(str(saved_model_dir)) + self._apply_common_converter_settings(converter, is_complex=True) return converter.convert() - except Exception as e: - raise IOError(f"Direct TFLite conversion failed. Error: {e}") def _convert_via_saved_model(self, concrete_fn): """ @@ -117,10 +117,23 @@ def _convert_via_saved_model(self, concrete_fn): with tempfile.TemporaryDirectory() as temp_dir: saved_model_path = os.path.join(temp_dir, "temp_saved_model") - # Save the model with the concrete function as a signature - tf.saved_model.save( - self.model, saved_model_path, signatures=concrete_fn - ) + # Try normal path first, fallback if TensorFlow introspection fails + try: + tf.saved_model.save( + self.model, saved_model_path, signatures=concrete_fn + ) + except TypeError as e: + if "_DictWrapper" in str(e) or "__dict__ descriptor" in str(e): + if self.verbose: + print("Using fallback SavedModel path due to TensorFlow introspection issue.") + # Fallback: save with minimal trackable object + minimal_obj = tf.Module() + tf.saved_model.save( + minimal_obj, saved_model_path, + signatures={'serving_default': concrete_fn} + ) + else: + raise e converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) self._apply_common_converter_settings(converter, is_complex=True) From 4a8a9d5a1a40bc1a25de5e7491ffa18ecb5a9798 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 09:54:38 +0530 Subject: [PATCH 037/115] Improve export robustness for large vocab and Keras-Hub models Refactors export_utils and lite_rt_exporter to better detect large vocabulary and Keras-Hub models, applying safer sequence length limits and more robust TFLite conversion paths. Adds heuristics for model type detection, ensures memory safety, and improves handling of TensorFlow introspection issues during export. --- keras/src/export/export_utils.py | 47 +++---- keras/src/export/lite_rt_exporter.py | 180 ++++++++++++++++++++------- 2 files changed, 158 insertions(+), 69 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index d15cea2446c4..a71cbca4127c 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -57,30 +57,31 @@ def get_input_signature(model, max_sequence_length=512): def _get_safe_sequence_length(model, max_sequence_length): """Get a safe sequence length that won't cause tensor size overflow.""" + model_class_name = getattr(model, '__class__', type(None)).__name__.lower() + model_module = getattr(getattr(model, '__class__', type(None)), '__module__', '').lower() - # Try to detect vocabulary size to prevent overflow - try: - vocab_size = None - if hasattr(model, 'tokenizer') and hasattr(model.tokenizer, 'vocabulary_size'): - vocab_size = model.tokenizer.vocabulary_size() - elif hasattr(model, 'vocabulary_size'): - vocab_size = model.vocabulary_size - elif hasattr(model, 'backbone') and hasattr(model.backbone, 'vocabulary_size'): - vocab_size = model.backbone.vocabulary_size + # Check if this is a large vocabulary model + large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt'] + is_large_vocab = ( + any(indicator in model_class_name for indicator in large_vocab_indicators) or + 'keras_hub' in model_module + ) + + if is_large_vocab: + # Estimate tensor size: seq_len × vocab_size × 4 bytes (float32) + # Conservative vocab size estimate for large models + estimated_vocab_size = 256000 + estimated_bytes = max_sequence_length * estimated_vocab_size * 4 - # If we have a large vocabulary, check for potential overflow - if vocab_size and vocab_size > 50000: - # Calculate tensor size: seq_len × vocab_size × 4 bytes (float32) - estimated_bytes = max_sequence_length * vocab_size * 4 - - # Use 10GB as safe limit for export operations - max_safe_bytes = 10 * 1024 * 1024 * 1024 # 10GB - if estimated_bytes > max_safe_bytes: - safe_length = max_safe_bytes // (vocab_size * 4) - safe_length = max(64, min(safe_length, max_sequence_length)) - return safe_length - except Exception: - pass + # If estimated size > 512MB, reduce sequence length + max_safe_bytes = 512 * 1024 * 1024 # 512MB + if estimated_bytes > max_safe_bytes: + safe_length = max_safe_bytes // (estimated_vocab_size * 4) + safe_length = max(32, min(safe_length, max_sequence_length)) # At least 32, at most original + if safe_length < max_sequence_length: + print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " + f"for large vocabulary model to prevent tensor size overflow.") + return safe_length return max_sequence_length @@ -270,4 +271,4 @@ def export_model(model, filepath, format="tf_saved_model", **kwargs): exporter_cls(model, filepath, **kwargs) else: exporter = exporter_cls(model, **kwargs) - exporter.export(filepath) + exporter.export(filepath) \ No newline at end of file diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 0d16bc2b1199..5b24474732b7 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -3,7 +3,6 @@ from keras.src import tree import tempfile import os -from pathlib import Path class LiteRTExporter: """ @@ -77,37 +76,38 @@ def model_fn(input_tensor): def _convert_to_tflite(self, concrete_fn): """ - Converts a concrete function to TFLite using direct conversion first. - Falls back to SavedModel only if direct conversion fails. + Converts a concrete function to TFLite, using the most appropriate + conversion path based on the model type. """ - # Try direct conversion first - this avoids _DictWrapper issues entirely - try: - if self.verbose: - print("Attempting direct TFLite conversion...") - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) - self._apply_common_converter_settings(converter, is_complex=False) - return converter.convert() - except Exception as e: - # Only fall back to SavedModel if direct conversion fails + # For complex models (like Keras-Hub transformers), the SavedModel path + # is more robust and correctly handles variable tracking. + if self._is_keras_hub_model(): if self.verbose: - print(f"Direct conversion failed: {e}") - print("Falling back to SavedModel conversion path...") + print("Keras-Hub model detected. Using SavedModel conversion path for robustness.") return self._convert_via_saved_model(concrete_fn) - def _convert_via_saved_model(self, concrete_fn): - """Fallback conversion via SavedModel for edge cases.""" - with tempfile.TemporaryDirectory() as temp_dir: - saved_model_dir = Path(temp_dir) / "saved_model" - - tf.saved_model.save( - self.model, - str(saved_model_dir), - signatures={"serving_default": concrete_fn} - ) + # For standard Keras models, the direct `from_concrete_functions` path is efficient. + else: + if self.verbose: + print("Standard model detected. Using direct conversion path.") + return self._convert_direct(concrete_fn) + + def _convert_direct(self, concrete_fn): + """Directly convert a concrete function to TFLite.""" + try: + # Use trackable_obj if available to follow best practices. + if hasattr(self.model, '_get_save_spec'): + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_fn], trackable_obj=self.model + ) + else: + # This path is deprecated but serves as a fallback. + converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) - converter = tf.lite.TFLiteConverter.from_saved_model(str(saved_model_dir)) - self._apply_common_converter_settings(converter, is_complex=True) + self._apply_common_converter_settings(converter) return converter.convert() + except Exception as e: + raise IOError(f"Direct TFLite conversion failed. Error: {e}") def _convert_via_saved_model(self, concrete_fn): """ @@ -117,32 +117,120 @@ def _convert_via_saved_model(self, concrete_fn): with tempfile.TemporaryDirectory() as temp_dir: saved_model_path = os.path.join(temp_dir, "temp_saved_model") - # Try normal path first, fallback if TensorFlow introspection fails - try: - tf.saved_model.save( - self.model, saved_model_path, signatures=concrete_fn - ) - except TypeError as e: - if "_DictWrapper" in str(e) or "__dict__ descriptor" in str(e): - if self.verbose: - print("Using fallback SavedModel path due to TensorFlow introspection issue.") - # Fallback: save with minimal trackable object - minimal_obj = tf.Module() - tf.saved_model.save( - minimal_obj, saved_model_path, - signatures={'serving_default': concrete_fn} - ) - else: - raise e + # Check if we need to use a clean trackable object to avoid _DictWrapper issues + trackable_obj = self._create_clean_trackable_object_if_needed() + + # Saving the model with the concrete function as a signature is more + # reliable as it ensures all trackable assets of the model are found. + tf.saved_model.save( + trackable_obj, saved_model_path, signatures=concrete_fn + ) converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) + + # Keras-Hub models often require these settings. self._apply_common_converter_settings(converter, is_complex=True) return converter.convert() + def _create_clean_trackable_object_if_needed(self): + """ + Create a clean trackable object if the model contains _DictWrapper objects + that cause issues during TensorFlow's introspection. + """ + # Check if the model has _DictWrapper objects in its trackable children + has_dict_wrapper = self._model_has_dict_wrapper_issues() + + if not has_dict_wrapper: + return self.model + + # Create a clean trackable object to avoid _DictWrapper issues + trackable_obj = tf.__internal__.tracking.AutoTrackable() + + # Copy essential variables from the model + if hasattr(self.model, 'variables'): + trackable_obj.variables = list(self.model.variables) + if hasattr(self.model, 'trainable_variables'): + trackable_obj.trainable_variables = list(self.model.trainable_variables) + if hasattr(self.model, 'non_trainable_variables'): + trackable_obj.non_trainable_variables = list(self.model.non_trainable_variables) + + return trackable_obj + + def _model_has_dict_wrapper_issues(self): + """ + Check if the model contains _DictWrapper objects that cause introspection issues. + """ + # Import _DictWrapper safely + try: + from tensorflow.python.trackable.data_structures import _DictWrapper + except ImportError: + return False + + # Check model's direct attributes for _DictWrapper objects + for attr_name in dir(self.model): + if attr_name.startswith('_'): + continue + try: + attr_value = getattr(self.model, attr_name, None) + if isinstance(attr_value, _DictWrapper): + return True + except (AttributeError, TypeError): + continue + + # Check if model class name suggests complex structures + model_class_name = self.model.__class__.__name__.lower() + if any(indicator in model_class_name for indicator in ['backbone', 'causal_lm', 'gemma', 'llama', 'bert']): + return True + + return False + + def _apply_common_converter_settings(self, converter, is_complex=False): + """Applies shared TFLite converter settings.""" + converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", is_complex) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + if self.kwargs.get("enable_select_tf_ops", is_complex): + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + + if "optimizations" in self.kwargs: + converter.optimizations = self.kwargs["optimizations"] + + # For large models, enable memory optimization to prevent overflow + if is_complex and self._is_large_vocabulary_model(): + # Enable optimizations that reduce intermediate tensor sizes + if not converter.optimizations: + converter.optimizations = [tf.lite.Optimize.DEFAULT] + # Use representative dataset for better quantization if available + if "representative_dataset" in self.kwargs: + converter.representative_dataset = self.kwargs["representative_dataset"] + + def _is_large_vocabulary_model(self): + """Check if this is a large vocabulary model that might cause overflow.""" + model_class_name = self.model.__class__.__name__.lower() + model_module = getattr(self.model.__class__, '__module__', '').lower() + + # Models known to have large vocabularies + large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt', 'bert'] + if any(indicator in model_class_name for indicator in large_vocab_indicators): + return True + if 'keras_hub' in model_module: + return True + + return False + def _is_keras_hub_model(self): """ - Checks if the model is from Keras-Hub based on module path only. - Keras-Hub models benefit from the SavedModel conversion path. + Heuristically checks if the model is a complex model from Keras-Hub + that benefits from the SavedModel conversion path. """ model_module = getattr(self.model.__class__, '__module__', '').lower() - return 'keras_hub' in model_module + if 'keras_hub' in model_module: + return True + + # Fallback check for models that might not be in the keras_hub module + # but follow similar patterns (e.g., custom backbones). + model_class_name = self.model.__class__.__name__.lower() + complex_indicators = ['backbone', 'causal_lm', 'gemma', 'llama', 'bert'] + if any(indicator in model_class_name for indicator in complex_indicators): + return True + + return False \ No newline at end of file From f4b43b4288d686caf7512f330623829cbd437740 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 10:18:53 +0530 Subject: [PATCH 038/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 336 +++++++++++++-------------- 1 file changed, 162 insertions(+), 174 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 5b24474732b7..7d8a83e6fc36 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -1,9 +1,75 @@ import tensorflow as tf from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec -from keras.src import tree +from keras import tree import tempfile import os +class GenerationModule(tf.Module): + """ + A tf.Module to wrap a CausalLM for exporting to TFLite with separate + prompt processing and token generation functions. + """ + def __init__(self, model, max_sequence_length): + super().__init__() + self.model = model + self.max_sequence_length = max_sequence_length + + @tf.function + def initialize(self, token_ids, padding_mask): + """ + Initializes the key/value cache by processing the input prompt. + + This function creates an empty cache and then "seeds" it by running a + forward pass on the prompt. This is equivalent to the `_build_cache` + logic in KerasNLP's CausalLM models. + + Args: + token_ids: A tf.Tensor of shape [batch_size, seq_len]. + padding_mask: A tf.Tensor of shape [batch_size, seq_len]. + + Returns: + A dictionary containing the `initial_cache`. + """ + backbone = self.model.backbone + batch_size = tf.shape(token_ids)[0] + cache_shape = [ + batch_size, + backbone.num_layers, + 2, # For key and value + self.max_sequence_length, + backbone.num_key_value_heads, + backbone.head_dim, + ] + # Create an empty cache with a static max_sequence_length. + cache = tf.zeros(cache_shape, dtype=self.model.compute_dtype) + + # Seed the cache by calling call_with_cache on the prompt. + # The cache_update_index is 0, and the tokens are the whole prompt. + # We only need the resulting cache. + _, _, seeded_cache = self.model.call_with_cache( + token_ids, cache, cache_update_index=0 + ) + return {"initial_cache": seeded_cache} + + @tf.function + def decode(self, token_ids, cache, cache_update_index): + """ + Performs one decoding step to generate the next token. + + Args: + token_ids: The current token, shape [batch_size, 1]. + cache: The key/value cache from the previous step. + cache_update_index: The index at which to update the cache. + + Returns: + A dictionary containing the `logits` and the `updated_cache`. + """ + logits, _, new_cache = self.model.call_with_cache( + token_ids, cache, cache_update_index + ) + return {"logits": logits, "updated_cache": new_cache} + + class LiteRTExporter: """ Exporter for the LiteRT (TFLite) format. @@ -19,18 +85,8 @@ def __init__(self, model, input_signature=None, verbose=None, max_sequence_lengt def export(self, filepath): if self.verbose: print("Starting LiteRT export...") - - # Step 1: Get input signature, applying bounded shapes for sequence models. - if self.input_signature is None: - input_signature = get_input_signature(self.model, self.max_sequence_length) - else: - input_signature = self.input_signature - - # Step 2: Convert to TensorFlow TensorSpecs and create a concrete function. - concrete_fn = self._get_concrete_function(input_signature) - # Step 3: Convert the concrete function to a TFLite model. - tflite_model = self._convert_to_tflite(concrete_fn) + tflite_model = self._convert_to_tflite() if self.verbose: print(f"LiteRT model converted successfully. Size: {len(tflite_model)} bytes") @@ -45,10 +101,92 @@ def export(self, filepath): if self.verbose: print(f"Exported model to {filepath}") + def _convert_to_tflite(self): + """ + Converts a Keras model to TFLite, automatically selecting the best + conversion path based on the model's architecture. + """ + # Use duck-typing to check for a Keras-Hub style CausalLM model + # that supports efficient, cached generation. + is_generative = hasattr(self.model, "call_with_cache") and hasattr( + self.model.backbone, "num_key_value_heads" + ) + + if is_generative: + if self.verbose: + print( + "Generative CausalLM model detected. Exporting with " + "'initialize' and 'decode' signatures for efficient generation." + ) + return self._convert_generative_model() + else: + if self.verbose: + print( + "Standard model detected. Using direct conversion path " + "with a single 'serving_default' signature." + ) + # Fallback to the standard conversion for non-generative models. + if self.input_signature is None: + self.input_signature = get_input_signature( + self.model, self.max_sequence_length + ) + concrete_fn = self._get_concrete_function(self.input_signature) + return self._convert_direct(concrete_fn) + + def _convert_generative_model(self): + """ + Exports a CausalLM model via SavedModel with two distinct signatures + for 'initialize' (prompt processing) and 'decode' (token generation). + """ + module = GenerationModule(self.model, self.max_sequence_length) + backbone = self.model.backbone + + # 1. Define the TensorSpec for the 'initialize' signature. + init_signature = ( + tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="token_ids"), + tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="padding_mask"), + ) + + # 2. Define the TensorSpec for the 'decode' signature. + cache_shape = [ + None, # batch_size + backbone.num_layers, + 2, # key and value + self.max_sequence_length, + backbone.num_key_value_heads, + backbone.head_dim, + ] + decode_signature = ( + tf.TensorSpec(shape=[None, 1], dtype=tf.int32, name="token_ids"), + tf.TensorSpec(shape=cache_shape, dtype=self.model.compute_dtype, name="cache"), + tf.TensorSpec(shape=[], dtype=tf.int32, name="cache_update_index"), + ) + + # 3. Get concrete functions and bundle them as signatures. + signatures = { + "initialize": module.initialize.get_concrete_function(*init_signature), + "decode": module.decode.get_concrete_function(*decode_signature), + } + + # 4. Save as a SavedModel and then convert to TFLite. + with tempfile.TemporaryDirectory() as tmpdir: + if self.verbose: + print(f"Saving temporary SavedModel to {tmpdir}") + tf.saved_model.save(module, tmpdir, signatures=signatures) + + converter = tf.lite.TFLiteConverter.from_saved_model(tmpdir) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, + ] + # This is crucial for including both functions in the TFLite model. + converter.signature_keys = ["initialize", "decode"] + tflite_model = converter.convert() + + return tflite_model + def _get_concrete_function(self, input_signature): - """Create a tf.function and get its concrete function.""" - - # Create a wrapper function that handles different input structures. + """Create a tf.function and get its concrete function (for non-generative models).""" if isinstance(input_signature, dict): tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature.values()] input_keys = list(input_signature.keys()) @@ -74,163 +212,13 @@ def model_fn(input_tensor): return model_fn.get_concrete_function() - def _convert_to_tflite(self, concrete_fn): - """ - Converts a concrete function to TFLite, using the most appropriate - conversion path based on the model type. - """ - # For complex models (like Keras-Hub transformers), the SavedModel path - # is more robust and correctly handles variable tracking. - if self._is_keras_hub_model(): - if self.verbose: - print("Keras-Hub model detected. Using SavedModel conversion path for robustness.") - return self._convert_via_saved_model(concrete_fn) - - # For standard Keras models, the direct `from_concrete_functions` path is efficient. - else: - if self.verbose: - print("Standard model detected. Using direct conversion path.") - return self._convert_direct(concrete_fn) - def _convert_direct(self, concrete_fn): - """Directly convert a concrete function to TFLite.""" - try: - # Use trackable_obj if available to follow best practices. - if hasattr(self.model, '_get_save_spec'): - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_fn], trackable_obj=self.model - ) - else: - # This path is deprecated but serves as a fallback. - converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn]) - - self._apply_common_converter_settings(converter) - return converter.convert() - except Exception as e: - raise IOError(f"Direct TFLite conversion failed. Error: {e}") - - def _convert_via_saved_model(self, concrete_fn): - """ - A more robust conversion path that first creates a temporary SavedModel. - This is better for complex models with intricate variable tracking. - """ - with tempfile.TemporaryDirectory() as temp_dir: - saved_model_path = os.path.join(temp_dir, "temp_saved_model") - - # Check if we need to use a clean trackable object to avoid _DictWrapper issues - trackable_obj = self._create_clean_trackable_object_if_needed() - - # Saving the model with the concrete function as a signature is more - # reliable as it ensures all trackable assets of the model are found. - tf.saved_model.save( - trackable_obj, saved_model_path, signatures=concrete_fn - ) - - converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path) - - # Keras-Hub models often require these settings. - self._apply_common_converter_settings(converter, is_complex=True) - return converter.convert() - - def _create_clean_trackable_object_if_needed(self): - """ - Create a clean trackable object if the model contains _DictWrapper objects - that cause issues during TensorFlow's introspection. - """ - # Check if the model has _DictWrapper objects in its trackable children - has_dict_wrapper = self._model_has_dict_wrapper_issues() - - if not has_dict_wrapper: - return self.model - - # Create a clean trackable object to avoid _DictWrapper issues - trackable_obj = tf.__internal__.tracking.AutoTrackable() - - # Copy essential variables from the model - if hasattr(self.model, 'variables'): - trackable_obj.variables = list(self.model.variables) - if hasattr(self.model, 'trainable_variables'): - trackable_obj.trainable_variables = list(self.model.trainable_variables) - if hasattr(self.model, 'non_trainable_variables'): - trackable_obj.non_trainable_variables = list(self.model.non_trainable_variables) - - return trackable_obj - - def _model_has_dict_wrapper_issues(self): - """ - Check if the model contains _DictWrapper objects that cause introspection issues. - """ - # Import _DictWrapper safely - try: - from tensorflow.python.trackable.data_structures import _DictWrapper - except ImportError: - return False - - # Check model's direct attributes for _DictWrapper objects - for attr_name in dir(self.model): - if attr_name.startswith('_'): - continue - try: - attr_value = getattr(self.model, attr_name, None) - if isinstance(attr_value, _DictWrapper): - return True - except (AttributeError, TypeError): - continue - - # Check if model class name suggests complex structures - model_class_name = self.model.__class__.__name__.lower() - if any(indicator in model_class_name for indicator in ['backbone', 'causal_lm', 'gemma', 'llama', 'bert']): - return True - - return False - - def _apply_common_converter_settings(self, converter, is_complex=False): - """Applies shared TFLite converter settings.""" - converter.allow_custom_ops = self.kwargs.get("allow_custom_ops", is_complex) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - if self.kwargs.get("enable_select_tf_ops", is_complex): - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - if "optimizations" in self.kwargs: - converter.optimizations = self.kwargs["optimizations"] - - # For large models, enable memory optimization to prevent overflow - if is_complex and self._is_large_vocabulary_model(): - # Enable optimizations that reduce intermediate tensor sizes - if not converter.optimizations: - converter.optimizations = [tf.lite.Optimize.DEFAULT] - # Use representative dataset for better quantization if available - if "representative_dataset" in self.kwargs: - converter.representative_dataset = self.kwargs["representative_dataset"] - - def _is_large_vocabulary_model(self): - """Check if this is a large vocabulary model that might cause overflow.""" - model_class_name = self.model.__class__.__name__.lower() - model_module = getattr(self.model.__class__, '__module__', '').lower() - - # Models known to have large vocabularies - large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt', 'bert'] - if any(indicator in model_class_name for indicator in large_vocab_indicators): - return True - if 'keras_hub' in model_module: - return True - - return False - - def _is_keras_hub_model(self): - """ - Heuristically checks if the model is a complex model from Keras-Hub - that benefits from the SavedModel conversion path. - """ - model_module = getattr(self.model.__class__, '__module__', '').lower() - if 'keras_hub' in model_module: - return True - - # Fallback check for models that might not be in the keras_hub module - # but follow similar patterns (e.g., custom backbones). - model_class_name = self.model.__class__.__name__.lower() - complex_indicators = ['backbone', 'causal_lm', 'gemma', 'llama', 'bert'] - if any(indicator in model_class_name for indicator in complex_indicators): - return True - - return False \ No newline at end of file + """Directly convert a concrete function to TFLite (for non-generative models).""" + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_fn], self.model + ) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, + ] + return converter.convert() \ No newline at end of file From 0fe4bd51e39bbc77d900489269501048c7da0e62 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 10:22:55 +0530 Subject: [PATCH 039/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 7d8a83e6fc36..9e18acb4761c 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -133,6 +133,16 @@ def _convert_to_tflite(self): concrete_fn = self._get_concrete_function(self.input_signature) return self._convert_direct(concrete_fn) + def _get_attribute(self, obj, attribute_names): + """Safely get an attribute from a list of possible names.""" + for name in attribute_names: + if hasattr(obj, name): + return getattr(obj, name) + raise AttributeError( + f"Could not find any of the following attributes on object " + f"'{obj.__class__.__name__}': {', '.join(attribute_names)}" + ) + def _convert_generative_model(self): """ Exports a CausalLM model via SavedModel with two distinct signatures @@ -141,6 +151,13 @@ def _convert_generative_model(self): module = GenerationModule(self.model, self.max_sequence_length) backbone = self.model.backbone + # Use the helper to generically get attributes + num_layers = self._get_attribute(backbone, ["num_layers"]) + num_key_value_heads = self._get_attribute( + backbone, ["num_key_value_heads", "num_attention_heads"] + ) + head_dim = self._get_attribute(backbone, ["head_dim", "key_dim"]) + # 1. Define the TensorSpec for the 'initialize' signature. init_signature = ( tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="token_ids"), @@ -150,11 +167,11 @@ def _convert_generative_model(self): # 2. Define the TensorSpec for the 'decode' signature. cache_shape = [ None, # batch_size - backbone.num_layers, + num_layers, 2, # key and value self.max_sequence_length, - backbone.num_key_value_heads, - backbone.head_dim, + num_key_value_heads, + head_dim, ] decode_signature = ( tf.TensorSpec(shape=[None, 1], dtype=tf.int32, name="token_ids"), From 8c3faa3bda666d4372ba95ca74999026fa0c0e09 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 10:38:02 +0530 Subject: [PATCH 040/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 252 ++++++--------------------- 1 file changed, 56 insertions(+), 196 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 9e18acb4761c..fe80e8f0104b 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -1,80 +1,14 @@ import tensorflow as tf from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec -from keras import tree import tempfile import os -class GenerationModule(tf.Module): - """ - A tf.Module to wrap a CausalLM for exporting to TFLite with separate - prompt processing and token generation functions. - """ - def __init__(self, model, max_sequence_length): - super().__init__() - self.model = model - self.max_sequence_length = max_sequence_length - - @tf.function - def initialize(self, token_ids, padding_mask): - """ - Initializes the key/value cache by processing the input prompt. - - This function creates an empty cache and then "seeds" it by running a - forward pass on the prompt. This is equivalent to the `_build_cache` - logic in KerasNLP's CausalLM models. - - Args: - token_ids: A tf.Tensor of shape [batch_size, seq_len]. - padding_mask: A tf.Tensor of shape [batch_size, seq_len]. - - Returns: - A dictionary containing the `initial_cache`. - """ - backbone = self.model.backbone - batch_size = tf.shape(token_ids)[0] - cache_shape = [ - batch_size, - backbone.num_layers, - 2, # For key and value - self.max_sequence_length, - backbone.num_key_value_heads, - backbone.head_dim, - ] - # Create an empty cache with a static max_sequence_length. - cache = tf.zeros(cache_shape, dtype=self.model.compute_dtype) - - # Seed the cache by calling call_with_cache on the prompt. - # The cache_update_index is 0, and the tokens are the whole prompt. - # We only need the resulting cache. - _, _, seeded_cache = self.model.call_with_cache( - token_ids, cache, cache_update_index=0 - ) - return {"initial_cache": seeded_cache} - - @tf.function - def decode(self, token_ids, cache, cache_update_index): - """ - Performs one decoding step to generate the next token. - - Args: - token_ids: The current token, shape [batch_size, 1]. - cache: The key/value cache from the previous step. - cache_update_index: The index at which to update the cache. - - Returns: - A dictionary containing the `logits` and the `updated_cache`. - """ - logits, _, new_cache = self.model.call_with_cache( - token_ids, cache, cache_update_index - ) - return {"logits": logits, "updated_cache": new_cache} - - class LiteRTExporter: """ - Exporter for the LiteRT (TFLite) format. + Exporter for the LiteRT (TFLite) format that creates a single, + callable signature for `model.call`. """ - + def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, **kwargs): self.model = model self.input_signature = input_signature @@ -83,17 +17,29 @@ def __init__(self, model, input_signature=None, verbose=None, max_sequence_lengt self.kwargs = kwargs def export(self, filepath): + """Exports the Keras model to a TFLite file.""" if self.verbose: print("Starting LiteRT export...") + + # 1. Get the input signature with a bounded sequence length. + # This is the critical step to prevent memory overflow. + if self.input_signature is None: + if self.verbose: + print(f"Inferring input signature with max_sequence_length={self.max_sequence_length}.") + self.input_signature = get_input_signature(self.model, self.max_sequence_length) - tflite_model = self._convert_to_tflite() + # 2. Create a single concrete function from the model's call method. + concrete_fn = self._get_concrete_function(self.input_signature) + + # 3. Convert the concrete function to a TFLite model. + tflite_model = self._convert_to_tflite(concrete_fn) if self.verbose: print(f"LiteRT model converted successfully. Size: {len(tflite_model)} bytes") - # Step 4: Save the model to the specified file path. + # 4. Save the model to the specified file path. if not filepath.endswith('.tflite'): - filepath = filepath + '.tflite' + filepath += '.tflite' with open(filepath, "wb") as f: f.write(tflite_model) @@ -101,141 +47,55 @@ def export(self, filepath): if self.verbose: print(f"Exported model to {filepath}") - def _convert_to_tflite(self): - """ - Converts a Keras model to TFLite, automatically selecting the best - conversion path based on the model's architecture. - """ - # Use duck-typing to check for a Keras-Hub style CausalLM model - # that supports efficient, cached generation. - is_generative = hasattr(self.model, "call_with_cache") and hasattr( - self.model.backbone, "num_key_value_heads" - ) - - if is_generative: - if self.verbose: - print( - "Generative CausalLM model detected. Exporting with " - "'initialize' and 'decode' signatures for efficient generation." - ) - return self._convert_generative_model() - else: - if self.verbose: - print( - "Standard model detected. Using direct conversion path " - "with a single 'serving_default' signature." - ) - # Fallback to the standard conversion for non-generative models. - if self.input_signature is None: - self.input_signature = get_input_signature( - self.model, self.max_sequence_length - ) - concrete_fn = self._get_concrete_function(self.input_signature) - return self._convert_direct(concrete_fn) - - def _get_attribute(self, obj, attribute_names): - """Safely get an attribute from a list of possible names.""" - for name in attribute_names: - if hasattr(obj, name): - return getattr(obj, name) - raise AttributeError( - f"Could not find any of the following attributes on object " - f"'{obj.__class__.__name__}': {', '.join(attribute_names)}" - ) - - def _convert_generative_model(self): - """ - Exports a CausalLM model via SavedModel with two distinct signatures - for 'initialize' (prompt processing) and 'decode' (token generation). - """ - module = GenerationModule(self.model, self.max_sequence_length) - backbone = self.model.backbone - - # Use the helper to generically get attributes - num_layers = self._get_attribute(backbone, ["num_layers"]) - num_key_value_heads = self._get_attribute( - backbone, ["num_key_value_heads", "num_attention_heads"] - ) - head_dim = self._get_attribute(backbone, ["head_dim", "key_dim"]) - - # 1. Define the TensorSpec for the 'initialize' signature. - init_signature = ( - tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="token_ids"), - tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="padding_mask"), - ) - - # 2. Define the TensorSpec for the 'decode' signature. - cache_shape = [ - None, # batch_size - num_layers, - 2, # key and value - self.max_sequence_length, - num_key_value_heads, - head_dim, - ] - decode_signature = ( - tf.TensorSpec(shape=[None, 1], dtype=tf.int32, name="token_ids"), - tf.TensorSpec(shape=cache_shape, dtype=self.model.compute_dtype, name="cache"), - tf.TensorSpec(shape=[], dtype=tf.int32, name="cache_update_index"), - ) - - # 3. Get concrete functions and bundle them as signatures. - signatures = { - "initialize": module.initialize.get_concrete_function(*init_signature), - "decode": module.decode.get_concrete_function(*decode_signature), - } - - # 4. Save as a SavedModel and then convert to TFLite. - with tempfile.TemporaryDirectory() as tmpdir: - if self.verbose: - print(f"Saving temporary SavedModel to {tmpdir}") - tf.saved_model.save(module, tmpdir, signatures=signatures) - - converter = tf.lite.TFLiteConverter.from_saved_model(tmpdir) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, - ] - # This is crucial for including both functions in the TFLite model. - converter.signature_keys = ["initialize", "decode"] - tflite_model = converter.convert() - - return tflite_model - def _get_concrete_function(self, input_signature): - """Create a tf.function and get its concrete function (for non-generative models).""" + """Creates a tf.function from the model's call method and gets its concrete function.""" if isinstance(input_signature, dict): - tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature.values()] - input_keys = list(input_signature.keys()) + tf_signature = {k: make_tf_tensor_spec(v) for k, v in input_signature.items()} - @tf.function(input_signature=tf_signature) - def model_fn(*inputs): - input_dict = {key: tensor for key, tensor in zip(input_keys, inputs)} - return self.model(input_dict) + @tf.function + def model_fn(inputs): + return self.model(inputs) + + return model_fn.get_concrete_function(tf_signature) elif isinstance(input_signature, list): tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature] - @tf.function(input_signature=tf_signature) + @tf.function def model_fn(*inputs): return self.model(list(inputs)) + + return model_fn.get_concrete_function(*tf_signature) - else: # Assumes a single tensor or spec - tf_signature = [make_tf_tensor_spec(input_signature)] + else: # Assumes a single tensor + tf_signature = make_tf_tensor_spec(input_signature) - @tf.function(input_signature=tf_signature) + @tf.function def model_fn(input_tensor): return self.model(input_tensor) - - return model_fn.get_concrete_function() - def _convert_direct(self, concrete_fn): - """Directly convert a concrete function to TFLite (for non-generative models).""" - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_fn], self.model - ) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, - ] - return converter.convert() \ No newline at end of file + return model_fn.get_concrete_function(tf_signature) + + def _convert_to_tflite(self, concrete_fn): + """Converts a concrete function to TFLite via the SavedModel path for robustness.""" + if self.verbose: + print("Using SavedModel conversion path for robustness.") + + with tempfile.TemporaryDirectory() as temp_dir: + tf.saved_model.save( + self.model, temp_dir, signatures={"serving_default": concrete_fn} + ) + + converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir) + + # Apply necessary settings for complex models + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, + ] + converter.allow_custom_ops = True + + if "optimizations" in self.kwargs: + converter.optimizations = self.kwargs["optimizations"] + + return converter.convert() \ No newline at end of file From 88b6a6f1a7d2245bd1c69d61cdd6c74ff1a5820a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 2 Sep 2025 14:24:21 +0530 Subject: [PATCH 041/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index fe80e8f0104b..478a1925aa1e 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -82,8 +82,12 @@ def _convert_to_tflite(self, concrete_fn): print("Using SavedModel conversion path for robustness.") with tempfile.TemporaryDirectory() as temp_dir: + # --- FIX: Save a bare tf.Module() instead of the full Keras model. --- + # The concrete_fn already captures all necessary variables. Saving a + # simple module avoids the `_DictWrapper` serialization error. + module = tf.Module() tf.saved_model.save( - self.model, temp_dir, signatures={"serving_default": concrete_fn} + module, temp_dir, signatures={"serving_default": concrete_fn} ) converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir) From da13d04b595604515f18e6c757be8f921138083d Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 3 Sep 2025 13:42:01 +0530 Subject: [PATCH 042/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 69 +++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 478a1925aa1e..bbc0f5f541aa 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -76,20 +76,81 @@ def model_fn(input_tensor): return model_fn.get_concrete_function(tf_signature) + def _get_all_variables(self, obj): + """Recursively collect all variables from the object and its submodules.""" + variables = [] + + # Get direct variables + if hasattr(obj, 'variables'): + variables.extend(obj.variables) + + # Recursively get variables from submodules + if hasattr(obj, '_layers'): + for layer in obj._layers: + variables.extend(self._get_all_variables(layer)) + + # Handle other common attributes that might contain submodules + for attr_name in dir(obj): + if attr_name.startswith('_'): + continue + try: + attr = getattr(obj, attr_name) + if hasattr(attr, 'variables') and attr is not obj: + variables.extend(self._get_all_variables(attr)) + except (AttributeError, TypeError): + continue + + # Remove duplicates while preserving order + seen = set() + unique_variables = [] + for var in variables: + if var not in seen: + seen.add(var) + unique_variables.append(var) + + return unique_variables + def _convert_to_tflite(self, concrete_fn): """Converts a concrete function to TFLite via the SavedModel path for robustness.""" if self.verbose: print("Using SavedModel conversion path for robustness.") with tempfile.TemporaryDirectory() as temp_dir: + # --- DEBUG: Check variables before saving --- + if self.verbose: + all_vars = self._get_all_variables(self.model) + num_vars = len(all_vars) + total_size = sum(var.numpy().nbytes for var in all_vars) + print(f"DEBUG: Model has {num_vars} variables (recursive), total size: {total_size / (1024**3):.2f} GB") + # --- FIX: Save a bare tf.Module() instead of the full Keras model. --- # The concrete_fn already captures all necessary variables. Saving a # simple module avoids the `_DictWrapper` serialization error. module = tf.Module() + + # Get all variables recursively + all_vars = self._get_all_variables(self.model) + + # Assign each variable as an attribute to make it trackable + for i, var in enumerate(all_vars): + setattr(module, f'var_{i}', var) + + if self.verbose: + print(f"DEBUG: Assigned {len(all_vars)} variables to module.") + tf.saved_model.save( module, temp_dir, signatures={"serving_default": concrete_fn} ) + # --- DEBUG: Check SavedModel size --- + if self.verbose: + saved_model_size = sum( + os.path.getsize(os.path.join(dirpath, filename)) + for dirpath, _, filenames in os.walk(temp_dir) + for filename in filenames + ) + print(f"DEBUG: SavedModel size: {saved_model_size / (1024**3):.2f} GB") + converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir) # Apply necessary settings for complex models @@ -102,4 +163,10 @@ def _convert_to_tflite(self, concrete_fn): if "optimizations" in self.kwargs: converter.optimizations = self.kwargs["optimizations"] - return converter.convert() \ No newline at end of file + tflite_model = converter.convert() + + # --- DEBUG: Check TFLite size --- + if self.verbose: + print(f"DEBUG: TFLite model size: {len(tflite_model) / (1024**3):.2f} GB") + + return tflite_model \ No newline at end of file From f1f700c0d06eb3e72dfd8adff1a89e9c1c00ae4c Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 8 Sep 2025 14:08:48 +0530 Subject: [PATCH 043/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 486 +++++++++++++++++++++------ 1 file changed, 377 insertions(+), 109 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index bbc0f5f541aa..54d5b0a7083d 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -2,6 +2,8 @@ from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec import tempfile import os +import numpy as np + class LiteRTExporter: """ @@ -21,21 +23,21 @@ def export(self, filepath): if self.verbose: print("Starting LiteRT export...") - # 1. Get the input signature with a bounded sequence length. - # This is the critical step to prevent memory overflow. + # 1. Ensure the model is built by calling it if necessary + self._ensure_model_built() + + # 2. Resolve / infer input signature with bounded sequence length. if self.input_signature is None: if self.verbose: print(f"Inferring input signature with max_sequence_length={self.max_sequence_length}.") self.input_signature = get_input_signature(self.model, self.max_sequence_length) - # 2. Create a single concrete function from the model's call method. - concrete_fn = self._get_concrete_function(self.input_signature) - - # 3. Convert the concrete function to a TFLite model. - tflite_model = self._convert_to_tflite(concrete_fn) + # 3. Convert the model to TFLite. + tflite_model = self._convert_to_tflite(self.input_signature) if self.verbose: - print(f"LiteRT model converted successfully. Size: {len(tflite_model)} bytes") + final_size_mb = len(tflite_model) / (1024*1024) + print(f"LiteRT model converted successfully. Size: {final_size_mb:.2f} MB") # 4. Save the model to the specified file path. if not filepath.endswith('.tflite'): @@ -47,126 +49,392 @@ def export(self, filepath): if self.verbose: print(f"Exported model to {filepath}") - def _get_concrete_function(self, input_signature): - """Creates a tf.function from the model's call method and gets its concrete function.""" - if isinstance(input_signature, dict): - tf_signature = {k: make_tf_tensor_spec(v) for k, v in input_signature.items()} - - @tf.function - def model_fn(inputs): - return self.model(inputs) + def _ensure_model_built(self): + """Ensure the model is built by calling it with dummy data if necessary.""" + if not self.model.built: + if self.verbose: + print("Model not built, building with dummy data...") - return model_fn.get_concrete_function(tf_signature) + # For Sequential models, we need to build them by calling them + if hasattr(self.model, '_is_graph_network') and not self.model._is_graph_network: + # This is a Sequential model + self._build_sequential_model() + else: + # This is a Functional model + self._build_functional_model() + else: + # Model is already built, but let's make sure it has outputs + if not hasattr(self.model, 'outputs') or not self.model.outputs: + if self.verbose: + print("Model built but no outputs found, rebuilding...") + # For Sequential models, we need to build them by calling them + if hasattr(self.model, '_is_graph_network') and not self.model._is_graph_network: + # This is a Sequential model + self._build_sequential_model() + else: + # This is a Functional model + self._build_functional_model() - elif isinstance(input_signature, list): - tf_signature = [make_tf_tensor_spec(spec) for spec in input_signature] - - @tf.function - def model_fn(*inputs): - return self.model(list(inputs)) + # Always make a prediction call with random inputs to ensure model is fully built + self._make_prediction_call() - return model_fn.get_concrete_function(*tf_signature) - - else: # Assumes a single tensor - tf_signature = make_tf_tensor_spec(input_signature) + def _make_prediction_call(self): + """Make a prediction call with random inputs to ensure model is fully built.""" + try: + if self.verbose: + print("Making prediction call with random inputs...") - @tf.function - def model_fn(input_tensor): - return self.model(input_tensor) - - return model_fn.get_concrete_function(tf_signature) + # Generate random inputs based on model's input specs + if hasattr(self.model, 'inputs') and self.model.inputs: + # Multi-input or single input functional model + dummy_inputs = [] + for input_layer in self.model.inputs: + input_shape = input_layer.shape + # Replace None (batch dimension) with 1 + shape = [1 if dim is None else dim for dim in input_shape] + dummy_input = np.random.random(shape).astype(np.float32) + dummy_inputs.append(dummy_input) + + if len(dummy_inputs) == 1: + _ = self.model.predict(dummy_inputs[0], verbose=0) + else: + _ = self.model.predict(dummy_inputs, verbose=0) + + if self.verbose: + print(f"Prediction call successful with {len(dummy_inputs)} input(s)") + else: + # Sequential model - try to infer input shape + input_shape = self._infer_sequential_input_shape() + if input_shape: + dummy_input = np.random.random(input_shape).astype(np.float32) + _ = self.model.predict(dummy_input, verbose=0) + if self.verbose: + print(f"Prediction call successful with shape: {input_shape}") + + except Exception as e: + if self.verbose: + print(f"Warning: Prediction call failed: {e}") - def _get_all_variables(self, obj): - """Recursively collect all variables from the object and its submodules.""" - variables = [] - - # Get direct variables - if hasattr(obj, 'variables'): - variables.extend(obj.variables) - - # Recursively get variables from submodules - if hasattr(obj, '_layers'): - for layer in obj._layers: - variables.extend(self._get_all_variables(layer)) - - # Handle other common attributes that might contain submodules - for attr_name in dir(obj): - if attr_name.startswith('_'): - continue - try: - attr = getattr(obj, attr_name) - if hasattr(attr, 'variables') and attr is not obj: - variables.extend(self._get_all_variables(attr)) - except (AttributeError, TypeError): - continue - - # Remove duplicates while preserving order - seen = set() - unique_variables = [] - for var in variables: - if var not in seen: - seen.add(var) - unique_variables.append(var) + def _infer_sequential_input_shape(self): + """Infer input shape for Sequential models.""" + try: + # First, look for Input layer + for layer in self.model.layers: + if hasattr(layer, '__class__') and layer.__class__.__name__ == 'InputLayer': + if hasattr(layer, 'batch_input_shape') and layer.batch_input_shape: + input_shape = layer.batch_input_shape + return (1,) + input_shape[1:] if input_shape[0] is None else input_shape + + # If no Input layer, try to get from first layer + if hasattr(self.model, 'layers') and self.model.layers: + first_layer = self.model.layers[0] + + # Check various ways to get input shape + for attr in ['input_shape', 'batch_input_shape', '_batch_input_shape']: + if hasattr(first_layer, attr): + input_shape = getattr(first_layer, attr) + if input_shape: + return (1,) + input_shape[1:] if input_shape[0] is None else input_shape + + # Fallback based on layer type + if hasattr(first_layer, '__class__'): + class_name = first_layer.__class__.__name__ + if class_name == 'Dense': + if hasattr(first_layer, 'input_dim') and first_layer.input_dim: + return (1, first_layer.input_dim) + else: + return (1, 10) # Default for Dense + elif class_name == 'Conv2D': + return (1, 28, 28, 1) # Default for Conv2D + elif 'LSTM' in class_name or 'GRU' in class_name: + return (1, 20, 50) # Default for RNN + + except Exception as e: + if self.verbose: + print(f"Warning: Could not infer Sequential input shape: {e}") - return unique_variables + return None - def _convert_to_tflite(self, concrete_fn): - """Converts a concrete function to TFLite via the SavedModel path for robustness.""" - if self.verbose: - print("Using SavedModel conversion path for robustness.") - - with tempfile.TemporaryDirectory() as temp_dir: - # --- DEBUG: Check variables before saving --- + def _build_sequential_model(self): + """Build a Sequential model by intelligently inferring input shape from layers.""" + try: + # First, look for Input layer in the model (most reliable) + for layer in self.model.layers: + if hasattr(layer, '__class__') and layer.__class__.__name__ == 'InputLayer': + if hasattr(layer, 'batch_input_shape') and layer.batch_input_shape: + input_shape = layer.batch_input_shape + if input_shape[0] is None: + dummy_shape = (1,) + input_shape[1:] + else: + dummy_shape = input_shape + + dummy_input = tf.ones(dummy_shape, dtype=tf.float32) + _ = self.model(dummy_input) + if self.verbose: + print(f"Sequential model built from InputLayer with shape: {dummy_shape}") + return + + # If no Input layer found, try to get input shape from the first layer + if hasattr(self.model, 'layers') and self.model.layers: + first_layer = self.model.layers[0] + + # Try to get input shape from the first layer + input_shape = None + + # Check various ways to get input shape + if hasattr(first_layer, 'input_shape') and first_layer.input_shape: + input_shape = first_layer.input_shape + elif hasattr(first_layer, 'batch_input_shape') and first_layer.batch_input_shape: + input_shape = first_layer.batch_input_shape + elif hasattr(first_layer, '_batch_input_shape') and first_layer._batch_input_shape: + input_shape = first_layer._batch_input_shape + + # If we have an input shape, use it + if input_shape: + # Create dummy input with batch dimension + if input_shape[0] is None: # Batch dimension is None + dummy_shape = (1,) + input_shape[1:] + else: + dummy_shape = input_shape + + dummy_input = tf.ones(dummy_shape, dtype=tf.float32) + _ = self.model(dummy_input) + if self.verbose: + print(f"Sequential model built with shape: {dummy_shape}") + return + + # If no explicit input shape, try to infer from layer configuration + if hasattr(first_layer, 'units') and hasattr(first_layer, '__class__'): + # Dense layer - need to know input dimension + if first_layer.__class__.__name__ == 'Dense': + # For Dense layers, we need to know the input dimension + # Try to infer from layer configuration or use a reasonable default + if hasattr(first_layer, 'input_dim') and first_layer.input_dim: + dummy_shape = (1, first_layer.input_dim) + else: + # Use a reasonable default for Dense layers + dummy_shape = (1, 10) # Common for simple models + + dummy_input = tf.ones(dummy_shape, dtype=tf.float32) + _ = self.model(dummy_input) + if self.verbose: + print(f"Sequential model (Dense) built with shape: {dummy_shape}") + return + + elif hasattr(first_layer, 'filters') and hasattr(first_layer, 'kernel_size'): + # Conv2D layer - need image dimensions + if first_layer.__class__.__name__ == 'Conv2D': + # For Conv2D, we need (height, width, channels) + # Use a reasonable default for image models + dummy_shape = (1, 28, 28, 1) # MNIST-like + + dummy_input = tf.ones(dummy_shape, dtype=tf.float32) + _ = self.model(dummy_input) + if self.verbose: + print(f"Sequential model (Conv2D) built with shape: {dummy_shape}") + return + + elif hasattr(first_layer, 'units') and hasattr(first_layer, 'return_sequences'): + # RNN layer - need sequence dimensions + if 'LSTM' in first_layer.__class__.__name__ or 'GRU' in first_layer.__class__.__name__: + # For RNN layers, we need (sequence_length, features) + # Use reasonable defaults for sequence models + dummy_shape = (1, 20, 50) # Common for sequence models + + dummy_input = tf.ones(dummy_shape, dtype=tf.float32) + _ = self.model(dummy_input) + if self.verbose: + print(f"Sequential model (RNN) built with shape: {dummy_shape}") + return + + except Exception as e: if self.verbose: - all_vars = self._get_all_variables(self.model) - num_vars = len(all_vars) - total_size = sum(var.numpy().nbytes for var in all_vars) - print(f"DEBUG: Model has {num_vars} variables (recursive), total size: {total_size / (1024**3):.2f} GB") + print(f"Warning: Could not build Sequential model: {e}") + + def _build_functional_model(self): + """Build a Functional model using its input specifications.""" + try: + # Create dummy inputs based on input specs + dummy_inputs = [] + for input_spec in self.model.input_spec or []: + if hasattr(input_spec, 'shape') and input_spec.shape: + # Create dummy data with the expected shape + shape = [1] + list(input_spec.shape[1:]) # Add batch dimension + dummy_data = tf.ones(shape, dtype=input_spec.dtype or tf.float32) + dummy_inputs.append(dummy_data) - # --- FIX: Save a bare tf.Module() instead of the full Keras model. --- - # The concrete_fn already captures all necessary variables. Saving a - # simple module avoids the `_DictWrapper` serialization error. - module = tf.Module() + # If we couldn't get specs, try to infer from layers + if not dummy_inputs and hasattr(self.model, 'layers') and self.model.layers: + first_layer = self.model.layers[0] + if hasattr(first_layer, 'input_spec') and first_layer.input_spec: + for spec in first_layer.input_spec: + if hasattr(spec, 'shape') and spec.shape: + shape = [1] + list(spec.shape[1:]) + dummy_data = tf.ones(shape, dtype=spec.dtype or tf.float32) + dummy_inputs.append(dummy_data) - # Get all variables recursively - all_vars = self._get_all_variables(self.model) + # Build the model + if dummy_inputs: + try: + if len(dummy_inputs) == 1: + _ = self.model(dummy_inputs[0]) + else: + _ = self.model(dummy_inputs) + except Exception as e: + if self.verbose: + print(f"Warning: Could not build functional model: {e}") + + except Exception as e: + if self.verbose: + print(f"Warning: Could not build functional model: {e}") + + def _convert_to_tflite(self, input_signature): + """Converts the Keras model to a TFLite model.""" + # Try direct conversion first (simpler approach) + try: + if self.verbose: + print("Converting Keras model directly to TFLite format...") - # Assign each variable as an attribute to make it trackable - for i, var in enumerate(all_vars): - setattr(module, f'var_{i}', var) + converter = tf.lite.TFLiteConverter.from_keras_model(self.model) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, # Enable TensorFlow Lite ops. + tf.lite.OpsSet.SELECT_TF_OPS, # Enable TensorFlow ops. + ] + # Ensure variables are embedded + converter.experimental_enable_resource_variables = False + tflite_model = converter.convert() if self.verbose: - print(f"DEBUG: Assigned {len(all_vars)} variables to module.") - - tf.saved_model.save( - module, temp_dir, signatures={"serving_default": concrete_fn} - ) + print("Direct conversion successful") + return tflite_model - # --- DEBUG: Check SavedModel size --- + except Exception as direct_error: if self.verbose: - saved_model_size = sum( - os.path.getsize(os.path.join(dirpath, filename)) - for dirpath, _, filenames in os.walk(temp_dir) - for filename in filenames - ) - print(f"DEBUG: SavedModel size: {saved_model_size / (1024**3):.2f} GB") + print(f"Direct conversion failed: {direct_error}") + print("Trying wrapper-based conversion...") - converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir) + # Fallback to wrapper approach + # 1. Wrap the Keras model in our clean tf.Module. + wrapper = _KerasModelWrapper(self.model) + + # 2. Get a concrete function from the wrapper. + # Handle both single and multiple input signatures + if not isinstance(input_signature, (list, tuple)): + input_signature = [input_signature] - # Apply necessary settings for complex models - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, - ] - converter.allow_custom_ops = True + # Convert InputSpec objects to TensorSpec objects for get_concrete_function + tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] - if "optimizations" in self.kwargs: - converter.optimizations = self.kwargs["optimizations"] + # Create input arguments based on the model's expected signature + input_args = self._create_input_args(tensor_specs) + + concrete_func = wrapper.__call__.get_concrete_function(**input_args) + + # 3. Convert directly from the concrete function to TFLite. + if self.verbose: + print("Converting concrete function to TFLite format...") + # Use the wrapper as trackable_obj to avoid deprecation warning + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_func], + trackable_obj=wrapper + ) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, # Enable TensorFlow Lite ops. + tf.lite.OpsSet.SELECT_TF_OPS, # Enable TensorFlow ops. + ] + # Ensure variables are embedded + converter.experimental_enable_resource_variables = False tflite_model = converter.convert() + + return tflite_model + + def _create_input_args(self, tensor_specs): + """Create proper input arguments for the model's call signature.""" + # Determine if this is a single-input or multi-input model + num_inputs = len(self.model.inputs) if hasattr(self.model, 'inputs') else 1 + + if num_inputs == 1: + # Single input model - use 'inputs' as the argument name + if len(tensor_specs) == 1: + return {"inputs": tensor_specs[0]} + else: + # Multiple specs for single input (shouldn't happen, but handle gracefully) + return {"inputs": tensor_specs[0]} + else: + # Multi-input model - use the actual input names or create generic names + input_args = {} - # --- DEBUG: Check TFLite size --- - if self.verbose: - print(f"DEBUG: TFLite model size: {len(tflite_model) / (1024**3):.2f} GB") + if hasattr(self.model, 'inputs') and self.model.inputs: + # Use the actual input names from the model + for i, (input_layer, spec) in enumerate(zip(self.model.inputs, tensor_specs)): + input_name = input_layer.name + input_args[input_name] = spec + else: + # Fallback to generic names + for i, spec in enumerate(tensor_specs): + input_args[f"input_{i}"] = spec - return tflite_model \ No newline at end of file + return input_args + + +class _KerasModelWrapper(tf.Module): + """ + A tf.Module wrapper for a Keras model. + + This wrapper is designed to be a clean, serializable interface for TFLite + conversion. It holds the Keras model and exposes a single `__call__` + method that is decorated with `tf.function`. Crucially, it also ensures + all variables from the Keras model are tracked by the SavedModel format, + which is key to including them in the final TFLite model. + """ + + def __init__(self, model): + super().__init__() + # Store the model reference in a way that TensorFlow won't try to track it + # This prevents the _DictWrapper error during SavedModel serialization + object.__setattr__(self, '_model', model) + + # Explicitly track all variables from the Keras model by assigning + # them as individual attributes of this wrapper. This ensures they are + # properly included in the SavedModel and TFLite conversion. + for i, var in enumerate(model.variables): + setattr(self, f'_var_{i}', var) + + @tf.function + def __call__(self, *args, **kwargs): + """The single entry point for the exported model.""" + # Handle both single and multi-input cases + if args and not kwargs: + # Called with positional arguments + if len(args) == 1: + return self._model(args[0]) + else: + return self._model(list(args)) + elif kwargs and not args: + # Called with keyword arguments + if len(kwargs) == 1 and 'inputs' in kwargs: + # Single input case + return self._model(kwargs['inputs']) + else: + # Multi-input case - convert to list/dict format expected by model + if hasattr(self._model, 'inputs') and len(self._model.inputs) > 1: + # Multi-input functional model + input_list = [] + for input_layer in self._model.inputs: + input_name = input_layer.name + if input_name in kwargs: + input_list.append(kwargs[input_name]) + else: + # Try to match by position + keys = list(kwargs.keys()) + idx = len(input_list) + if idx < len(keys): + input_list.append(kwargs[keys[idx]]) + return self._model(input_list) + else: + # Single input model called with named arguments + return self._model(list(kwargs.values())[0]) + else: + # Fallback to original call + return self._model(*args, **kwargs) \ No newline at end of file From 59447805f2cf70678d169f7ddcb4e0314fba2842 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 9 Sep 2025 13:54:19 +0530 Subject: [PATCH 044/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 401 +++++++++------------------ 1 file changed, 138 insertions(+), 263 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 54d5b0a7083d..5b1034b0ff47 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -22,6 +22,7 @@ def export(self, filepath): """Exports the Keras model to a TFLite file.""" if self.verbose: print("Starting LiteRT export...") + print(f"Model: {type(self.model)} - built: {self.model.built}") # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() @@ -50,70 +51,82 @@ def export(self, filepath): print(f"Exported model to {filepath}") def _ensure_model_built(self): - """Ensure the model is built by calling it with dummy data if necessary.""" - if not self.model.built: - if self.verbose: - print("Model not built, building with dummy data...") - - # For Sequential models, we need to build them by calling them - if hasattr(self.model, '_is_graph_network') and not self.model._is_graph_network: - # This is a Sequential model - self._build_sequential_model() - else: - # This is a Functional model - self._build_functional_model() - else: - # Model is already built, but let's make sure it has outputs - if not hasattr(self.model, 'outputs') or not self.model.outputs: - if self.verbose: - print("Model built but no outputs found, rebuilding...") - # For Sequential models, we need to build them by calling them - if hasattr(self.model, '_is_graph_network') and not self.model._is_graph_network: - # This is a Sequential model - self._build_sequential_model() - else: - # This is a Functional model - self._build_functional_model() - - # Always make a prediction call with random inputs to ensure model is fully built - self._make_prediction_call() + """ + Ensures the model is fully traced by performing a forward pass. + + This is critical because `model.built` can be True even if the model + has not been traced with concrete input shapes, which is required for + TFLite conversion. This method guarantees a forward pass happens. + """ + if self.verbose: + print("Ensuring model is traced by performing a forward pass...") - def _make_prediction_call(self): - """Make a prediction call with random inputs to ensure model is fully built.""" try: + # Debug information if self.verbose: - print("Making prediction call with random inputs...") - - # Generate random inputs based on model's input specs + print(f"Model type: {type(self.model)}") + print(f"Model built: {self.model.built}") + if hasattr(self.model, '_functional'): + print(f"Sequential _functional: {self.model._functional}") + + # Generate dummy inputs based on the model's specification + dummy_inputs = [] + # Prioritize `model.inputs` as it's the most reliable source if hasattr(self.model, 'inputs') and self.model.inputs: - # Multi-input or single input functional model - dummy_inputs = [] + if self.verbose: + print(f"Generating inputs from `model.inputs` ({len(self.model.inputs)} input(s)).") for input_layer in self.model.inputs: - input_shape = input_layer.shape - # Replace None (batch dimension) with 1 - shape = [1 if dim is None else dim for dim in input_shape] - dummy_input = np.random.random(shape).astype(np.float32) + shape = [1 if dim is None else dim for dim in input_layer.shape] + dummy_input = tf.zeros(shape, dtype=input_layer.dtype or tf.float32) dummy_inputs.append(dummy_input) - - if len(dummy_inputs) == 1: - _ = self.model.predict(dummy_inputs[0], verbose=0) - else: - _ = self.model.predict(dummy_inputs, verbose=0) - - if self.verbose: - print(f"Prediction call successful with {len(dummy_inputs)} input(s)") + if self.verbose: + print(f" Input shape: {shape}, dtype: {input_layer.dtype or tf.float32}") else: - # Sequential model - try to infer input shape + # Fallback for pure Sequential models without an Input layer + if self.verbose: + print("Model has no `inputs` attribute. Assuming pure Sequential and inferring shape.") input_shape = self._infer_sequential_input_shape() if input_shape: - dummy_input = np.random.random(input_shape).astype(np.float32) - _ = self.model.predict(dummy_input, verbose=0) if self.verbose: - print(f"Prediction call successful with shape: {input_shape}") - + print(f"Inferred input shape for Sequential model: {input_shape}") + dummy_inputs.append(tf.zeros(input_shape, dtype=tf.float32)) + else: + raise ValueError( + "Cannot build Sequential model: unable to infer input shape. " + "Please add an `Input` layer or specify `input_shape` in the first layer." + ) + + # Debug the dummy inputs + if self.verbose: + print(f"About to call model with {len(dummy_inputs)} inputs") + for i, inp in enumerate(dummy_inputs): + print(f" Input {i}: shape={inp.shape}, dtype={inp.dtype}") + + # Perform a direct call in inference mode to trace the model. + # This is more robust than a simple call() and avoids the + # overhead of model.predict(). + if len(dummy_inputs) == 1: + result = self.model(dummy_inputs[0], training=False) + else: + result = self.model(dummy_inputs, training=False) + + if self.verbose: + print("Model successfully traced via direct call with training=False.") + print(f"Output shape: {result.shape if hasattr(result, 'shape') else type(result)}") + except Exception as e: if self.verbose: - print(f"Warning: Prediction call failed: {e}") + print(f"Error during model call: {e}") + import traceback + traceback.print_exc() + raise ValueError(f"Failed to trace model with error: {e}") + + # Final, critical check + if not self.model.built: + raise ValueError( + "Model could not be built even after a direct call. " + "Please check the model's definition and input specification." + ) def _infer_sequential_input_shape(self): """Infer input shape for Sequential models.""" @@ -124,258 +137,120 @@ def _infer_sequential_input_shape(self): if hasattr(layer, 'batch_input_shape') and layer.batch_input_shape: input_shape = layer.batch_input_shape return (1,) + input_shape[1:] if input_shape[0] is None else input_shape - + # If no Input layer, try to get from first layer if hasattr(self.model, 'layers') and self.model.layers: first_layer = self.model.layers[0] - + # Check various ways to get input shape for attr in ['input_shape', 'batch_input_shape', '_batch_input_shape']: if hasattr(first_layer, attr): input_shape = getattr(first_layer, attr) if input_shape: return (1,) + input_shape[1:] if input_shape[0] is None else input_shape - - # Fallback based on layer type + + # Try to infer from layer configuration without hardcoded fallbacks if hasattr(first_layer, '__class__'): class_name = first_layer.__class__.__name__ + if class_name == 'Dense': + # For Dense layers, try to infer from input_dim if hasattr(first_layer, 'input_dim') and first_layer.input_dim: return (1, first_layer.input_dim) - else: - return (1, 10) # Default for Dense - elif class_name == 'Conv2D': - return (1, 28, 28, 1) # Default for Conv2D - elif 'LSTM' in class_name or 'GRU' in class_name: - return (1, 20, 50) # Default for RNN - - except Exception as e: - if self.verbose: - print(f"Warning: Could not infer Sequential input shape: {e}") - - return None - def _build_sequential_model(self): - """Build a Sequential model by intelligently inferring input shape from layers.""" - try: - # First, look for Input layer in the model (most reliable) - for layer in self.model.layers: - if hasattr(layer, '__class__') and layer.__class__.__name__ == 'InputLayer': - if hasattr(layer, 'batch_input_shape') and layer.batch_input_shape: - input_shape = layer.batch_input_shape - if input_shape[0] is None: - dummy_shape = (1,) + input_shape[1:] - else: - dummy_shape = input_shape - - dummy_input = tf.ones(dummy_shape, dtype=tf.float32) - _ = self.model(dummy_input) - if self.verbose: - print(f"Sequential model built from InputLayer with shape: {dummy_shape}") - return - - # If no Input layer found, try to get input shape from the first layer - if hasattr(self.model, 'layers') and self.model.layers: - first_layer = self.model.layers[0] - - # Try to get input shape from the first layer - input_shape = None - - # Check various ways to get input shape - if hasattr(first_layer, 'input_shape') and first_layer.input_shape: - input_shape = first_layer.input_shape - elif hasattr(first_layer, 'batch_input_shape') and first_layer.batch_input_shape: - input_shape = first_layer.batch_input_shape - elif hasattr(first_layer, '_batch_input_shape') and first_layer._batch_input_shape: - input_shape = first_layer._batch_input_shape - - # If we have an input shape, use it - if input_shape: - # Create dummy input with batch dimension - if input_shape[0] is None: # Batch dimension is None - dummy_shape = (1,) + input_shape[1:] - else: - dummy_shape = input_shape - - dummy_input = tf.ones(dummy_shape, dtype=tf.float32) - _ = self.model(dummy_input) - if self.verbose: - print(f"Sequential model built with shape: {dummy_shape}") - return - - # If no explicit input shape, try to infer from layer configuration - if hasattr(first_layer, 'units') and hasattr(first_layer, '__class__'): - # Dense layer - need to know input dimension - if first_layer.__class__.__name__ == 'Dense': - # For Dense layers, we need to know the input dimension - # Try to infer from layer configuration or use a reasonable default - if hasattr(first_layer, 'input_dim') and first_layer.input_dim: - dummy_shape = (1, first_layer.input_dim) - else: - # Use a reasonable default for Dense layers - dummy_shape = (1, 10) # Common for simple models - - dummy_input = tf.ones(dummy_shape, dtype=tf.float32) - _ = self.model(dummy_input) - if self.verbose: - print(f"Sequential model (Dense) built with shape: {dummy_shape}") - return - - elif hasattr(first_layer, 'filters') and hasattr(first_layer, 'kernel_size'): - # Conv2D layer - need image dimensions - if first_layer.__class__.__name__ == 'Conv2D': - # For Conv2D, we need (height, width, channels) - # Use a reasonable default for image models - dummy_shape = (1, 28, 28, 1) # MNIST-like - - dummy_input = tf.ones(dummy_shape, dtype=tf.float32) - _ = self.model(dummy_input) - if self.verbose: - print(f"Sequential model (Conv2D) built with shape: {dummy_shape}") - return - - elif hasattr(first_layer, 'units') and hasattr(first_layer, 'return_sequences'): - # RNN layer - need sequence dimensions - if 'LSTM' in first_layer.__class__.__name__ or 'GRU' in first_layer.__class__.__name__: - # For RNN layers, we need (sequence_length, features) - # Use reasonable defaults for sequence models - dummy_shape = (1, 20, 50) # Common for sequence models - - dummy_input = tf.ones(dummy_shape, dtype=tf.float32) - _ = self.model(dummy_input) - if self.verbose: - print(f"Sequential model (RNN) built with shape: {dummy_shape}") - return - - except Exception as e: - if self.verbose: - print(f"Warning: Could not build Sequential model: {e}") + elif class_name == 'Dropout': + # For Dropout, look at the next layer to infer shape + if len(self.model.layers) > 1: + next_layer = self.model.layers[1] + if hasattr(next_layer, '__class__'): + next_class = next_layer.__class__.__name__ + if next_class == 'Dense': + if hasattr(next_layer, 'input_dim') and next_layer.input_dim: + return (1, next_layer.input_dim) - def _build_functional_model(self): - """Build a Functional model using its input specifications.""" - try: - # Create dummy inputs based on input specs - dummy_inputs = [] - for input_spec in self.model.input_spec or []: - if hasattr(input_spec, 'shape') and input_spec.shape: - # Create dummy data with the expected shape - shape = [1] + list(input_spec.shape[1:]) # Add batch dimension - dummy_data = tf.ones(shape, dtype=input_spec.dtype or tf.float32) - dummy_inputs.append(dummy_data) - - # If we couldn't get specs, try to infer from layers - if not dummy_inputs and hasattr(self.model, 'layers') and self.model.layers: - first_layer = self.model.layers[0] - if hasattr(first_layer, 'input_spec') and first_layer.input_spec: - for spec in first_layer.input_spec: - if hasattr(spec, 'shape') and spec.shape: - shape = [1] + list(spec.shape[1:]) - dummy_data = tf.ones(shape, dtype=spec.dtype or tf.float32) - dummy_inputs.append(dummy_data) - - # Build the model - if dummy_inputs: - try: - if len(dummy_inputs) == 1: - _ = self.model(dummy_inputs[0]) - else: - _ = self.model(dummy_inputs) - except Exception as e: + elif class_name in ['BatchNormalization', 'LayerNormalization']: + # For normalization layers, try to infer from previous layer + if len(self.model.layers) > 1: + prev_layer = self.model.layers[0] # The normalization layer itself + if hasattr(prev_layer, 'units'): + return (1, prev_layer.units) + + # For other layer types, we cannot reliably infer without hardcoded values + # Return None to indicate inference failed if self.verbose: - print(f"Warning: Could not build functional model: {e}") - + print(f"Cannot infer input shape for layer type: {class_name}") + except Exception as e: if self.verbose: - print(f"Warning: Could not build functional model: {e}") + print(f"Warning: Could not infer Sequential input shape: {e}") + + return None def _convert_to_tflite(self, input_signature): """Converts the Keras model to a TFLite model.""" - # Try direct conversion first (simpler approach) + is_sequential = isinstance(self.model, tf.keras.Sequential) + + # For Sequential models, direct conversion is unreliable. + # We will always use the wrapper-based approach. + if is_sequential: + if self.verbose: + print("Sequential model detected. Using wrapper-based conversion for reliability.") + return self._convert_with_wrapper(input_signature) + + # For Functional models, try direct conversion first. try: if self.verbose: - print("Converting Keras model directly to TFLite format...") + print("Functional model detected. Trying direct conversion...") converter = tf.lite.TFLiteConverter.from_keras_model(self.model) converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, # Enable TensorFlow Lite ops. - tf.lite.OpsSet.SELECT_TF_OPS, # Enable TensorFlow ops. + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, ] - # Ensure variables are embedded converter.experimental_enable_resource_variables = False tflite_model = converter.convert() if self.verbose: - print("Direct conversion successful") + print("Direct conversion successful.") return tflite_model except Exception as direct_error: if self.verbose: - print(f"Direct conversion failed: {direct_error}") - print("Trying wrapper-based conversion...") - - # Fallback to wrapper approach - # 1. Wrap the Keras model in our clean tf.Module. - wrapper = _KerasModelWrapper(self.model) - - # 2. Get a concrete function from the wrapper. - # Handle both single and multiple input signatures - if not isinstance(input_signature, (list, tuple)): - input_signature = [input_signature] - - # Convert InputSpec objects to TensorSpec objects for get_concrete_function - tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] + print(f"Direct conversion failed for Functional model: {direct_error}") + print("Falling back to wrapper-based conversion...") - # Create input arguments based on the model's expected signature - input_args = self._create_input_args(tensor_specs) + return self._convert_with_wrapper(input_signature) - concrete_func = wrapper.__call__.get_concrete_function(**input_args) - - # 3. Convert directly from the concrete function to TFLite. - if self.verbose: - print("Converting concrete function to TFLite format...") - - # Use the wrapper as trackable_obj to avoid deprecation warning - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], - trackable_obj=wrapper - ) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, # Enable TensorFlow Lite ops. - tf.lite.OpsSet.SELECT_TF_OPS, # Enable TensorFlow ops. - ] - # Ensure variables are embedded - converter.experimental_enable_resource_variables = False - tflite_model = converter.convert() + def _convert_with_wrapper(self, input_signature): + """Converts the model to TFLite using the tf.Module wrapper.""" + # 1. Wrap the Keras model in our clean tf.Module. + wrapper = _KerasModelWrapper(self.model) - return tflite_model + # 2. Get a concrete function from the wrapper. + if not isinstance(input_signature, (list, tuple)): + input_signature = [input_signature] + + tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] + + # Pass tensor specs as positional arguments to get the concrete function. + concrete_func = wrapper.__call__.get_concrete_function(*tensor_specs) - def _create_input_args(self, tensor_specs): - """Create proper input arguments for the model's call signature.""" - # Determine if this is a single-input or multi-input model - num_inputs = len(self.model.inputs) if hasattr(self.model, 'inputs') else 1 + # 3. Convert from the concrete function. + if self.verbose: + print("Converting concrete function to TFLite format...") - if num_inputs == 1: - # Single input model - use 'inputs' as the argument name - if len(tensor_specs) == 1: - return {"inputs": tensor_specs[0]} - else: - # Multiple specs for single input (shouldn't happen, but handle gracefully) - return {"inputs": tensor_specs[0]} - else: - # Multi-input model - use the actual input names or create generic names - input_args = {} - - if hasattr(self.model, 'inputs') and self.model.inputs: - # Use the actual input names from the model - for i, (input_layer, spec) in enumerate(zip(self.model.inputs, tensor_specs)): - input_name = input_layer.name - input_args[input_name] = spec - else: - # Fallback to generic names - for i, spec in enumerate(tensor_specs): - input_args[f"input_{i}"] = spec - - return input_args + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_func], + trackable_obj=wrapper + ) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, + ] + converter.experimental_enable_resource_variables = False + tflite_model = converter.convert() + + return tflite_model class _KerasModelWrapper(tf.Module): From 4404c39752afccd12ccef23fa3ffbb38017c5d11 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 9 Sep 2025 14:06:15 +0530 Subject: [PATCH 045/115] Update lite_rt_exporter.py Working well with keras --- keras/src/export/lite_rt_exporter.py | 203 +++++++++++++++++++++++---- 1 file changed, 173 insertions(+), 30 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 5b1034b0ff47..f6596b0b9039 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -4,6 +4,15 @@ import os import numpy as np +# Try to import LiteRT AOT compilation if available +try: + from litert.python.aot import aot_compile + from litert.python.aot.core import types as litert_types + from litert.python.aot.vendors import import_vendor + LITERT_AVAILABLE = True +except ImportError: + LITERT_AVAILABLE = False + class LiteRTExporter: """ @@ -11,15 +20,17 @@ class LiteRTExporter: callable signature for `model.call`. """ - def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, **kwargs): + def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, + aot_compile_targets=None, **kwargs): self.model = model self.input_signature = input_signature self.verbose = verbose or 0 self.max_sequence_length = max_sequence_length + self.aot_compile_targets = aot_compile_targets # List of LiteRT targets for AOT compilation self.kwargs = kwargs def export(self, filepath): - """Exports the Keras model to a TFLite file.""" + """Exports the Keras model to a TFLite file and optionally performs AOT compilation.""" if self.verbose: print("Starting LiteRT export...") print(f"Model: {type(self.model)} - built: {self.model.built}") @@ -38,9 +49,9 @@ def export(self, filepath): if self.verbose: final_size_mb = len(tflite_model) / (1024*1024) - print(f"LiteRT model converted successfully. Size: {final_size_mb:.2f} MB") + print(f"TFLite model converted successfully. Size: {final_size_mb:.2f} MB") - # 4. Save the model to the specified file path. + # 4. Save the initial TFLite model to the specified file path. if not filepath.endswith('.tflite'): filepath += '.tflite' @@ -48,7 +59,24 @@ def export(self, filepath): f.write(tflite_model) if self.verbose: - print(f"Exported model to {filepath}") + print(f"TFLite model saved to {filepath}") + + # 5. Perform AOT compilation if targets are specified and LiteRT is available + compiled_models = None + if self.aot_compile_targets and LITERT_AVAILABLE: + if self.verbose: + print("Performing AOT compilation for LiteRT targets...") + compiled_models = self._aot_compile(filepath) + elif self.aot_compile_targets and not LITERT_AVAILABLE: + if self.verbose: + print("Warning: AOT compilation requested but LiteRT is not available. Skipping.") + + if self.verbose: + print(f"LiteRT export completed. Base model: {filepath}") + if compiled_models: + print(f"AOT compiled models: {len(compiled_models.models)} variants") + + return compiled_models if compiled_models else filepath def _ensure_model_built(self): """ @@ -190,17 +218,11 @@ def _convert_to_tflite(self, input_signature): """Converts the Keras model to a TFLite model.""" is_sequential = isinstance(self.model, tf.keras.Sequential) - # For Sequential models, direct conversion is unreliable. - # We will always use the wrapper-based approach. - if is_sequential: - if self.verbose: - print("Sequential model detected. Using wrapper-based conversion for reliability.") - return self._convert_with_wrapper(input_signature) - - # For Functional models, try direct conversion first. + # Try direct conversion first for all models try: if self.verbose: - print("Functional model detected. Trying direct conversion...") + model_type = "Sequential" if is_sequential else "Functional" + print(f"{model_type} model detected. Trying direct conversion...") converter = tf.lite.TFLiteConverter.from_keras_model(self.model) converter.target_spec.supported_ops = [ @@ -216,7 +238,8 @@ def _convert_to_tflite(self, input_signature): except Exception as direct_error: if self.verbose: - print(f"Direct conversion failed for Functional model: {direct_error}") + model_type = "Sequential" if is_sequential else "Functional" + print(f"Direct conversion failed for {model_type} model: {direct_error}") print("Falling back to wrapper-based conversion...") return self._convert_with_wrapper(input_signature) @@ -239,18 +262,137 @@ def _convert_with_wrapper(self, input_signature): if self.verbose: print("Converting concrete function to TFLite format...") - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], - trackable_obj=wrapper - ) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, + # Try multiple conversion strategies for better inference compatibility + conversion_strategies = [ + {"experimental_enable_resource_variables": False, "name": "without resource variables"}, + {"experimental_enable_resource_variables": True, "name": "with resource variables"}, ] - converter.experimental_enable_resource_variables = False - tflite_model = converter.convert() + + for strategy in conversion_strategies: + try: + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_func], + trackable_obj=wrapper + ) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, + ] + converter.experimental_enable_resource_variables = strategy["experimental_enable_resource_variables"] + + if self.verbose: + print(f"Trying conversion {strategy['name']}...") + + tflite_model = converter.convert() + + if self.verbose: + print(f"Conversion successful {strategy['name']}!") + + return tflite_model + + except Exception as e: + if self.verbose: + print(f"Conversion failed {strategy['name']}: {e}") + continue + + # If all strategies fail, raise the last error + raise RuntimeError("All conversion strategies failed for wrapper-based conversion") + + def _aot_compile(self, tflite_filepath): + """Performs AOT compilation using LiteRT.""" + if not LITERT_AVAILABLE: + raise RuntimeError("LiteRT is not available for AOT compilation") + + try: + # Create a LiteRT model from the TFLite file + litert_model = litert_types.Model.create_from_path(tflite_filepath) + + # Determine output directory + base_dir = os.path.dirname(tflite_filepath) + model_name = os.path.splitext(os.path.basename(tflite_filepath))[0] + output_dir = os.path.join(base_dir, f"{model_name}_compiled") + + if self.verbose: + print(f"AOT compiling for targets: {self.aot_compile_targets}") + print(f"Output directory: {output_dir}") + + # Perform AOT compilation + result = aot_compile.aot_compile( + input_model=litert_model, + output_dir=output_dir, + target=self.aot_compile_targets, + keep_going=True # Continue even if some targets fail + ) + + if self.verbose: + print(f"AOT compilation completed: {len(result.models)} successful, {len(result.failed_backends)} failed") + if result.failed_backends: + for backend, error in result.failed_backends: + print(f" Failed: {backend.id()} - {error}") + + # Print compilation report if available + try: + report = result.compilation_report() + if report: + print("Compilation Report:") + print(report) + except: + pass + + return result + + except Exception as e: + if self.verbose: + print(f"AOT compilation failed: {e}") + import traceback + traceback.print_exc() + raise RuntimeError(f"AOT compilation failed: {e}") + + def _get_available_litert_targets(self): + """Get available LiteRT targets for AOT compilation.""" + if not LITERT_AVAILABLE: + return [] + + try: + # Get all registered targets + targets = import_vendor.AllRegisteredTarget() + return targets if isinstance(targets, list) else [targets] + except Exception as e: + if self.verbose: + print(f"Failed to get available targets: {e}") + return [] - return tflite_model + @classmethod + def export_with_aot(cls, model, filepath, targets=None, verbose=True, **kwargs): + """ + Convenience method to export a Keras model with AOT compilation. + + Args: + model: Keras model to export + filepath: Output file path + targets: List of LiteRT targets for AOT compilation (e.g., ['qualcomm', 'mediatek']) + verbose: Whether to print verbose output + **kwargs: Additional arguments for the exporter + + Returns: + CompilationResult if AOT compilation is performed, otherwise the filepath + """ + exporter = cls( + model=model, + verbose=verbose, + aot_compile_targets=targets, + **kwargs + ) + return exporter.export(filepath) + + @classmethod + def get_available_targets(cls): + """Get list of available LiteRT AOT compilation targets.""" + if not LITERT_AVAILABLE: + return [] + + dummy_exporter = cls(model=None) + return dummy_exporter._get_available_litert_targets() class _KerasModelWrapper(tf.Module): @@ -270,11 +412,12 @@ def __init__(self, model): # This prevents the _DictWrapper error during SavedModel serialization object.__setattr__(self, '_model', model) - # Explicitly track all variables from the Keras model by assigning - # them as individual attributes of this wrapper. This ensures they are - # properly included in the SavedModel and TFLite conversion. - for i, var in enumerate(model.variables): - setattr(self, f'_var_{i}', var) + # Track all variables from the Keras model using proper tf.Module methods + # This ensures proper variable handling for stateful layers like BatchNorm + with self.name_scope: + for i, var in enumerate(model.variables): + # Use a different attribute name to avoid conflicts with tf.Module's variables property + setattr(self, f'model_var_{i}', var) @tf.function def __call__(self, *args, **kwargs): From 6a119fbe1610475493edd721abf5423cd74ce660 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 15 Sep 2025 14:17:35 +0530 Subject: [PATCH 046/115] Update lite_rt_exporter.py --- keras/src/export/lite_rt_exporter.py | 46 +++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index f6596b0b9039..02252bc27aa2 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -20,20 +20,36 @@ class LiteRTExporter: callable signature for `model.call`. """ - def __init__(self, model, input_signature=None, verbose=None, max_sequence_length=512, + def __init__(self, model, input_signature=None, verbose=0, max_sequence_length=512, aot_compile_targets=None, **kwargs): + """Initialize the LiteRT exporter. + + Args: + model: The Keras model to export + input_signature: Input signature specification + verbose: Verbosity level (0=quiet, 1=info) + max_sequence_length: Maximum sequence length for inference + aot_compile_targets: List of LiteRT targets for AOT compilation + **kwargs: Additional export parameters + """ self.model = model self.input_signature = input_signature - self.verbose = verbose or 0 + self.verbose = verbose self.max_sequence_length = max_sequence_length - self.aot_compile_targets = aot_compile_targets # List of LiteRT targets for AOT compilation + self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs def export(self, filepath): - """Exports the Keras model to a TFLite file and optionally performs AOT compilation.""" + """Exports the Keras model to a TFLite file and optionally performs AOT compilation. + + Args: + filepath: Output path for the exported model + + Returns: + Path to exported model or compiled models if AOT compilation is performed + """ if self.verbose: print("Starting LiteRT export...") - print(f"Model: {type(self.model)} - built: {self.model.built}") # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() @@ -90,13 +106,6 @@ def _ensure_model_built(self): print("Ensuring model is traced by performing a forward pass...") try: - # Debug information - if self.verbose: - print(f"Model type: {type(self.model)}") - print(f"Model built: {self.model.built}") - if hasattr(self.model, '_functional'): - print(f"Sequential _functional: {self.model._functional}") - # Generate dummy inputs based on the model's specification dummy_inputs = [] # Prioritize `model.inputs` as it's the most reliable source @@ -107,8 +116,6 @@ def _ensure_model_built(self): shape = [1 if dim is None else dim for dim in input_layer.shape] dummy_input = tf.zeros(shape, dtype=input_layer.dtype or tf.float32) dummy_inputs.append(dummy_input) - if self.verbose: - print(f" Input shape: {shape}, dtype: {input_layer.dtype or tf.float32}") else: # Fallback for pure Sequential models without an Input layer if self.verbose: @@ -124,15 +131,7 @@ def _ensure_model_built(self): "Please add an `Input` layer or specify `input_shape` in the first layer." ) - # Debug the dummy inputs - if self.verbose: - print(f"About to call model with {len(dummy_inputs)} inputs") - for i, inp in enumerate(dummy_inputs): - print(f" Input {i}: shape={inp.shape}, dtype={inp.dtype}") - # Perform a direct call in inference mode to trace the model. - # This is more robust than a simple call() and avoids the - # overhead of model.predict(). if len(dummy_inputs) == 1: result = self.model(dummy_inputs[0], training=False) else: @@ -140,13 +139,10 @@ def _ensure_model_built(self): if self.verbose: print("Model successfully traced via direct call with training=False.") - print(f"Output shape: {result.shape if hasattr(result, 'shape') else type(result)}") except Exception as e: if self.verbose: print(f"Error during model call: {e}") - import traceback - traceback.print_exc() raise ValueError(f"Failed to trace model with error: {e}") # Final, critical check From 51a1c7f716b39516d028dd91c5f4e5d52661b947 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 17 Sep 2025 14:39:58 +0530 Subject: [PATCH 047/115] Remove sequence length bounding from export utils Eliminates the logic for bounding sequence length in model export utilities and related code paths. The max_sequence_length parameter and associated shape bounding for large vocabulary models are removed from export_utils.py and lite_rt_exporter.py. Updates model export documentation accordingly. Adds a comprehensive test script for Keras Hub LiteRT export, verifying numerical accuracy between original and exported models. --- keras/src/export/export_utils.py | 120 +------- keras/src/export/lite_rt_exporter.py | 10 +- keras/src/models/model.py | 5 +- test_keras_hub_export.py | 436 +++++++++++++++++++++++++++ 4 files changed, 456 insertions(+), 115 deletions(-) create mode 100644 test_keras_hub_export.py diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index a71cbca4127c..219ac3350147 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -6,17 +6,11 @@ from keras.src.utils.module_utils import tensorflow as tf -def get_input_signature(model, max_sequence_length=512): +def get_input_signature(model): """Get input signature for model export. Args: model: A Keras Model instance. - max_sequence_length: Maximum sequence length for sequence models (transformers). - Only applied when the model is detected as a sequence model based on input - names (e.g., 'token_ids', 'input_ids') or shape patterns. For non-sequence - models (e.g., image models), this parameter is ignored and dimensions remain - unbounded. For large vocabulary models, this may be automatically reduced - to prevent tensor size overflow. Defaults to 512. Returns: Input signature suitable for model export. @@ -32,16 +26,13 @@ def get_input_signature(model, max_sequence_length=512): "before export." ) - # For large vocabulary models, adjust sequence length to prevent overflow - effective_max_length = _get_safe_sequence_length(model, max_sequence_length) - if isinstance(model, models.Functional): input_signature = tree.map_structure(make_input_spec, model._inputs_struct) elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: # For subclassed models, try multiple approaches - input_signature = _infer_input_signature_from_model(model, effective_max_length) + input_signature = _infer_input_signature_from_model(model) if not input_signature: # Fallback: Try to get from model.inputs if available if hasattr(model, 'inputs') and model.inputs: @@ -55,67 +46,11 @@ def get_input_signature(model, max_sequence_length=512): return input_signature -def _get_safe_sequence_length(model, max_sequence_length): - """Get a safe sequence length that won't cause tensor size overflow.""" - model_class_name = getattr(model, '__class__', type(None)).__name__.lower() - model_module = getattr(getattr(model, '__class__', type(None)), '__module__', '').lower() - - # Check if this is a large vocabulary model - large_vocab_indicators = ['gemma', 'llama', 'palm', 'gpt'] - is_large_vocab = ( - any(indicator in model_class_name for indicator in large_vocab_indicators) or - 'keras_hub' in model_module - ) - - if is_large_vocab: - # Estimate tensor size: seq_len × vocab_size × 4 bytes (float32) - # Conservative vocab size estimate for large models - estimated_vocab_size = 256000 - estimated_bytes = max_sequence_length * estimated_vocab_size * 4 - - # If estimated size > 512MB, reduce sequence length - max_safe_bytes = 512 * 1024 * 1024 # 512MB - if estimated_bytes > max_safe_bytes: - safe_length = max_safe_bytes // (estimated_vocab_size * 4) - safe_length = max(32, min(safe_length, max_sequence_length)) # At least 32, at most original - if safe_length < max_sequence_length: - print(f"Warning: Reducing max_sequence_length from {max_sequence_length} to {safe_length} " - f"for large vocabulary model to prevent tensor size overflow.") - return safe_length - - return max_sequence_length - - -def _infer_input_signature_from_model(model, max_sequence_length=512): +def _infer_input_signature_from_model(model): shapes_dict = getattr(model, "_build_shapes_dict", None) if not shapes_dict: return None - # Use the safe sequence length to prevent overflow - safe_sequence_length = _get_safe_sequence_length(model, max_sequence_length) - - def _is_sequence_model(): - """Detect if this is a sequence model based on input names and shapes.""" - if not shapes_dict: - return False - - # Check input names for sequence model indicators - input_names = list(shapes_dict.keys()) - sequence_indicators = ['token_ids', 'input_ids', 'tokens', 'input_tokens', - 'padding_mask', 'attention_mask', 'segment_ids'] - - if any(indicator in name.lower() for name in input_names for indicator in sequence_indicators): - return True - - # Check if any input has shape with 2+ dimensions where second dim is None - # This is typical for sequence models: (batch_size, seq_len, ...) - for shape in shapes_dict.values(): - if isinstance(shape, (tuple, list)) and len(shape) >= 2: - if shape[0] is None and shape[1] is None: # (None, None, ...) - return True - - return False - def _make_input_spec(structure): # We need to turn wrapper structures like TrackingDict or _DictWrapper # into plain Python structures because they don't work with jax2tf/JAX. @@ -123,24 +58,15 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - # Handle shape bounding based on model type - is_sequence_model = _is_sequence_model() + # Keep batch dimension unbounded, keep other dimensions as they are bounded_shape = [] for i, dim in enumerate(structure): - if dim is None: - if i == 0: - # Always keep batch dimension as None - bounded_shape.append(None) - elif is_sequence_model and i == 1: - # For sequence models, bound the sequence length dimension - # Using safe sequence length to prevent overflow - bounded_shape.append(safe_sequence_length) - else: - # For non-sequence models or non-sequence dimensions, keep unbounded - # This prevents breaking image models, etc. - bounded_shape.append(None) + if dim is None and i == 0: + # Always keep batch dimension as None + bounded_shape.append(None) else: + # Keep other dimensions as they are (None or specific size) bounded_shape.append(dim) return layers.InputSpec( @@ -149,23 +75,15 @@ def _make_input_spec(structure): return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - # Handle shape bounding based on model type - is_sequence_model = _is_sequence_model() + # Keep batch dimension unbounded, keep other dimensions as they are bounded_shape = [] for i, dim in enumerate(structure): - if dim is None: - if i == 0: - # Always keep batch dimension as None - bounded_shape.append(None) - elif is_sequence_model and i == 1: - # For sequence models, bound the sequence length dimension - # Using safe sequence length to prevent overflow - bounded_shape.append(safe_sequence_length) - else: - # For non-sequence models or non-sequence dimensions, keep unbounded - bounded_shape.append(None) + if dim is None and i == 0: + # Always keep batch dimension as None + bounded_shape.append(None) else: + # Keep other dimensions as they are bounded_shape.append(dim) return layers.InputSpec( @@ -183,16 +101,8 @@ def _make_input_spec(structure): return _make_input_spec(list(shapes_dict.values())[0]) else: # Multiple inputs - try to determine if it's a dict or list structure - # For Keras-Hub models like Gemma3, inputs are typically dictionaries - input_keys = list(shapes_dict.keys()) - - # Common patterns for multi-input models - if any(key in ['token_ids', 'padding_mask', 'input_ids', 'attention_mask'] for key in input_keys): - # Dictionary input structure (common for transformers) - return {key: _make_input_spec(shape) for key, shape in shapes_dict.items()} - else: - # List input structure - return [_make_input_spec(shape) for shape in shapes_dict.values()] + # Return as dictionary by default to preserve input names + return {key: _make_input_spec(shape) for key, shape in shapes_dict.items()} def make_input_spec(x): diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/lite_rt_exporter.py index 02252bc27aa2..7721d7095824 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/lite_rt_exporter.py @@ -20,7 +20,7 @@ class LiteRTExporter: callable signature for `model.call`. """ - def __init__(self, model, input_signature=None, verbose=0, max_sequence_length=512, + def __init__(self, model, input_signature=None, verbose=0, aot_compile_targets=None, **kwargs): """Initialize the LiteRT exporter. @@ -28,14 +28,12 @@ def __init__(self, model, input_signature=None, verbose=0, max_sequence_length=5 model: The Keras model to export input_signature: Input signature specification verbose: Verbosity level (0=quiet, 1=info) - max_sequence_length: Maximum sequence length for inference aot_compile_targets: List of LiteRT targets for AOT compilation **kwargs: Additional export parameters """ self.model = model self.input_signature = input_signature self.verbose = verbose - self.max_sequence_length = max_sequence_length self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs @@ -54,11 +52,11 @@ def export(self, filepath): # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() - # 2. Resolve / infer input signature with bounded sequence length. + # 2. Resolve / infer input signature if self.input_signature is None: if self.verbose: - print(f"Inferring input signature with max_sequence_length={self.max_sequence_length}.") - self.input_signature = get_input_signature(self.model, self.max_sequence_length) + print("Inferring input signature from model.") + self.input_signature = get_input_signature(self.model) # 3. Convert the model to TFLite. tflite_model = self._convert_to_tflite(self.input_signature) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index bde30e44bdda..a3a384643d42 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -593,14 +593,11 @@ def export( - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. - `allow_custom_ops`: Optional `bool`. Specific to `format="lite_rt"`. - Whether to allow custom operations during conversion. Defaults to `False`.a + Whether to allow custom operations during conversion. Defaults to `False`. - `enable_select_tf_ops`: Optional `bool`. Specific to `format="lite_rt"`. Whether to enable TensorFlow Select ops for unsupported operations. Defaults to `False`. - `optimizations`: Optional `list`. Specific to `format="lite_rt"`. List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). - - `max_sequence_length`: Optional `int`. Specific to `format="lite_rt"`. - Maximum sequence length for transformer models to avoid unbounded shapes. - Defaults to `512`. **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. diff --git a/test_keras_hub_export.py b/test_keras_hub_export.py new file mode 100644 index 000000000000..67ec2108d2f6 --- /dev/null +++ b/test_keras_hub_export.py @@ -0,0 +1,436 @@ +#!/usr/bin/env python3 +""" +Test script for Keras Hub LiteRT export functionality. + +This script loads a Keras Hub model, exports it to LiteRT format, +and verifies numerical accuracy between original and exported models. + +Change MODEL_PRESET in load_model() to test different models. +""" + +import os +import time +import tempfile +import numpy as np +from pathlib import Path + +# Configure environment +print("🔧 Configuring Keras to use the TensorFlow backend...") +os.environ["KERAS_BACKEND"] = "tensorflow" +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" +os.environ["TF_NUM_INTEROP_THREADS"] = "1" +os.environ["TF_NUM_INTRAOP_THREADS"] = "1" +os.environ["KAGGLE_KEY"]="20fd7df00ecb83cf98c73dc97029f650" +os.environ["KAGGLE_USERNAME"]="pctablet505" + +import tensorflow as tf +tf.config.threading.set_inter_op_parallelism_threads(1) +tf.config.threading.set_intra_op_parallelism_threads(1) + +# Configure GPU if available +physical_devices = tf.config.list_physical_devices('GPU') +if physical_devices: + try: + for device in physical_devices: + tf.config.experimental.set_memory_growth(device, True) + except: + pass + +import keras +import keras_hub + +# Check LiteRT availability +try: + import ai_edge_litert + LITERT_AVAILABLE = True + print("✅ ai_edge_litert is available") +except ImportError: + try: + import tensorflow.lite as ai_edge_litert + LITERT_AVAILABLE = True + print("✅ Using tensorflow.lite as ai_edge_litert") + except ImportError: + LITERT_AVAILABLE = False + print("❌ LiteRT not available") + +def load_model(): + """Load the specified model with random weights.""" + print("\n📦 Loading model...") + + # Change this to test different models + MODEL_PRESET = "llama3.2_1b" + MODEL_PRESET = "gemma3_1b" + # Examples: "gpt2_base_en", "gemma_2b_en", "mistral_7b_en", "phi3_mini_4k_instruct_en" + + model_name = MODEL_PRESET.replace(".", "_").replace("/", "_") + + # # Try to load existing saved model + # if os.path.exists(saved_model_path): + # print(f"✅ Loading existing model from {saved_model_path}") + # try: + # model = keras.models.load_model(saved_model_path) + # print(f"📏 Sequence length: {model.preprocessor.sequence_length}") + # model.summary() + # return model + # except Exception as e: + # print(f"⚠️ Failed to load saved model: {e}") + + # Load from preset + try: + if "gpt" in MODEL_PRESET.lower(): + model = keras_hub.models.GptCausalLM.from_preset(MODEL_PRESET, load_weights=False) + elif "gemma" in MODEL_PRESET.lower(): + model = keras_hub.models.Gemma3CausalLM.from_preset(MODEL_PRESET, load_weights=False) + elif "llama" in MODEL_PRESET.lower(): + model = keras_hub.models.Llama3CausalLM.from_preset(MODEL_PRESET, load_weights=False) + elif "mistral" in MODEL_PRESET.lower(): + model = keras_hub.models.MistralCausalLM.from_preset(MODEL_PRESET, load_weights=False) + elif "phi" in MODEL_PRESET.lower(): + model = keras_hub.models.Phi3CausalLM.from_preset(MODEL_PRESET, load_weights=False) + else: + # Generic fallback + import keras_hub.models as models + for model_class_name in ['GptCausalLM', 'Gemma3CausalLM', 'Llama3CausalLM', + 'MistralCausalLM', 'Phi3CausalLM']: + if hasattr(models, model_class_name): + try: + model_class = getattr(models, model_class_name) + model = model_class.from_preset(MODEL_PRESET, load_weights=False) + break + except: + continue + else: + raise ValueError(f"No compatible model class for '{MODEL_PRESET}'") + + model.preprocessor.sequence_length = 128 + print(f"✅ Loaded '{MODEL_PRESET}' with sequence length {model.preprocessor.sequence_length}") + + + model.summary() + return model + + except Exception as e: + print(f"❌ Failed to load '{MODEL_PRESET}': {e}") + raise + +def create_test_inputs(model): + """Create test inputs for the model.""" + print("\n🎯 Creating test inputs...") + + # Instead of using preprocessor, create direct inputs that match model expectations + batch_size = 1 # Use batch size 1 to match the exported model + sequence_length = 128 + + # Create random token IDs (use proper vocab range for Llama3) + # Llama3 typically has vocab_size around 128,256, so use 1-32000 as safe range + token_ids = tf.random.uniform( + shape=(batch_size, sequence_length), + minval=1, # Avoid 0 which is usually padding + maxval=32000, # Use a reasonable vocab range for Llama3 + dtype=tf.int32 + ) + + # Create padding mask (all True for simplicity - no padding) + padding_mask = tf.ones_like(token_ids, dtype=tf.bool) + + # Create the input dictionary that matches model expectations + test_inputs = { + "token_ids": token_ids, + "padding_mask": padding_mask + } + + print(f"✅ Test inputs created:") + print(f" Batch size: {batch_size}") + print(f" Sequence length: {sequence_length}") + print(f" Input keys: {list(test_inputs.keys())}") + + for key, value in test_inputs.items(): + print(f" {key}: shape={value.shape}, dtype={value.dtype}") + + return test_inputs + +def export_to_litert(model, model_name): + """Export the model to LiteRT format.""" + print(f"\n🚀 Exporting '{model_name}' to LiteRT...") + + litert_model_path = f"{model_name}_test_model.tflite" + + # Check if export already exists + if os.path.exists(litert_model_path): + print(f"✅ Found existing LiteRT model: {litert_model_path}") + return litert_model_path + + try: + start_time = time.time() + exported_path = model.export(litert_model_path, "lite_rt") + end_time = time.time() + + export_time = end_time - start_time + if os.path.exists(litert_model_path): + file_size = os.path.getsize(litert_model_path) + print(f"📊 LiteRT model size: {file_size / (1024*1024):.2f} MB") + return litert_model_path + print(f"✅ Export successful!") + print(f"⏱️ Export time: {export_time:.2f} seconds") + print(f"� Model size: {model_size_mb:.2f} MB") + print(f"💾 Saved to: {exported_path}") + + return exported_path + + except Exception as e: + print(f"❌ Export failed: {e}") + raise + + +def load_litert_interpreter(tflite_path): + """Load the LiteRT interpreter.""" + print("\n🔧 Loading LiteRT interpreter...") + + if not LITERT_AVAILABLE: + raise ImportError("LiteRT interpreter not available") + + try: + interpreter = ai_edge_litert.Interpreter(model_path=tflite_path) + interpreter.allocate_tensors() + + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + print("✅ LiteRT interpreter loaded") + print(f"📥 Input tensors: {len(input_details)}") + for i, detail in enumerate(input_details): + print(f" Input {i}: {detail['name']} - {detail['shape']}, {detail['dtype']}") + + print(f"📤 Output tensors: {len(output_details)}") + for i, detail in enumerate(output_details): + print(f" Output {i}: {detail['name']} - {detail['shape']}, {detail['dtype']}") + + return interpreter, input_details, output_details + + except Exception as e: + print(f"❌ Failed to load LiteRT interpreter: {e}") + raise + + +def run_keras_inference(model, inputs): + """Run inference with the original Keras model.""" + print("\n🧠 Running Keras inference...") + + start_time = time.time() + + try: + keras_outputs = model(inputs) + except Exception as e: + print(f"ℹ️ Dictionary input failed: {e}") + try: + keras_outputs = model(inputs["token_ids"], inputs["padding_mask"]) + except Exception as e2: + print(f"❌ Both input methods failed:") + print(f" Dict method: {e}") + print(f" Positional method: {e2}") + raise ValueError("Could not run Keras model inference") + + end_time = time.time() + + print(f"✅ Keras inference completed in {end_time - start_time:.4f} seconds") + print(f"📊 Output: {keras_outputs.shape}, {keras_outputs.dtype}") + + return keras_outputs + + +def run_litert_inference(interpreter, input_details, output_details, inputs): + """Run inference with the LiteRT interpreter.""" + print("\n⚡ Running LiteRT inference...") + + print(f"🔍 Available inputs: {list(inputs.keys())}") + print(f"🔍 Expected inputs: {[detail['name'] for detail in input_details]}") + + # Set input tensors + for i, input_detail in enumerate(input_details): + input_name = input_detail['name'] + input_data = None + + print(f"🔍 Mapping input {i}: {input_name}") + + # Direct name matching + for key in inputs.keys(): + if key.lower() in input_name.lower() or input_name.lower() in key.lower(): + input_data = inputs[key] + print(f" ✅ Mapped by name: {key} -> {input_name}") + break + + # Pattern matching + if input_data is None: + if 'token' in input_name.lower() or 'input_1' == input_name or i == 0: + if 'token_ids' in inputs: + input_data = inputs['token_ids'] + print(f" ✅ Mapped by pattern: token_ids -> {input_name}") + elif 'mask' in input_name.lower() or 'input_2' == input_name or i == 1: + if 'padding_mask' in inputs: + input_data = inputs['padding_mask'] + print(f" ✅ Mapped by pattern: padding_mask -> {input_name}") + + # By order + if input_data is None: + input_keys = list(inputs.keys()) + if i < len(input_keys): + input_data = inputs[input_keys[i]] + print(f" ✅ Mapped by order: {input_keys[i]} -> {input_name}") + + if input_data is None: + raise ValueError(f"Cannot map input: {input_name} (index {i})") + + # Ensure correct data type + expected_dtype = input_detail['dtype'] + if hasattr(input_data, 'dtype') and input_data.dtype != expected_dtype: + print(f" 🔄 Converting dtype: {input_data.dtype} -> {expected_dtype}") + input_data = tf.cast(input_data, expected_dtype) + + # Convert to numpy + if hasattr(input_data, 'numpy'): + input_numpy = input_data.numpy() + else: + input_numpy = np.array(input_data) + + interpreter.set_tensor(input_detail['index'], input_numpy) + print(f" ✅ Set input {input_name}: {input_numpy.shape}, {input_numpy.dtype}") + + # Run inference + start_time = time.time() + interpreter.invoke() + end_time = time.time() + + # Get outputs + litert_outputs = [] + for output_detail in output_details: + output = interpreter.get_tensor(output_detail['index']) + litert_outputs.append(output) + print(f"📤 Output {output_detail['name']}: {output.shape}, {output.dtype}") + + print(f"✅ LiteRT inference completed in {end_time - start_time:.4f} seconds") + + return litert_outputs[0] if len(litert_outputs) == 1 else litert_outputs + + +def compare_outputs(keras_output, litert_output, tolerance=1e-3): + """Compare outputs from Keras and LiteRT models.""" + print("\n🔍 Comparing outputs...") + + # Convert to numpy arrays + keras_np = keras_output.numpy() if hasattr(keras_output, 'numpy') else keras_output + litert_np = litert_output if isinstance(litert_output, np.ndarray) else np.array(litert_output) + + print(f"📊 Keras output: {keras_np.shape}") + print(f"📊 LiteRT output: {litert_np.shape}") + + # Check shapes match + if keras_np.shape != litert_np.shape: + print(f"❌ Shape mismatch: Keras {keras_np.shape} vs LiteRT {litert_np.shape}") + return False + + # Calculate differences + abs_diff = np.abs(keras_np - litert_np) + max_abs_diff = np.max(abs_diff) + mean_abs_diff = np.mean(abs_diff) + + rel_diff = np.abs(keras_np - litert_np) / (np.abs(keras_np) + 1e-8) + max_rel_diff = np.max(rel_diff) + mean_rel_diff = np.mean(rel_diff) + + print(f"📈 Max absolute difference: {max_abs_diff:.6f}") + print(f"📈 Mean absolute difference: {mean_abs_diff:.6f}") + print(f"📈 Max relative difference: {max_rel_diff:.6f}") + print(f"📈 Mean relative difference: {mean_rel_diff:.6f}") + + outputs_match = max_abs_diff < tolerance + + if outputs_match: + print(f"✅ Outputs match within tolerance ({tolerance})") + else: + print(f"❌ Outputs differ by more than tolerance ({tolerance})") + + print("\n🔍 Sample comparisons:") + flat_keras = keras_np.flatten() + flat_litert = litert_np.flatten() + + for i in range(min(10, len(flat_keras))): + diff = abs(flat_keras[i] - flat_litert[i]) + print(f" Index {i}: Keras={flat_keras[i]:.6f}, LiteRT={flat_litert[i]:.6f}, diff={diff:.6f}") + + return outputs_match + + +def main(): + """Main test function.""" + print("🎯 Starting Keras Hub LiteRT Export Test") + print("=" * 60) + + # Test basic functionality + print("\n🔍 Testing basic functionality...") + try: + print(f"✅ TensorFlow version: {tf.__version__}") + print(f"✅ Keras version: {keras.__version__}") + print(f"✅ Keras Hub available: {hasattr(keras_hub, 'models')}") + print(f"✅ LiteRT available: {LITERT_AVAILABLE}") + + # Test basic TF operations + print("🧪 Testing basic TensorFlow operations...") + x = tf.constant([1, 2, 3, 4]) + y = tf.square(x) + print(f" tf.square([1,2,3,4]) = {y.numpy()}") + print("✅ Basic TensorFlow operations work") + + except Exception as e: + print(f"❌ Basic functionality test failed: {e}") + return False + + try: + # Load model + model = load_model() + + # Create test inputs + test_inputs = create_test_inputs(model) + + # Test Keras inference + print("\n🧪 Testing Keras inference before export...") + keras_output = run_keras_inference(model, test_inputs) + + # Export to LiteRT + model_name = type(model).__name__.lower().replace("causal", "").replace("lm", "") + export_dir = f"{model_name}" + os.makedirs(export_dir, exist_ok=True) + export_path = os.path.join(export_dir, f"{model_name}_model") + tflite_path = export_to_litert(model, export_path) + + # Load LiteRT interpreter + interpreter, input_details, output_details = load_litert_interpreter(tflite_path) + + # Run LiteRT inference + litert_output = run_litert_inference(interpreter, input_details, output_details, test_inputs) + + # Compare outputs + outputs_match = compare_outputs(keras_output, litert_output) + + print("\n" + "=" * 60) + if outputs_match: + print("🎉 SUCCESS: Export test passed! Outputs match between Keras and LiteRT.") + else: + print("❌ FAILED: Export test failed! Outputs don't match.") + print("ℹ️ This might be due to numerical precision differences.") + print("=" * 60) + print(f"\n💡 Models saved in:") + print(f" 📁 TFLite: {export_dir}") + + + return outputs_match + + except Exception as e: + print(f"\n💥 Test failed: {e}") + import traceback + traceback.print_exc() + return False + + +if __name__ == "__main__": + success = main() + exit(0 if success else 1) \ No newline at end of file From e1fca24b8e493649577d00d5702080cf4d5274d7 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 17 Sep 2025 14:49:40 +0530 Subject: [PATCH 048/115] Delete test_keras_hub_export.py --- test_keras_hub_export.py | 436 --------------------------------------- 1 file changed, 436 deletions(-) delete mode 100644 test_keras_hub_export.py diff --git a/test_keras_hub_export.py b/test_keras_hub_export.py deleted file mode 100644 index 67ec2108d2f6..000000000000 --- a/test_keras_hub_export.py +++ /dev/null @@ -1,436 +0,0 @@ -#!/usr/bin/env python3 -""" -Test script for Keras Hub LiteRT export functionality. - -This script loads a Keras Hub model, exports it to LiteRT format, -and verifies numerical accuracy between original and exported models. - -Change MODEL_PRESET in load_model() to test different models. -""" - -import os -import time -import tempfile -import numpy as np -from pathlib import Path - -# Configure environment -print("🔧 Configuring Keras to use the TensorFlow backend...") -os.environ["KERAS_BACKEND"] = "tensorflow" -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" -os.environ["TF_NUM_INTEROP_THREADS"] = "1" -os.environ["TF_NUM_INTRAOP_THREADS"] = "1" -os.environ["KAGGLE_KEY"]="20fd7df00ecb83cf98c73dc97029f650" -os.environ["KAGGLE_USERNAME"]="pctablet505" - -import tensorflow as tf -tf.config.threading.set_inter_op_parallelism_threads(1) -tf.config.threading.set_intra_op_parallelism_threads(1) - -# Configure GPU if available -physical_devices = tf.config.list_physical_devices('GPU') -if physical_devices: - try: - for device in physical_devices: - tf.config.experimental.set_memory_growth(device, True) - except: - pass - -import keras -import keras_hub - -# Check LiteRT availability -try: - import ai_edge_litert - LITERT_AVAILABLE = True - print("✅ ai_edge_litert is available") -except ImportError: - try: - import tensorflow.lite as ai_edge_litert - LITERT_AVAILABLE = True - print("✅ Using tensorflow.lite as ai_edge_litert") - except ImportError: - LITERT_AVAILABLE = False - print("❌ LiteRT not available") - -def load_model(): - """Load the specified model with random weights.""" - print("\n📦 Loading model...") - - # Change this to test different models - MODEL_PRESET = "llama3.2_1b" - MODEL_PRESET = "gemma3_1b" - # Examples: "gpt2_base_en", "gemma_2b_en", "mistral_7b_en", "phi3_mini_4k_instruct_en" - - model_name = MODEL_PRESET.replace(".", "_").replace("/", "_") - - # # Try to load existing saved model - # if os.path.exists(saved_model_path): - # print(f"✅ Loading existing model from {saved_model_path}") - # try: - # model = keras.models.load_model(saved_model_path) - # print(f"📏 Sequence length: {model.preprocessor.sequence_length}") - # model.summary() - # return model - # except Exception as e: - # print(f"⚠️ Failed to load saved model: {e}") - - # Load from preset - try: - if "gpt" in MODEL_PRESET.lower(): - model = keras_hub.models.GptCausalLM.from_preset(MODEL_PRESET, load_weights=False) - elif "gemma" in MODEL_PRESET.lower(): - model = keras_hub.models.Gemma3CausalLM.from_preset(MODEL_PRESET, load_weights=False) - elif "llama" in MODEL_PRESET.lower(): - model = keras_hub.models.Llama3CausalLM.from_preset(MODEL_PRESET, load_weights=False) - elif "mistral" in MODEL_PRESET.lower(): - model = keras_hub.models.MistralCausalLM.from_preset(MODEL_PRESET, load_weights=False) - elif "phi" in MODEL_PRESET.lower(): - model = keras_hub.models.Phi3CausalLM.from_preset(MODEL_PRESET, load_weights=False) - else: - # Generic fallback - import keras_hub.models as models - for model_class_name in ['GptCausalLM', 'Gemma3CausalLM', 'Llama3CausalLM', - 'MistralCausalLM', 'Phi3CausalLM']: - if hasattr(models, model_class_name): - try: - model_class = getattr(models, model_class_name) - model = model_class.from_preset(MODEL_PRESET, load_weights=False) - break - except: - continue - else: - raise ValueError(f"No compatible model class for '{MODEL_PRESET}'") - - model.preprocessor.sequence_length = 128 - print(f"✅ Loaded '{MODEL_PRESET}' with sequence length {model.preprocessor.sequence_length}") - - - model.summary() - return model - - except Exception as e: - print(f"❌ Failed to load '{MODEL_PRESET}': {e}") - raise - -def create_test_inputs(model): - """Create test inputs for the model.""" - print("\n🎯 Creating test inputs...") - - # Instead of using preprocessor, create direct inputs that match model expectations - batch_size = 1 # Use batch size 1 to match the exported model - sequence_length = 128 - - # Create random token IDs (use proper vocab range for Llama3) - # Llama3 typically has vocab_size around 128,256, so use 1-32000 as safe range - token_ids = tf.random.uniform( - shape=(batch_size, sequence_length), - minval=1, # Avoid 0 which is usually padding - maxval=32000, # Use a reasonable vocab range for Llama3 - dtype=tf.int32 - ) - - # Create padding mask (all True for simplicity - no padding) - padding_mask = tf.ones_like(token_ids, dtype=tf.bool) - - # Create the input dictionary that matches model expectations - test_inputs = { - "token_ids": token_ids, - "padding_mask": padding_mask - } - - print(f"✅ Test inputs created:") - print(f" Batch size: {batch_size}") - print(f" Sequence length: {sequence_length}") - print(f" Input keys: {list(test_inputs.keys())}") - - for key, value in test_inputs.items(): - print(f" {key}: shape={value.shape}, dtype={value.dtype}") - - return test_inputs - -def export_to_litert(model, model_name): - """Export the model to LiteRT format.""" - print(f"\n🚀 Exporting '{model_name}' to LiteRT...") - - litert_model_path = f"{model_name}_test_model.tflite" - - # Check if export already exists - if os.path.exists(litert_model_path): - print(f"✅ Found existing LiteRT model: {litert_model_path}") - return litert_model_path - - try: - start_time = time.time() - exported_path = model.export(litert_model_path, "lite_rt") - end_time = time.time() - - export_time = end_time - start_time - if os.path.exists(litert_model_path): - file_size = os.path.getsize(litert_model_path) - print(f"📊 LiteRT model size: {file_size / (1024*1024):.2f} MB") - return litert_model_path - print(f"✅ Export successful!") - print(f"⏱️ Export time: {export_time:.2f} seconds") - print(f"� Model size: {model_size_mb:.2f} MB") - print(f"💾 Saved to: {exported_path}") - - return exported_path - - except Exception as e: - print(f"❌ Export failed: {e}") - raise - - -def load_litert_interpreter(tflite_path): - """Load the LiteRT interpreter.""" - print("\n🔧 Loading LiteRT interpreter...") - - if not LITERT_AVAILABLE: - raise ImportError("LiteRT interpreter not available") - - try: - interpreter = ai_edge_litert.Interpreter(model_path=tflite_path) - interpreter.allocate_tensors() - - input_details = interpreter.get_input_details() - output_details = interpreter.get_output_details() - - print("✅ LiteRT interpreter loaded") - print(f"📥 Input tensors: {len(input_details)}") - for i, detail in enumerate(input_details): - print(f" Input {i}: {detail['name']} - {detail['shape']}, {detail['dtype']}") - - print(f"📤 Output tensors: {len(output_details)}") - for i, detail in enumerate(output_details): - print(f" Output {i}: {detail['name']} - {detail['shape']}, {detail['dtype']}") - - return interpreter, input_details, output_details - - except Exception as e: - print(f"❌ Failed to load LiteRT interpreter: {e}") - raise - - -def run_keras_inference(model, inputs): - """Run inference with the original Keras model.""" - print("\n🧠 Running Keras inference...") - - start_time = time.time() - - try: - keras_outputs = model(inputs) - except Exception as e: - print(f"ℹ️ Dictionary input failed: {e}") - try: - keras_outputs = model(inputs["token_ids"], inputs["padding_mask"]) - except Exception as e2: - print(f"❌ Both input methods failed:") - print(f" Dict method: {e}") - print(f" Positional method: {e2}") - raise ValueError("Could not run Keras model inference") - - end_time = time.time() - - print(f"✅ Keras inference completed in {end_time - start_time:.4f} seconds") - print(f"📊 Output: {keras_outputs.shape}, {keras_outputs.dtype}") - - return keras_outputs - - -def run_litert_inference(interpreter, input_details, output_details, inputs): - """Run inference with the LiteRT interpreter.""" - print("\n⚡ Running LiteRT inference...") - - print(f"🔍 Available inputs: {list(inputs.keys())}") - print(f"🔍 Expected inputs: {[detail['name'] for detail in input_details]}") - - # Set input tensors - for i, input_detail in enumerate(input_details): - input_name = input_detail['name'] - input_data = None - - print(f"🔍 Mapping input {i}: {input_name}") - - # Direct name matching - for key in inputs.keys(): - if key.lower() in input_name.lower() or input_name.lower() in key.lower(): - input_data = inputs[key] - print(f" ✅ Mapped by name: {key} -> {input_name}") - break - - # Pattern matching - if input_data is None: - if 'token' in input_name.lower() or 'input_1' == input_name or i == 0: - if 'token_ids' in inputs: - input_data = inputs['token_ids'] - print(f" ✅ Mapped by pattern: token_ids -> {input_name}") - elif 'mask' in input_name.lower() or 'input_2' == input_name or i == 1: - if 'padding_mask' in inputs: - input_data = inputs['padding_mask'] - print(f" ✅ Mapped by pattern: padding_mask -> {input_name}") - - # By order - if input_data is None: - input_keys = list(inputs.keys()) - if i < len(input_keys): - input_data = inputs[input_keys[i]] - print(f" ✅ Mapped by order: {input_keys[i]} -> {input_name}") - - if input_data is None: - raise ValueError(f"Cannot map input: {input_name} (index {i})") - - # Ensure correct data type - expected_dtype = input_detail['dtype'] - if hasattr(input_data, 'dtype') and input_data.dtype != expected_dtype: - print(f" 🔄 Converting dtype: {input_data.dtype} -> {expected_dtype}") - input_data = tf.cast(input_data, expected_dtype) - - # Convert to numpy - if hasattr(input_data, 'numpy'): - input_numpy = input_data.numpy() - else: - input_numpy = np.array(input_data) - - interpreter.set_tensor(input_detail['index'], input_numpy) - print(f" ✅ Set input {input_name}: {input_numpy.shape}, {input_numpy.dtype}") - - # Run inference - start_time = time.time() - interpreter.invoke() - end_time = time.time() - - # Get outputs - litert_outputs = [] - for output_detail in output_details: - output = interpreter.get_tensor(output_detail['index']) - litert_outputs.append(output) - print(f"📤 Output {output_detail['name']}: {output.shape}, {output.dtype}") - - print(f"✅ LiteRT inference completed in {end_time - start_time:.4f} seconds") - - return litert_outputs[0] if len(litert_outputs) == 1 else litert_outputs - - -def compare_outputs(keras_output, litert_output, tolerance=1e-3): - """Compare outputs from Keras and LiteRT models.""" - print("\n🔍 Comparing outputs...") - - # Convert to numpy arrays - keras_np = keras_output.numpy() if hasattr(keras_output, 'numpy') else keras_output - litert_np = litert_output if isinstance(litert_output, np.ndarray) else np.array(litert_output) - - print(f"📊 Keras output: {keras_np.shape}") - print(f"📊 LiteRT output: {litert_np.shape}") - - # Check shapes match - if keras_np.shape != litert_np.shape: - print(f"❌ Shape mismatch: Keras {keras_np.shape} vs LiteRT {litert_np.shape}") - return False - - # Calculate differences - abs_diff = np.abs(keras_np - litert_np) - max_abs_diff = np.max(abs_diff) - mean_abs_diff = np.mean(abs_diff) - - rel_diff = np.abs(keras_np - litert_np) / (np.abs(keras_np) + 1e-8) - max_rel_diff = np.max(rel_diff) - mean_rel_diff = np.mean(rel_diff) - - print(f"📈 Max absolute difference: {max_abs_diff:.6f}") - print(f"📈 Mean absolute difference: {mean_abs_diff:.6f}") - print(f"📈 Max relative difference: {max_rel_diff:.6f}") - print(f"📈 Mean relative difference: {mean_rel_diff:.6f}") - - outputs_match = max_abs_diff < tolerance - - if outputs_match: - print(f"✅ Outputs match within tolerance ({tolerance})") - else: - print(f"❌ Outputs differ by more than tolerance ({tolerance})") - - print("\n🔍 Sample comparisons:") - flat_keras = keras_np.flatten() - flat_litert = litert_np.flatten() - - for i in range(min(10, len(flat_keras))): - diff = abs(flat_keras[i] - flat_litert[i]) - print(f" Index {i}: Keras={flat_keras[i]:.6f}, LiteRT={flat_litert[i]:.6f}, diff={diff:.6f}") - - return outputs_match - - -def main(): - """Main test function.""" - print("🎯 Starting Keras Hub LiteRT Export Test") - print("=" * 60) - - # Test basic functionality - print("\n🔍 Testing basic functionality...") - try: - print(f"✅ TensorFlow version: {tf.__version__}") - print(f"✅ Keras version: {keras.__version__}") - print(f"✅ Keras Hub available: {hasattr(keras_hub, 'models')}") - print(f"✅ LiteRT available: {LITERT_AVAILABLE}") - - # Test basic TF operations - print("🧪 Testing basic TensorFlow operations...") - x = tf.constant([1, 2, 3, 4]) - y = tf.square(x) - print(f" tf.square([1,2,3,4]) = {y.numpy()}") - print("✅ Basic TensorFlow operations work") - - except Exception as e: - print(f"❌ Basic functionality test failed: {e}") - return False - - try: - # Load model - model = load_model() - - # Create test inputs - test_inputs = create_test_inputs(model) - - # Test Keras inference - print("\n🧪 Testing Keras inference before export...") - keras_output = run_keras_inference(model, test_inputs) - - # Export to LiteRT - model_name = type(model).__name__.lower().replace("causal", "").replace("lm", "") - export_dir = f"{model_name}" - os.makedirs(export_dir, exist_ok=True) - export_path = os.path.join(export_dir, f"{model_name}_model") - tflite_path = export_to_litert(model, export_path) - - # Load LiteRT interpreter - interpreter, input_details, output_details = load_litert_interpreter(tflite_path) - - # Run LiteRT inference - litert_output = run_litert_inference(interpreter, input_details, output_details, test_inputs) - - # Compare outputs - outputs_match = compare_outputs(keras_output, litert_output) - - print("\n" + "=" * 60) - if outputs_match: - print("🎉 SUCCESS: Export test passed! Outputs match between Keras and LiteRT.") - else: - print("❌ FAILED: Export test failed! Outputs don't match.") - print("ℹ️ This might be due to numerical precision differences.") - print("=" * 60) - print(f"\n💡 Models saved in:") - print(f" 📁 TFLite: {export_dir}") - - - return outputs_match - - except Exception as e: - print(f"\n💥 Test failed: {e}") - import traceback - traceback.print_exc() - return False - - -if __name__ == "__main__": - success = main() - exit(0 if success else 1) \ No newline at end of file From 73f00f1e77c122567977acb448185725aadc2035 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 29 Sep 2025 13:23:46 +0530 Subject: [PATCH 049/115] Rename LiteRT exporter to Litert and update references Renames the exporter module and class from 'lite_rt_exporter' and 'LiteRTExporter' to 'litert_exporter' and 'LitertExporter', respectively. Updates all references, registry keys, and documentation to use 'litert' instead of 'lite_rt'. Adds the new 'export_litert' function and corresponding tests for Litert export functionality. --- keras/src/export/__init__.py | 3 +- keras/src/export/export_utils.py | 16 +- ...lite_rt_exporter.py => litert_exporter.py} | 93 ++++-- keras/src/export/litert_exporter_test.py | 291 ++++++++++++++++++ keras/src/models/model.py | 19 +- 5 files changed, 384 insertions(+), 38 deletions(-) rename keras/src/export/{lite_rt_exporter.py => litert_exporter.py} (85%) create mode 100644 keras/src/export/litert_exporter_test.py diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index 1e5979264e10..72c7124463c8 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -3,4 +3,5 @@ from keras.src.export.saved_model import ExportArchive from keras.src.export.saved_model import export_saved_model from keras.src.export.tfsm_layer import TFSMLayer -from keras.src.export.lite_rt_exporter import LiteRTExporter +from keras.src.export.litert_exporter import LitertExporter +from keras.src.export.litert_exporter import export_litert diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 219ac3350147..6157000f2037 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -152,7 +152,7 @@ def convert_spec_to_tensor(spec, replace_none_number=None): # Registry for export formats EXPORT_FORMATS = { "tf_saved_model": "keras.src.export.saved_model:export_saved_model", - "lite_rt": "keras.src.export.lite_rt_exporter:LiteRTExporter", + "litert": "keras.src.export.litert_exporter:export_litert", # Add other formats as needed } @@ -175,10 +175,10 @@ def _get_exporter(format_name): def export_model(model, filepath, format="tf_saved_model", **kwargs): """Export a model to the specified format.""" - exporter_cls = _get_exporter(format) - if format == "tf_saved_model": - # Handle tf_saved_model differently if it's a function - exporter_cls(model, filepath, **kwargs) - else: - exporter = exporter_cls(model, **kwargs) - exporter.export(filepath) \ No newline at end of file + exporter = _get_exporter(format) + + if isinstance(exporter, type): + exporter_instance = exporter(model, **kwargs) + return exporter_instance.export(filepath) + + return exporter(model, filepath, **kwargs) \ No newline at end of file diff --git a/keras/src/export/lite_rt_exporter.py b/keras/src/export/litert_exporter.py similarity index 85% rename from keras/src/export/lite_rt_exporter.py rename to keras/src/export/litert_exporter.py index 7721d7095824..8443947147f5 100644 --- a/keras/src/export/lite_rt_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -1,5 +1,6 @@ import tensorflow as tf from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec +from keras.src.utils import io_utils import tempfile import os import numpy as np @@ -14,26 +15,72 @@ LITERT_AVAILABLE = False -class LiteRTExporter: +def export_litert( + model, + filepath, + verbose=None, + input_signature=None, + aot_compile_targets=None, + **kwargs, +): + """Export the model as a Litert artifact for inference. + + Args: + model: The Keras model to export. + filepath: The path to save the exported artifact. + verbose: Optional; whether to log progress messages. Defaults to + ``False`` when ``None`` is provided. + input_signature: Optional input signature specification. If + ``None``, it will be inferred. + aot_compile_targets: Optional list of Litert targets for AOT + compilation. + **kwargs: Additional keyword arguments passed to the exporter. + + Returns: + The filepath to the exported artifact, or the compilation result when + AOT compilation is requested. """ - Exporter for the LiteRT (TFLite) format that creates a single, + + actual_verbose = bool(verbose) if verbose is not None else False + exporter = LitertExporter( + model=model, + input_signature=input_signature, + verbose=actual_verbose, + aot_compile_targets=aot_compile_targets, + **kwargs, + ) + result = exporter.export(filepath) + if actual_verbose: + if hasattr(result, "models"): + io_utils.print_msg( + f"Saved artifact at '{filepath}'. AOT compiled " + f"{len(result.models)} variant(s)." + ) + else: + io_utils.print_msg(f"Saved artifact at '{result}'.") + return result + + +class LitertExporter: + """ + Exporter for the Litert (TFLite) format that creates a single, callable signature for `model.call`. """ - def __init__(self, model, input_signature=None, verbose=0, + def __init__(self, model, input_signature=None, verbose=False, aot_compile_targets=None, **kwargs): - """Initialize the LiteRT exporter. + """Initialize the Litert exporter. Args: model: The Keras model to export input_signature: Input signature specification - verbose: Verbosity level (0=quiet, 1=info) - aot_compile_targets: List of LiteRT targets for AOT compilation + verbose: Whether to print progress messages during export. + aot_compile_targets: List of Litert targets for AOT compilation **kwargs: Additional export parameters """ self.model = model self.input_signature = input_signature - self.verbose = verbose + self.verbose = bool(verbose) self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs @@ -47,7 +94,7 @@ def export(self, filepath): Path to exported model or compiled models if AOT compilation is performed """ if self.verbose: - print("Starting LiteRT export...") + print("Starting Litert export...") # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() @@ -79,14 +126,14 @@ def export(self, filepath): compiled_models = None if self.aot_compile_targets and LITERT_AVAILABLE: if self.verbose: - print("Performing AOT compilation for LiteRT targets...") + print("Performing AOT compilation for Litert targets...") compiled_models = self._aot_compile(filepath) elif self.aot_compile_targets and not LITERT_AVAILABLE: if self.verbose: print("Warning: AOT compilation requested but LiteRT is not available. Skipping.") if self.verbose: - print(f"LiteRT export completed. Base model: {filepath}") + print(f"Litert export completed. Base model: {filepath}") if compiled_models: print(f"AOT compiled models: {len(compiled_models.models)} variants") @@ -191,11 +238,14 @@ def _infer_sequential_input_shape(self): return (1, next_layer.input_dim) elif class_name in ['BatchNormalization', 'LayerNormalization']: - # For normalization layers, try to infer from previous layer + # For normalization layers, look at the next layer to infer shape if len(self.model.layers) > 1: - prev_layer = self.model.layers[0] # The normalization layer itself - if hasattr(prev_layer, 'units'): - return (1, prev_layer.units) + next_layer = self.model.layers[1] + if hasattr(next_layer, '__class__'): + next_class = next_layer.__class__.__name__ + if next_class == 'Dense': + if hasattr(next_layer, 'input_dim') and next_layer.input_dim: + return (1, next_layer.input_dim) # For other layer types, we cannot reliably infer without hardcoded values # Return None to indicate inference failed @@ -433,16 +483,21 @@ def __call__(self, *args, **kwargs): if hasattr(self._model, 'inputs') and len(self._model.inputs) > 1: # Multi-input functional model input_list = [] + missing_inputs = [] for input_layer in self._model.inputs: input_name = input_layer.name if input_name in kwargs: input_list.append(kwargs[input_name]) else: - # Try to match by position - keys = list(kwargs.keys()) - idx = len(input_list) - if idx < len(keys): - input_list.append(kwargs[keys[idx]]) + missing_inputs.append(input_name) + + if missing_inputs: + raise ValueError( + f"Missing required inputs for multi-input model: {missing_inputs}. " + f"Available kwargs: {list(kwargs.keys())}. " + f"Please provide all inputs by name." + ) + return self._model(input_list) else: # Single input model called with named arguments diff --git a/keras/src/export/litert_exporter_test.py b/keras/src/export/litert_exporter_test.py new file mode 100644 index 000000000000..cb6136181a5e --- /dev/null +++ b/keras/src/export/litert_exporter_test.py @@ -0,0 +1,291 @@ +"""Tests for LiteRT exporting utilities.""" + +import os + +import numpy as np +import pytest +import tensorflow as tf +from absl.testing import parameterized + +# Try to use AI Edge LiteRT interpreter, fallback to TensorFlow Lite +try: + from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter + litert_available = True + print("Using AI Edge LiteRT interpreter") +except ImportError: + # Fallback to TensorFlow Lite interpreter + LiteRtInterpreter = tf.lite.Interpreter + litert_available = True + print("Using TensorFlow Lite interpreter as fallback") + +from keras.src import backend +from keras.src import layers +from keras.src import models +from keras.src import ops +from keras.src import testing +from keras.src import tree +from keras.src.export import export_litert +from keras.src.saving import saving_lib +from keras.src.testing.test_utils import named_product + + +class CustomModel(models.Model): + def __init__(self, layer_list): + super().__init__() + self.layer_list = layer_list + + def call(self, input): + output = input + for layer in self.layer_list: + output = layer(output) + return output + + +def get_model(type="sequential", input_shape=(10,), layer_list=None): + layer_list = layer_list or [ + layers.Dense(10, activation="relu"), + layers.BatchNormalization(), + layers.Dense(1, activation="sigmoid"), + ] + if type == "sequential": + model = models.Sequential(layer_list) + model.build(input_shape=(None,) + input_shape) + return model + if type == "functional": + input = output = tree.map_shape_structure(layers.Input, input_shape) + for layer in layer_list: + output = layer(output) + return models.Model(inputs=input, outputs=output) + if type == "subclass": + model = CustomModel(layer_list) + model.build(input_shape=(None,) + input_shape) + # Trace the model with dummy data to ensure it's properly built for export + dummy_input = np.zeros((1,) + input_shape, dtype=np.float32) + _ = model(dummy_input) # This traces the model + return model + if type == "lstm": + inputs = layers.Input((4, 10)) + x = layers.Bidirectional( + layers.LSTM( + 10, + kernel_initializer="he_normal", + return_sequences=True, + kernel_regularizer=None, + ), + merge_mode="sum", + )(inputs) + outputs = layers.Bidirectional( + layers.LSTM( + 10, + kernel_initializer="he_normal", + return_sequences=True, + kernel_regularizer=None, + ), + merge_mode="concat", + )(x) + return models.Model(inputs=inputs, outputs=outputs) + raise ValueError(f"Unknown model type: {type}") + + + + + +def _convert_to_numpy(structure): + return tree.map_structure( + lambda x: x.numpy() if hasattr(x, "numpy") else np.array(x), structure + ) + + +def _normalize_name(name): + normalized = name.split(":")[0] + if normalized.startswith("serving_default_"): + normalized = normalized[len("serving_default_") :] + return normalized + + +def _set_interpreter_inputs(interpreter, inputs): + input_details = interpreter.get_input_details() + if isinstance(inputs, dict): + for detail in input_details: + key = _normalize_name(detail["name"]) + if key in inputs: + value = inputs[key] + else: + matched_key = None + for candidate in inputs: + if key.endswith(candidate) or candidate.endswith(key): + matched_key = candidate + break + if matched_key is None: + raise KeyError( + f"Unable to match input '{detail['name']}' in provided inputs" + ) + value = inputs[matched_key] + interpreter.set_tensor(detail["index"], value) + else: + values = inputs + if not isinstance(values, (list, tuple)): + values = [values] + if len(values) != len(input_details): + raise ValueError( + "Number of provided inputs does not match interpreter signature" + ) + for detail, value in zip(input_details, values): + interpreter.set_tensor(detail["index"], value) + + +def _get_interpreter_outputs(interpreter): + output_details = interpreter.get_output_details() + outputs = [interpreter.get_tensor(detail["index"]) for detail in output_details] + return outputs[0] if len(outputs) == 1 else outputs + + +@pytest.mark.skipif( + backend.backend() != "tensorflow", + reason="`export_litert` currently supports the tensorflow backend only.", +) +@pytest.mark.skipif( + testing.tensorflow_uses_gpu(), + reason="LiteRT export tests are only run on CPU to avoid CI issues.", +) +# Note: Tests use AI Edge LiteRT interpreter when available, +# fallback to TensorFlow Lite interpreter otherwise +class ExportLitertTest(testing.TestCase): + @parameterized.named_parameters( + named_product( + model_type=["sequential", "functional", "lstm"] + ) + ) + def test_standard_model_export(self, model_type): + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + model = get_model(model_type) + batch_size = 1 # TFLite expects batch_size=1 + if model_type == "lstm": + ref_input = np.random.normal(size=(batch_size, 4, 10)) + else: + ref_input = np.random.normal(size=(batch_size, 10)) + ref_input = ref_input.astype("float32") + ref_output = _convert_to_numpy(model(ref_input)) + + export_path = export_litert(model, temp_filepath) + self.assertTrue(export_path.endswith(".tflite")) + self.assertTrue(os.path.exists(export_path)) + + interpreter = LiteRtInterpreter(model_path=export_path) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + @parameterized.named_parameters( + named_product(struct_type=["tuple", "array", "dict"]) + ) + def test_model_with_input_structure(self, struct_type): + batch_size = 1 # TFLite expects batch_size=1 + base_input = np.random.normal(size=(batch_size, 10)).astype("float32") + + if struct_type == "tuple": + # Use Functional API for proper Input layer handling + input1 = layers.Input(shape=(10,), name="input_1") + input2 = layers.Input(shape=(10,), name="input_2") + output = layers.Add()([input1, input2]) + model = models.Model(inputs=[input1, input2], outputs=output) + ref_input = (base_input, base_input * 2) + elif struct_type == "array": + # Use Functional API for proper Input layer handling + input1 = layers.Input(shape=(10,), name="input_1") + input2 = layers.Input(shape=(10,), name="input_2") + output = layers.Add()([input1, input2]) + model = models.Model(inputs=[input1, input2], outputs=output) + ref_input = [base_input, base_input * 2] + elif struct_type == "dict": + # Use Functional API for proper Input layer handling + input1 = layers.Input(shape=(10,), name="x") + input2 = layers.Input(shape=(10,), name="y") + output = layers.Add()([input1, input2]) + model = models.Model(inputs={"x": input1, "y": input2}, outputs=output) + ref_input = {"x": base_input, "y": base_input * 2} + else: + raise AssertionError("Unexpected structure type") + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + ref_output = _convert_to_numpy( + model(tree.map_structure(ops.convert_to_tensor, ref_input)) + ) + + export_path = export_litert(model, temp_filepath) + interpreter = LiteRtInterpreter(model_path=export_path) + interpreter.allocate_tensors() + + feed_inputs = ref_input + if isinstance(feed_inputs, tuple): + feed_inputs = list(feed_inputs) + _set_interpreter_inputs(interpreter, feed_inputs) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + # Verify export still works after saving/loading via saving_lib. + archive_path = os.path.join(self.get_temp_dir(), "revived.keras") + saving_lib.save_model(model, archive_path) + revived_model = saving_lib.load_model(archive_path) + revived_output = _convert_to_numpy(revived_model(ref_input)) + self.assertAllClose(ref_output, revived_output) + + def test_model_with_multiple_inputs(self): + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + + # Use Functional API for proper Input layer handling + input_x = layers.Input(shape=(10,), name="x") + input_y = layers.Input(shape=(10,), name="y") + output = layers.Add()([input_x, input_y]) + model = models.Model(inputs=[input_x, input_y], outputs=output) + + batch_size = 1 # TFLite expects batch_size=1 + ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_output = _convert_to_numpy(model([ref_input_x, ref_input_y])) + + export_path = export_litert(model, temp_filepath) + interpreter = LiteRtInterpreter(model_path=export_path) + interpreter.allocate_tensors() + + _set_interpreter_inputs(interpreter, [ref_input_x, ref_input_y]) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + # Test with a different batch size by resizing interpreter inputs. + larger_x = np.concatenate([ref_input_x, ref_input_x], axis=0) + larger_y = np.concatenate([ref_input_y, ref_input_y], axis=0) + input_details = interpreter.get_input_details() + interpreter.resize_tensor_input(input_details[0]["index"], larger_x.shape) + interpreter.resize_tensor_input(input_details[1]["index"], larger_y.shape) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, [larger_x, larger_y]) + interpreter.invoke() + larger_output = _get_interpreter_outputs(interpreter) + larger_ref_output = _convert_to_numpy(model([larger_x, larger_y])) + self.assertAllClose(larger_ref_output, larger_output, atol=1e-4, rtol=1e-4) + + def test_export_with_custom_input_signature(self): + model = get_model("sequential") + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + input_signature = [layers.InputSpec(shape=(None, 10), dtype="float32")] + + export_path = export_litert( + model, + temp_filepath, + input_signature=input_signature, + ) + self.assertTrue(os.path.exists(export_path)) + + interpreter = LiteRtInterpreter(model_path=export_path) + interpreter.allocate_tensors() + input_details = interpreter.get_input_details() + self.assertEqual(len(input_details), 1) + self.assertEqual(tuple(input_details[0]["shape"][1:]), (10,)) \ No newline at end of file diff --git a/keras/src/models/model.py b/keras/src/models/model.py index a3a384643d42..d29e644e29f5 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -568,7 +568,7 @@ def export( filepath: `str` or `pathlib.Path` object. The path to save the artifact. format: `str`. The export format. Supported values: - `"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"lite_rt"`. Defaults to + `"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"litert"`. Defaults to `"tf_saved_model"`. verbose: `bool`. Whether to print a message during export. Defaults to `None`, which uses the default value set by different @@ -592,11 +592,11 @@ def export( provided, they will be automatically computed. - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. - - `allow_custom_ops`: Optional `bool`. Specific to `format="lite_rt"`. + - `allow_custom_ops`: Optional `bool`. Specific to `format="litert"`. Whether to allow custom operations during conversion. Defaults to `False`. - - `enable_select_tf_ops`: Optional `bool`. Specific to `format="lite_rt"`. + - `enable_select_tf_ops`: Optional `bool`. Specific to `format="litert"`. Whether to enable TensorFlow Select ops for unsupported operations. Defaults to `False`. - - `optimizations`: Optional `list`. Specific to `format="lite_rt"`. + - `optimizations`: Optional `list`. Specific to `format="litert"`. List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). **Note:** This feature is currently supported only with TensorFlow, JAX @@ -637,7 +637,7 @@ def export( ```python # Export the model as a LiteRT artifact - model.export("path/to/location", format="lite_rt") + model.export("path/to/location", format="litert") # Load the artifact in a different process/environment interpreter = tf.lite.Interpreter(model_path="path/to/location") @@ -647,11 +647,12 @@ def export( output_data = interpreter.get_tensor(interpreter.get_output_details()[0]['index']) ``` """ + from keras.src.export import export_litert from keras.src.export import export_onnx from keras.src.export import export_openvino from keras.src.export import export_saved_model - available_formats = ("tf_saved_model", "onnx", "openvino", "lite_rt") + available_formats = ("tf_saved_model", "onnx", "openvino", "litert") if format not in available_formats: raise ValueError( f"Unrecognized format={format}. Supported formats are: " @@ -682,12 +683,10 @@ def export( input_signature=input_signature, **kwargs, ) - elif format == "lite_rt": - from keras.src.export.export_utils import export_model - export_model( + elif format == "litert": + export_litert( self, filepath, - format="lite_rt", verbose=verbose, input_signature=input_signature, **kwargs, From ebf11e22fb010f5adf6fb217fb5b02fd04b0c406 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 29 Sep 2025 14:23:30 +0530 Subject: [PATCH 050/115] Enhance LiteRT exporter and expand export tests Refactors and improves the LiteRT exporter for better input signature inference, error handling, and verbose output. Updates and expands the test suite to cover multi-input, multi-output, and error scenarios, and ensures the Model.export() API is used consistently. Also improves docstrings and formatting for clarity and maintainability. --- keras/src/export/__init__.py | 4 +- keras/src/export/export_utils.py | 44 +-- keras/src/export/litert_exporter.py | 340 +++++++++++++++-------- keras/src/export/litert_exporter_test.py | 148 ++++++++-- keras/src/export/saved_model.py | 3 + keras/src/models/model.py | 30 +- 6 files changed, 396 insertions(+), 173 deletions(-) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index 72c7124463c8..97f4b361fb3e 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -1,7 +1,7 @@ +from keras.src.export.litert_exporter import LitertExporter +from keras.src.export.litert_exporter import export_litert from keras.src.export.onnx import export_onnx from keras.src.export.openvino import export_openvino from keras.src.export.saved_model import ExportArchive from keras.src.export.saved_model import export_saved_model from keras.src.export.tfsm_layer import TFSMLayer -from keras.src.export.litert_exporter import LitertExporter -from keras.src.export.litert_exporter import export_litert diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 6157000f2037..cb28bf9fbbe2 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -8,10 +8,10 @@ def get_input_signature(model): """Get input signature for model export. - + Args: model: A Keras Model instance. - + Returns: Input signature suitable for model export. """ @@ -25,9 +25,11 @@ def get_input_signature(model): "The model provided has not yet been built. It must be built " "before export." ) - + if isinstance(model, models.Functional): - input_signature = tree.map_structure(make_input_spec, model._inputs_struct) + input_signature = tree.map_structure( + make_input_spec, model._inputs_struct + ) elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: @@ -35,13 +37,16 @@ def get_input_signature(model): input_signature = _infer_input_signature_from_model(model) if not input_signature: # Fallback: Try to get from model.inputs if available - if hasattr(model, 'inputs') and model.inputs: - input_signature = tree.map_structure(make_input_spec, model.inputs) + if hasattr(model, "inputs") and model.inputs: + input_signature = tree.map_structure( + make_input_spec, model.inputs + ) elif not model._called: raise ValueError( "The model provided has never been called and has no " - "detectable input structure. It must be called at least once " - "before export, or you must provide explicit input_signature." + "detectable input structure. It must be called at least " + "once before export, or you must provide explicit " + "input_signature." ) return input_signature @@ -58,26 +63,29 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - # Keep batch dimension unbounded, keep other dimensions as they are + # Keep batch dimension unbounded, keep other dimensions as they + # are bounded_shape = [] - + for i, dim in enumerate(structure): if dim is None and i == 0: # Always keep batch dimension as None bounded_shape.append(None) else: - # Keep other dimensions as they are (None or specific size) + # Keep other dimensions as they are (None or specific + # size) bounded_shape.append(dim) - + return layers.InputSpec( shape=tuple(bounded_shape), dtype=model.input_dtype ) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - # Keep batch dimension unbounded, keep other dimensions as they are + # Keep batch dimension unbounded, keep other dimensions as they + # are bounded_shape = [] - + for i, dim in enumerate(structure): if dim is None and i == 0: # Always keep batch dimension as None @@ -85,7 +93,7 @@ def _make_input_spec(structure): else: # Keep other dimensions as they are bounded_shape.append(dim) - + return layers.InputSpec( shape=bounded_shape, dtype=model.input_dtype ) @@ -102,7 +110,9 @@ def _make_input_spec(structure): else: # Multiple inputs - try to determine if it's a dict or list structure # Return as dictionary by default to preserve input names - return {key: _make_input_spec(shape) for key, shape in shapes_dict.items()} + return { + key: _make_input_spec(shape) for key, shape in shapes_dict.items() + } def make_input_spec(x): @@ -181,4 +191,4 @@ def export_model(model, filepath, format="tf_saved_model", **kwargs): exporter_instance = exporter(model, **kwargs) return exporter_instance.export(filepath) - return exporter(model, filepath, **kwargs) \ No newline at end of file + return exporter(model, filepath, **kwargs) diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert_exporter.py index 8443947147f5..022b2ef42d78 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -1,15 +1,17 @@ +import os + import tensorflow as tf -from keras.src.export.export_utils import get_input_signature, make_tf_tensor_spec + +from keras.src.export.export_utils import get_input_signature +from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils -import tempfile -import os -import numpy as np # Try to import LiteRT AOT compilation if available try: from litert.python.aot import aot_compile from litert.python.aot.core import types as litert_types from litert.python.aot.vendors import import_vendor + LITERT_AVAILABLE = True except ImportError: LITERT_AVAILABLE = False @@ -67,10 +69,16 @@ class LitertExporter: callable signature for `model.call`. """ - def __init__(self, model, input_signature=None, verbose=False, - aot_compile_targets=None, **kwargs): + def __init__( + self, + model, + input_signature=None, + verbose=False, + aot_compile_targets=None, + **kwargs, + ): """Initialize the Litert exporter. - + Args: model: The Keras model to export input_signature: Input signature specification @@ -85,13 +93,15 @@ def __init__(self, model, input_signature=None, verbose=False, self.kwargs = kwargs def export(self, filepath): - """Exports the Keras model to a TFLite file and optionally performs AOT compilation. - + """Exports the Keras model to a TFLite file and optionally performs AOT + compilation. + Args: filepath: Output path for the exported model - + Returns: - Path to exported model or compiled models if AOT compilation is performed + Path to exported model or compiled models if AOT compilation is + performed """ if self.verbose: print("Starting Litert export...") @@ -104,25 +114,29 @@ def export(self, filepath): if self.verbose: print("Inferring input signature from model.") self.input_signature = get_input_signature(self.model) - + # 3. Convert the model to TFLite. tflite_model = self._convert_to_tflite(self.input_signature) - + if self.verbose: - final_size_mb = len(tflite_model) / (1024*1024) - print(f"TFLite model converted successfully. Size: {final_size_mb:.2f} MB") - + final_size_mb = len(tflite_model) / (1024 * 1024) + print( + f"TFLite model converted successfully. Size: " + f"{final_size_mb:.2f} MB" + ) + # 4. Save the initial TFLite model to the specified file path. - if not filepath.endswith('.tflite'): - filepath += '.tflite' - + if not filepath.endswith(".tflite"): + filepath += ".tflite" + with open(filepath, "wb") as f: f.write(tflite_model) - + if self.verbose: print(f"TFLite model saved to {filepath}") - # 5. Perform AOT compilation if targets are specified and LiteRT is available + # 5. Perform AOT compilation if targets are specified and LiteRT is + # available compiled_models = None if self.aot_compile_targets and LITERT_AVAILABLE: if self.verbose: @@ -130,12 +144,18 @@ def export(self, filepath): compiled_models = self._aot_compile(filepath) elif self.aot_compile_targets and not LITERT_AVAILABLE: if self.verbose: - print("Warning: AOT compilation requested but LiteRT is not available. Skipping.") - + print( + "Warning: AOT compilation requested but LiteRT is not " + "available. Skipping." + ) + if self.verbose: print(f"Litert export completed. Base model: {filepath}") if compiled_models: - print(f"AOT compiled models: {len(compiled_models.models)} variants") + print( + f"AOT compiled models: {len(compiled_models.models)} " + "variants" + ) return compiled_models if compiled_models else filepath @@ -154,36 +174,53 @@ def _ensure_model_built(self): # Generate dummy inputs based on the model's specification dummy_inputs = [] # Prioritize `model.inputs` as it's the most reliable source - if hasattr(self.model, 'inputs') and self.model.inputs: + if hasattr(self.model, "inputs") and self.model.inputs: if self.verbose: - print(f"Generating inputs from `model.inputs` ({len(self.model.inputs)} input(s)).") + print( + f"Generating inputs from `model.inputs` " + f"({len(self.model.inputs)} input(s))." + ) for input_layer in self.model.inputs: - shape = [1 if dim is None else dim for dim in input_layer.shape] - dummy_input = tf.zeros(shape, dtype=input_layer.dtype or tf.float32) + shape = [ + 1 if dim is None else dim for dim in input_layer.shape + ] + dummy_input = tf.zeros( + shape, dtype=input_layer.dtype or tf.float32 + ) dummy_inputs.append(dummy_input) else: # Fallback for pure Sequential models without an Input layer if self.verbose: - print("Model has no `inputs` attribute. Assuming pure Sequential and inferring shape.") + print( + "Model has no `inputs` attribute. Assuming pure " + "Sequential and inferring shape." + ) input_shape = self._infer_sequential_input_shape() if input_shape: if self.verbose: - print(f"Inferred input shape for Sequential model: {input_shape}") + print( + f"Inferred input shape for Sequential model: " + f"{input_shape}" + ) dummy_inputs.append(tf.zeros(input_shape, dtype=tf.float32)) else: raise ValueError( - "Cannot build Sequential model: unable to infer input shape. " - "Please add an `Input` layer or specify `input_shape` in the first layer." + "Cannot build Sequential model: unable to infer input " + "shape. Please add an `Input` layer or specify " + "`input_shape` in the first layer." ) # Perform a direct call in inference mode to trace the model. if len(dummy_inputs) == 1: - result = self.model(dummy_inputs[0], training=False) + self.model(dummy_inputs[0], training=False) else: - result = self.model(dummy_inputs, training=False) + self.model(dummy_inputs, training=False) if self.verbose: - print("Model successfully traced via direct call with training=False.") + print( + "Model successfully traced via direct call with " + "training=False." + ) except Exception as e: if self.verbose: @@ -202,55 +239,91 @@ def _infer_sequential_input_shape(self): try: # First, look for Input layer for layer in self.model.layers: - if hasattr(layer, '__class__') and layer.__class__.__name__ == 'InputLayer': - if hasattr(layer, 'batch_input_shape') and layer.batch_input_shape: + if ( + hasattr(layer, "__class__") + and layer.__class__.__name__ == "InputLayer" + ): + if ( + hasattr(layer, "batch_input_shape") + and layer.batch_input_shape + ): input_shape = layer.batch_input_shape - return (1,) + input_shape[1:] if input_shape[0] is None else input_shape + return ( + (1,) + input_shape[1:] + if input_shape[0] is None + else input_shape + ) # If no Input layer, try to get from first layer - if hasattr(self.model, 'layers') and self.model.layers: + if hasattr(self.model, "layers") and self.model.layers: first_layer = self.model.layers[0] # Check various ways to get input shape - for attr in ['input_shape', 'batch_input_shape', '_batch_input_shape']: + for attr in [ + "input_shape", + "batch_input_shape", + "_batch_input_shape", + ]: if hasattr(first_layer, attr): input_shape = getattr(first_layer, attr) if input_shape: - return (1,) + input_shape[1:] if input_shape[0] is None else input_shape - - # Try to infer from layer configuration without hardcoded fallbacks - if hasattr(first_layer, '__class__'): + return ( + (1,) + input_shape[1:] + if input_shape[0] is None + else input_shape + ) + + # Try to infer from layer configuration without hardcoded + # fallbacks + if hasattr(first_layer, "__class__"): class_name = first_layer.__class__.__name__ - if class_name == 'Dense': + if class_name == "Dense": # For Dense layers, try to infer from input_dim - if hasattr(first_layer, 'input_dim') and first_layer.input_dim: + if ( + hasattr(first_layer, "input_dim") + and first_layer.input_dim + ): return (1, first_layer.input_dim) - elif class_name == 'Dropout': + elif class_name == "Dropout": # For Dropout, look at the next layer to infer shape if len(self.model.layers) > 1: next_layer = self.model.layers[1] - if hasattr(next_layer, '__class__'): + if hasattr(next_layer, "__class__"): next_class = next_layer.__class__.__name__ - if next_class == 'Dense': - if hasattr(next_layer, 'input_dim') and next_layer.input_dim: + if next_class == "Dense": + if ( + hasattr(next_layer, "input_dim") + and next_layer.input_dim + ): return (1, next_layer.input_dim) - elif class_name in ['BatchNormalization', 'LayerNormalization']: - # For normalization layers, look at the next layer to infer shape + elif class_name in [ + "BatchNormalization", + "LayerNormalization", + ]: + # For normalization layers, look at the next layer to + # infer shape if len(self.model.layers) > 1: next_layer = self.model.layers[1] - if hasattr(next_layer, '__class__'): + if hasattr(next_layer, "__class__"): next_class = next_layer.__class__.__name__ - if next_class == 'Dense': - if hasattr(next_layer, 'input_dim') and next_layer.input_dim: + if next_class == "Dense": + if ( + hasattr(next_layer, "input_dim") + and next_layer.input_dim + ): return (1, next_layer.input_dim) - # For other layer types, we cannot reliably infer without hardcoded values + # For other layer types, we cannot reliably infer without + # hardcoded values # Return None to indicate inference failed if self.verbose: - print(f"Cannot infer input shape for layer type: {class_name}") + print( + f"Cannot infer input shape for layer type: " + f"{class_name}" + ) except Exception as e: if self.verbose: @@ -266,8 +339,10 @@ def _convert_to_tflite(self, input_signature): try: if self.verbose: model_type = "Sequential" if is_sequential else "Functional" - print(f"{model_type} model detected. Trying direct conversion...") - + print( + f"{model_type} model detected. Trying direct conversion..." + ) + converter = tf.lite.TFLiteConverter.from_keras_model(self.model) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, @@ -275,17 +350,20 @@ def _convert_to_tflite(self, input_signature): ] converter.experimental_enable_resource_variables = False tflite_model = converter.convert() - + if self.verbose: print("Direct conversion successful.") return tflite_model - + except Exception as direct_error: if self.verbose: model_type = "Sequential" if is_sequential else "Functional" - print(f"Direct conversion failed for {model_type} model: {direct_error}") + print( + f"Direct conversion failed for {model_type} model: " + f"{direct_error}" + ) print("Falling back to wrapper-based conversion...") - + return self._convert_with_wrapper(input_signature) def _convert_with_wrapper(self, input_signature): @@ -296,84 +374,97 @@ def _convert_with_wrapper(self, input_signature): # 2. Get a concrete function from the wrapper. if not isinstance(input_signature, (list, tuple)): input_signature = [input_signature] - + tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] - - # Pass tensor specs as positional arguments to get the concrete function. + + # Pass tensor specs as positional arguments to get the concrete + # function. concrete_func = wrapper.__call__.get_concrete_function(*tensor_specs) # 3. Convert from the concrete function. if self.verbose: print("Converting concrete function to TFLite format...") - + # Try multiple conversion strategies for better inference compatibility conversion_strategies = [ - {"experimental_enable_resource_variables": False, "name": "without resource variables"}, - {"experimental_enable_resource_variables": True, "name": "with resource variables"}, + { + "experimental_enable_resource_variables": False, + "name": "without resource variables", + }, + { + "experimental_enable_resource_variables": True, + "name": "with resource variables", + }, ] - + for strategy in conversion_strategies: try: converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], - trackable_obj=wrapper + [concrete_func], trackable_obj=wrapper ) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS, ] - converter.experimental_enable_resource_variables = strategy["experimental_enable_resource_variables"] - + converter.experimental_enable_resource_variables = strategy[ + "experimental_enable_resource_variables" + ] + if self.verbose: print(f"Trying conversion {strategy['name']}...") - + tflite_model = converter.convert() - + if self.verbose: print(f"Conversion successful {strategy['name']}!") - + return tflite_model - + except Exception as e: if self.verbose: print(f"Conversion failed {strategy['name']}: {e}") continue - + # If all strategies fail, raise the last error - raise RuntimeError("All conversion strategies failed for wrapper-based conversion") + raise RuntimeError( + "All conversion strategies failed for wrapper-based conversion" + ) def _aot_compile(self, tflite_filepath): """Performs AOT compilation using LiteRT.""" if not LITERT_AVAILABLE: raise RuntimeError("LiteRT is not available for AOT compilation") - + try: # Create a LiteRT model from the TFLite file litert_model = litert_types.Model.create_from_path(tflite_filepath) - + # Determine output directory base_dir = os.path.dirname(tflite_filepath) model_name = os.path.splitext(os.path.basename(tflite_filepath))[0] output_dir = os.path.join(base_dir, f"{model_name}_compiled") - + if self.verbose: print(f"AOT compiling for targets: {self.aot_compile_targets}") print(f"Output directory: {output_dir}") - + # Perform AOT compilation result = aot_compile.aot_compile( input_model=litert_model, output_dir=output_dir, target=self.aot_compile_targets, - keep_going=True # Continue even if some targets fail + keep_going=True, # Continue even if some targets fail ) - + if self.verbose: - print(f"AOT compilation completed: {len(result.models)} successful, {len(result.failed_backends)} failed") + print( + f"AOT compilation completed: {len(result.models)} " + f"successful, {len(result.failed_backends)} failed" + ) if result.failed_backends: for backend, error in result.failed_backends: print(f" Failed: {backend.id()} - {error}") - + # Print compilation report if available try: report = result.compilation_report() @@ -382,13 +473,14 @@ def _aot_compile(self, tflite_filepath): print(report) except: pass - + return result - + except Exception as e: if self.verbose: print(f"AOT compilation failed: {e}") import traceback + traceback.print_exc() raise RuntimeError(f"AOT compilation failed: {e}") @@ -396,7 +488,7 @@ def _get_available_litert_targets(self): """Get available LiteRT targets for AOT compilation.""" if not LITERT_AVAILABLE: return [] - + try: # Get all registered targets targets = import_vendor.AllRegisteredTarget() @@ -407,34 +499,35 @@ def _get_available_litert_targets(self): return [] @classmethod - def export_with_aot(cls, model, filepath, targets=None, verbose=True, **kwargs): + def export_with_aot( + cls, model, filepath, targets=None, verbose=True, **kwargs + ): """ Convenience method to export a Keras model with AOT compilation. - + Args: model: Keras model to export - filepath: Output file path - targets: List of LiteRT targets for AOT compilation (e.g., ['qualcomm', 'mediatek']) + filepath: Output file path + targets: List of LiteRT targets for AOT compilation (e.g., + ['qualcomm', 'mediatek']) verbose: Whether to print verbose output **kwargs: Additional arguments for the exporter - + Returns: - CompilationResult if AOT compilation is performed, otherwise the filepath + CompilationResult if AOT compilation is performed, otherwise the + filepath """ exporter = cls( - model=model, - verbose=verbose, - aot_compile_targets=targets, - **kwargs + model=model, verbose=verbose, aot_compile_targets=targets, **kwargs ) return exporter.export(filepath) - @classmethod + @classmethod def get_available_targets(cls): """Get list of available LiteRT AOT compilation targets.""" if not LITERT_AVAILABLE: return [] - + dummy_exporter = cls(model=None) return dummy_exporter._get_available_litert_targets() @@ -452,16 +545,20 @@ class _KerasModelWrapper(tf.Module): def __init__(self, model): super().__init__() - # Store the model reference in a way that TensorFlow won't try to track it + # Store the model reference in a way that TensorFlow won't try to + # track it # This prevents the _DictWrapper error during SavedModel serialization - object.__setattr__(self, '_model', model) + object.__setattr__(self, "_model", model) - # Track all variables from the Keras model using proper tf.Module methods - # This ensures proper variable handling for stateful layers like BatchNorm + # Track all variables from the Keras model using proper tf.Module + # methods + # This ensures proper variable handling for stateful layers like + # BatchNorm with self.name_scope: for i, var in enumerate(model.variables): - # Use a different attribute name to avoid conflicts with tf.Module's variables property - setattr(self, f'model_var_{i}', var) + # Use a different attribute name to avoid conflicts with + # tf.Module's variables property + setattr(self, f"model_var_{i}", var) @tf.function def __call__(self, *args, **kwargs): @@ -475,12 +572,16 @@ def __call__(self, *args, **kwargs): return self._model(list(args)) elif kwargs and not args: # Called with keyword arguments - if len(kwargs) == 1 and 'inputs' in kwargs: + if len(kwargs) == 1 and "inputs" in kwargs: # Single input case - return self._model(kwargs['inputs']) + return self._model(kwargs["inputs"]) else: - # Multi-input case - convert to list/dict format expected by model - if hasattr(self._model, 'inputs') and len(self._model.inputs) > 1: + # Multi-input case - convert to list/dict format expected by + # model + if ( + hasattr(self._model, "inputs") + and len(self._model.inputs) > 1 + ): # Multi-input functional model input_list = [] missing_inputs = [] @@ -490,18 +591,19 @@ def __call__(self, *args, **kwargs): input_list.append(kwargs[input_name]) else: missing_inputs.append(input_name) - + if missing_inputs: raise ValueError( - f"Missing required inputs for multi-input model: {missing_inputs}. " - f"Available kwargs: {list(kwargs.keys())}. " - f"Please provide all inputs by name." + f"Missing required inputs for multi-input model: " + f"{missing_inputs}. Available kwargs: " + f"{list(kwargs.keys())}. Please provide all inputs " + f"by name." ) - + return self._model(input_list) else: # Single input model called with named arguments return self._model(list(kwargs.values())[0]) else: # Fallback to original call - return self._model(*args, **kwargs) \ No newline at end of file + return self._model(*args, **kwargs) diff --git a/keras/src/export/litert_exporter_test.py b/keras/src/export/litert_exporter_test.py index cb6136181a5e..4bd941d8116e 100644 --- a/keras/src/export/litert_exporter_test.py +++ b/keras/src/export/litert_exporter_test.py @@ -10,6 +10,7 @@ # Try to use AI Edge LiteRT interpreter, fallback to TensorFlow Lite try: from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter + litert_available = True print("Using AI Edge LiteRT interpreter") except ImportError: @@ -24,7 +25,6 @@ from keras.src import ops from keras.src import testing from keras.src import tree -from keras.src.export import export_litert from keras.src.saving import saving_lib from keras.src.testing.test_utils import named_product @@ -59,7 +59,8 @@ def get_model(type="sequential", input_shape=(10,), layer_list=None): if type == "subclass": model = CustomModel(layer_list) model.build(input_shape=(None,) + input_shape) - # Trace the model with dummy data to ensure it's properly built for export + # Trace the model with dummy data to ensure it's properly built for + # export dummy_input = np.zeros((1,) + input_shape, dtype=np.float32) _ = model(dummy_input) # This traces the model return model @@ -84,12 +85,23 @@ def get_model(type="sequential", input_shape=(10,), layer_list=None): merge_mode="concat", )(x) return models.Model(inputs=inputs, outputs=outputs) + if type == "multi_input": + input1 = layers.Input(shape=input_shape, name="input1") + input2 = layers.Input(shape=input_shape, name="input2") + x1 = layers.Dense(10, activation="relu")(input1) + x2 = layers.Dense(10, activation="relu")(input2) + combined = layers.concatenate([x1, x2]) + output = layers.Dense(1, activation="sigmoid")(combined) + return models.Model(inputs=[input1, input2], outputs=output) + if type == "multi_output": + inputs = layers.Input(shape=input_shape) + shared = layers.Dense(20, activation="relu")(inputs) + output1 = layers.Dense(1, activation="sigmoid", name="output1")(shared) + output2 = layers.Dense(3, activation="softmax", name="output2")(shared) + return models.Model(inputs=inputs, outputs=[output1, output2]) raise ValueError(f"Unknown model type: {type}") - - - def _convert_to_numpy(structure): return tree.map_structure( lambda x: x.numpy() if hasattr(x, "numpy") else np.array(x), structure @@ -118,7 +130,8 @@ def _set_interpreter_inputs(interpreter, inputs): break if matched_key is None: raise KeyError( - f"Unable to match input '{detail['name']}' in provided inputs" + f"Unable to match input '{detail['name']}' in provided " + f"inputs" ) value = inputs[matched_key] interpreter.set_tensor(detail["index"], value) @@ -136,7 +149,9 @@ def _set_interpreter_inputs(interpreter, inputs): def _get_interpreter_outputs(interpreter): output_details = interpreter.get_output_details() - outputs = [interpreter.get_tensor(detail["index"]) for detail in output_details] + outputs = [ + interpreter.get_tensor(detail["index"]) for detail in output_details + ] return outputs[0] if len(outputs) == 1 else outputs @@ -148,13 +163,11 @@ def _get_interpreter_outputs(interpreter): testing.tensorflow_uses_gpu(), reason="LiteRT export tests are only run on CPU to avoid CI issues.", ) -# Note: Tests use AI Edge LiteRT interpreter when available, +# Note: Tests use AI Edge LiteRT interpreter when available, # fallback to TensorFlow Lite interpreter otherwise class ExportLitertTest(testing.TestCase): @parameterized.named_parameters( - named_product( - model_type=["sequential", "functional", "lstm"] - ) + named_product(model_type=["sequential", "functional", "lstm"]) ) def test_standard_model_export(self, model_type): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") @@ -167,8 +180,9 @@ def test_standard_model_export(self, model_type): ref_input = ref_input.astype("float32") ref_output = _convert_to_numpy(model(ref_input)) - export_path = export_litert(model, temp_filepath) - self.assertTrue(export_path.endswith(".tflite")) + # Test with model.export() + model.export(temp_filepath, format="litert") + export_path = f"{temp_filepath}.tflite" self.assertTrue(os.path.exists(export_path)) interpreter = LiteRtInterpreter(model_path=export_path) @@ -185,7 +199,7 @@ def test_standard_model_export(self, model_type): def test_model_with_input_structure(self, struct_type): batch_size = 1 # TFLite expects batch_size=1 base_input = np.random.normal(size=(batch_size, 10)).astype("float32") - + if struct_type == "tuple": # Use Functional API for proper Input layer handling input1 = layers.Input(shape=(10,), name="input_1") @@ -205,7 +219,9 @@ def test_model_with_input_structure(self, struct_type): input1 = layers.Input(shape=(10,), name="x") input2 = layers.Input(shape=(10,), name="y") output = layers.Add()([input1, input2]) - model = models.Model(inputs={"x": input1, "y": input2}, outputs=output) + model = models.Model( + inputs={"x": input1, "y": input2}, outputs=output + ) ref_input = {"x": base_input, "y": base_input * 2} else: raise AssertionError("Unexpected structure type") @@ -215,7 +231,9 @@ def test_model_with_input_structure(self, struct_type): model(tree.map_structure(ops.convert_to_tensor, ref_input)) ) - export_path = export_litert(model, temp_filepath) + # Test with model.export() + model.export(temp_filepath, format="litert") + export_path = f"{temp_filepath}.tflite" interpreter = LiteRtInterpreter(model_path=export_path) interpreter.allocate_tensors() @@ -237,19 +255,21 @@ def test_model_with_input_structure(self, struct_type): def test_model_with_multiple_inputs(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") - + # Use Functional API for proper Input layer handling input_x = layers.Input(shape=(10,), name="x") input_y = layers.Input(shape=(10,), name="y") output = layers.Add()([input_x, input_y]) model = models.Model(inputs=[input_x, input_y], outputs=output) - + batch_size = 1 # TFLite expects batch_size=1 ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32") ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32") ref_output = _convert_to_numpy(model([ref_input_x, ref_input_y])) - export_path = export_litert(model, temp_filepath) + # Test with model.export() + model.export(temp_filepath, format="litert") + export_path = f"{temp_filepath}.tflite" interpreter = LiteRtInterpreter(model_path=export_path) interpreter.allocate_tensors() @@ -263,29 +283,107 @@ def test_model_with_multiple_inputs(self): larger_x = np.concatenate([ref_input_x, ref_input_x], axis=0) larger_y = np.concatenate([ref_input_y, ref_input_y], axis=0) input_details = interpreter.get_input_details() - interpreter.resize_tensor_input(input_details[0]["index"], larger_x.shape) - interpreter.resize_tensor_input(input_details[1]["index"], larger_y.shape) + interpreter.resize_tensor_input( + input_details[0]["index"], larger_x.shape + ) + interpreter.resize_tensor_input( + input_details[1]["index"], larger_y.shape + ) interpreter.allocate_tensors() _set_interpreter_inputs(interpreter, [larger_x, larger_y]) interpreter.invoke() larger_output = _get_interpreter_outputs(interpreter) larger_ref_output = _convert_to_numpy(model([larger_x, larger_y])) - self.assertAllClose(larger_ref_output, larger_output, atol=1e-4, rtol=1e-4) + self.assertAllClose( + larger_ref_output, larger_output, atol=1e-4, rtol=1e-4 + ) def test_export_with_custom_input_signature(self): model = get_model("sequential") temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") input_signature = [layers.InputSpec(shape=(None, 10), dtype="float32")] - export_path = export_litert( - model, + # Test with model.export() + model.export( temp_filepath, + format="litert", input_signature=input_signature, ) + export_path = f"{temp_filepath}.tflite" self.assertTrue(os.path.exists(export_path)) interpreter = LiteRtInterpreter(model_path=export_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(len(input_details), 1) - self.assertEqual(tuple(input_details[0]["shape"][1:]), (10,)) \ No newline at end of file + self.assertEqual(tuple(input_details[0]["shape"][1:]), (10,)) + + def test_multi_output_model_export(self): + """Test exporting multi-output models.""" + model = get_model("multi_output") + + # Build the model + ref_input = np.random.normal(size=(3, 10)).astype("float32") + model(ref_input) + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + model.export(temp_filepath, format="litert") + + tflite_path = f"{temp_filepath}.tflite" + self.assertTrue(os.path.exists(tflite_path)) + + # Test inference + interpreter = LiteRtInterpreter(model_path=tflite_path) + interpreter.allocate_tensors() + + input_details = interpreter.get_input_details() + output_details = interpreter.get_output_details() + + self.assertEqual(len(output_details), 2) + + test_input = np.random.random(input_details[0]["shape"]).astype( + np.float32 + ) + interpreter.set_tensor(input_details[0]["index"], test_input) + interpreter.invoke() + + for detail in output_details: + output = interpreter.get_tensor(detail["index"]) + self.assertIsInstance(output, np.ndarray) + + def test_export_with_verbose(self): + """Test export with verbose output.""" + model = get_model("sequential") + dummy_input = np.random.random((3, 10)).astype(np.float32) + model(dummy_input) + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + + # Export with verbose=True + model.export(temp_filepath, format="litert", verbose=True) + + tflite_path = f"{temp_filepath}.tflite" + self.assertTrue(os.path.exists(tflite_path)) + + # Verify the exported model works + interpreter = LiteRtInterpreter(model_path=tflite_path) + interpreter.allocate_tensors() + + input_details = interpreter.get_input_details() + self.assertEqual(len(input_details), 1) + + def test_export_error_handling(self): + """Test error handling in export API.""" + model = get_model("sequential") + dummy_input = np.random.random((3, 10)).astype(np.float32) + model(dummy_input) + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + + # Test with invalid format + with self.assertRaises(ValueError): + model.export(temp_filepath, format="invalid_format") + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/keras/src/export/saved_model.py b/keras/src/export/saved_model.py index 32e42d846729..c9686fafd243 100644 --- a/keras/src/export/saved_model.py +++ b/keras/src/export/saved_model.py @@ -357,6 +357,7 @@ def serving_fn(x): return decorated_fn from keras.src.export.export_utils import make_tf_tensor_spec + input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -415,6 +416,7 @@ def track_and_add_endpoint(self, name, resource, input_signature, **kwargs): ) from keras.src.export.export_utils import make_tf_tensor_spec + input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -649,6 +651,7 @@ def export_saved_model( export_archive = ExportArchive() if input_signature is None: from keras.src.export.export_utils import get_input_signature + input_signature = get_input_signature(model) export_archive.track_and_add_endpoint( diff --git a/keras/src/models/model.py b/keras/src/models/model.py index d29e644e29f5..a0976fa092c3 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -568,8 +568,8 @@ def export( filepath: `str` or `pathlib.Path` object. The path to save the artifact. format: `str`. The export format. Supported values: - `"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"litert"`. Defaults to - `"tf_saved_model"`. + `"tf_saved_model"`, `"onnx"`, `"openvino"`, and `"litert"`. + Defaults to `"tf_saved_model"`. verbose: `bool`. Whether to print a message during export. Defaults to `None`, which uses the default value set by different backends and formats. @@ -592,12 +592,18 @@ def export( provided, they will be automatically computed. - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. - - `allow_custom_ops`: Optional `bool`. Specific to `format="litert"`. - Whether to allow custom operations during conversion. Defaults to `False`. - - `enable_select_tf_ops`: Optional `bool`. Specific to `format="litert"`. - Whether to enable TensorFlow Select ops for unsupported operations. Defaults to `False`. - - `optimizations`: Optional `list`. Specific to `format="litert"`. - List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). + - `allow_custom_ops`: Optional `bool`. Specific to + `format="litert"`. + Whether to allow custom operations during conversion. + Defaults to `False`. + - `enable_select_tf_ops`: Optional `bool`. Specific to + `format="litert"`. + Whether to enable TensorFlow Select ops for unsupported + operations. Defaults to `False`. + - `optimizations`: Optional `list`. Specific to + `format="litert"`. + List of optimizations to apply (e.g., + `[tf.lite.Optimize.DEFAULT]`). **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. @@ -642,9 +648,13 @@ def export( # Load the artifact in a different process/environment interpreter = tf.lite.Interpreter(model_path="path/to/location") interpreter.allocate_tensors() - interpreter.set_tensor(interpreter.get_input_details()[0]['index'], input_data) + interpreter.set_tensor( + interpreter.get_input_details()[0]['index'], input_data + ) interpreter.invoke() - output_data = interpreter.get_tensor(interpreter.get_output_details()[0]['index']) + output_data = interpreter.get_tensor( + interpreter.get_output_details()[0]['index'] + ) ``` """ from keras.src.export import export_litert From c6f0c70a88e0d58794cbeb92c5e670e5ae774e70 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 30 Sep 2025 09:58:21 +0530 Subject: [PATCH 051/115] Refactor LiteRT exporter to use module_utils.litert Replaces direct imports and availability checks for LiteRT with the LazyModule-based `litert` utility from `module_utils`. Updates tests and exporter logic to require '.tflite' filepaths and consistently use the new import pattern, improving modularity and error handling. --- keras/src/export/litert_exporter.py | 52 +++++++++--------- keras/src/export/litert_exporter_test.py | 68 ++++++++++++++---------- keras/src/utils/module_utils.py | 1 + 3 files changed, 68 insertions(+), 53 deletions(-) diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert_exporter.py index 022b2ef42d78..04874e6e8550 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -5,16 +5,7 @@ from keras.src.export.export_utils import get_input_signature from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils - -# Try to import LiteRT AOT compilation if available -try: - from litert.python.aot import aot_compile - from litert.python.aot.core import types as litert_types - from litert.python.aot.vendors import import_vendor - - LITERT_AVAILABLE = True -except ImportError: - LITERT_AVAILABLE = False +from keras.src.utils.module_utils import litert def export_litert( @@ -30,8 +21,9 @@ def export_litert( Args: model: The Keras model to export. filepath: The path to save the exported artifact. - verbose: Optional; whether to log progress messages. Defaults to - ``False`` when ``None`` is provided. + verbose: `bool`. Whether to print a message during export. Defaults to + `None`, which uses the default value set by different backends and + formats. input_signature: Optional input signature specification. If ``None``, it will be inferred. aot_compile_targets: Optional list of Litert targets for AOT @@ -43,16 +35,18 @@ def export_litert( AOT compilation is requested. """ - actual_verbose = bool(verbose) if verbose is not None else False + if verbose is None: + verbose = True # Defaults to `True` for all backends. + exporter = LitertExporter( model=model, input_signature=input_signature, - verbose=actual_verbose, + verbose=verbose, aot_compile_targets=aot_compile_targets, **kwargs, ) result = exporter.export(filepath) - if actual_verbose: + if verbose: if hasattr(result, "models"): io_utils.print_msg( f"Saved artifact at '{filepath}'. AOT compiled " @@ -88,7 +82,7 @@ def __init__( """ self.model = model self.input_signature = input_signature - self.verbose = bool(verbose) + self.verbose = verbose self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs @@ -126,8 +120,10 @@ def export(self, filepath): ) # 4. Save the initial TFLite model to the specified file path. - if not filepath.endswith(".tflite"): - filepath += ".tflite" + assert filepath.endswith(".tflite"), ( + "The LiteRT export requires the filepath to end with '.tflite'. " + f"Got: {filepath}" + ) with open(filepath, "wb") as f: f.write(tflite_model) @@ -138,11 +134,11 @@ def export(self, filepath): # 5. Perform AOT compilation if targets are specified and LiteRT is # available compiled_models = None - if self.aot_compile_targets and LITERT_AVAILABLE: + if self.aot_compile_targets and litert.available: if self.verbose: print("Performing AOT compilation for Litert targets...") compiled_models = self._aot_compile(filepath) - elif self.aot_compile_targets and not LITERT_AVAILABLE: + elif self.aot_compile_targets and not litert.available: if self.verbose: print( "Warning: AOT compilation requested but LiteRT is not " @@ -432,12 +428,14 @@ def _convert_with_wrapper(self, input_signature): def _aot_compile(self, tflite_filepath): """Performs AOT compilation using LiteRT.""" - if not LITERT_AVAILABLE: + if not litert.available: raise RuntimeError("LiteRT is not available for AOT compilation") try: # Create a LiteRT model from the TFLite file - litert_model = litert_types.Model.create_from_path(tflite_filepath) + litert_model = litert.python.aot.core.types.Model.create_from_path( + tflite_filepath + ) # Determine output directory base_dir = os.path.dirname(tflite_filepath) @@ -449,7 +447,7 @@ def _aot_compile(self, tflite_filepath): print(f"Output directory: {output_dir}") # Perform AOT compilation - result = aot_compile.aot_compile( + result = litert.python.aot.aot_compile( input_model=litert_model, output_dir=output_dir, target=self.aot_compile_targets, @@ -486,12 +484,14 @@ def _aot_compile(self, tflite_filepath): def _get_available_litert_targets(self): """Get available LiteRT targets for AOT compilation.""" - if not LITERT_AVAILABLE: + if not litert.available: return [] try: # Get all registered targets - targets = import_vendor.AllRegisteredTarget() + targets = ( + litert.python.aot.vendors.import_vendor.AllRegisteredTarget() + ) return targets if isinstance(targets, list) else [targets] except Exception as e: if self.verbose: @@ -525,7 +525,7 @@ def export_with_aot( @classmethod def get_available_targets(cls): """Get list of available LiteRT AOT compilation targets.""" - if not LITERT_AVAILABLE: + if not litert.available: return [] dummy_exporter = cls(model=None) diff --git a/keras/src/export/litert_exporter_test.py b/keras/src/export/litert_exporter_test.py index 4bd941d8116e..5f9f829512e5 100644 --- a/keras/src/export/litert_exporter_test.py +++ b/keras/src/export/litert_exporter_test.py @@ -1,5 +1,3 @@ -"""Tests for LiteRT exporting utilities.""" - import os import numpy as np @@ -7,18 +5,6 @@ import tensorflow as tf from absl.testing import parameterized -# Try to use AI Edge LiteRT interpreter, fallback to TensorFlow Lite -try: - from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter - - litert_available = True - print("Using AI Edge LiteRT interpreter") -except ImportError: - # Fallback to TensorFlow Lite interpreter - LiteRtInterpreter = tf.lite.Interpreter - litert_available = True - print("Using TensorFlow Lite interpreter as fallback") - from keras.src import backend from keras.src import layers from keras.src import models @@ -27,6 +13,20 @@ from keras.src import tree from keras.src.saving import saving_lib from keras.src.testing.test_utils import named_product +from keras.src.utils.module_utils import litert + +# Use AI Edge LiteRT interpreter if available, fallback to TensorFlow Lite +if litert.available: + try: + from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter + + print("Using AI Edge LiteRT interpreter") + except ImportError: + LiteRtInterpreter = tf.lite.Interpreter + print("Using TensorFlow Lite interpreter as fallback") +else: + LiteRtInterpreter = tf.lite.Interpreter + print("Using TensorFlow Lite interpreter as fallback") class CustomModel(models.Model): @@ -170,7 +170,9 @@ class ExportLitertTest(testing.TestCase): named_product(model_type=["sequential", "functional", "lstm"]) ) def test_standard_model_export(self, model_type): - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) model = get_model(model_type) batch_size = 1 # TFLite expects batch_size=1 if model_type == "lstm": @@ -182,7 +184,7 @@ def test_standard_model_export(self, model_type): # Test with model.export() model.export(temp_filepath, format="litert") - export_path = f"{temp_filepath}.tflite" + export_path = temp_filepath self.assertTrue(os.path.exists(export_path)) interpreter = LiteRtInterpreter(model_path=export_path) @@ -226,14 +228,16 @@ def test_model_with_input_structure(self, struct_type): else: raise AssertionError("Unexpected structure type") - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) ref_output = _convert_to_numpy( model(tree.map_structure(ops.convert_to_tensor, ref_input)) ) # Test with model.export() model.export(temp_filepath, format="litert") - export_path = f"{temp_filepath}.tflite" + export_path = temp_filepath interpreter = LiteRtInterpreter(model_path=export_path) interpreter.allocate_tensors() @@ -254,7 +258,9 @@ def test_model_with_input_structure(self, struct_type): self.assertAllClose(ref_output, revived_output) def test_model_with_multiple_inputs(self): - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) # Use Functional API for proper Input layer handling input_x = layers.Input(shape=(10,), name="x") @@ -269,7 +275,7 @@ def test_model_with_multiple_inputs(self): # Test with model.export() model.export(temp_filepath, format="litert") - export_path = f"{temp_filepath}.tflite" + export_path = temp_filepath interpreter = LiteRtInterpreter(model_path=export_path) interpreter.allocate_tensors() @@ -300,7 +306,9 @@ def test_model_with_multiple_inputs(self): def test_export_with_custom_input_signature(self): model = get_model("sequential") - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) input_signature = [layers.InputSpec(shape=(None, 10), dtype="float32")] # Test with model.export() @@ -309,7 +317,7 @@ def test_export_with_custom_input_signature(self): format="litert", input_signature=input_signature, ) - export_path = f"{temp_filepath}.tflite" + export_path = temp_filepath self.assertTrue(os.path.exists(export_path)) interpreter = LiteRtInterpreter(model_path=export_path) @@ -326,10 +334,12 @@ def test_multi_output_model_export(self): ref_input = np.random.normal(size=(3, 10)).astype("float32") model(ref_input) - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) model.export(temp_filepath, format="litert") - tflite_path = f"{temp_filepath}.tflite" + tflite_path = temp_filepath self.assertTrue(os.path.exists(tflite_path)) # Test inference @@ -357,12 +367,14 @@ def test_export_with_verbose(self): dummy_input = np.random.random((3, 10)).astype(np.float32) model(dummy_input) - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) # Export with verbose=True model.export(temp_filepath, format="litert", verbose=True) - tflite_path = f"{temp_filepath}.tflite" + tflite_path = temp_filepath self.assertTrue(os.path.exists(tflite_path)) # Verify the exported model works @@ -378,7 +390,9 @@ def test_export_error_handling(self): dummy_input = np.random.random((3, 10)).astype(np.float32) model(dummy_input) - temp_filepath = os.path.join(self.get_temp_dir(), "exported_model") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) # Test with invalid format with self.assertRaises(ValueError): diff --git a/keras/src/utils/module_utils.py b/keras/src/utils/module_utils.py index 286394a99358..577d08a7fd42 100644 --- a/keras/src/utils/module_utils.py +++ b/keras/src/utils/module_utils.py @@ -59,3 +59,4 @@ def __repr__(self): dmtree = LazyModule("tree") tf2onnx = LazyModule("tf2onnx") grain = LazyModule("grain") +litert = LazyModule("ai_edge_litert") From 3c1d90a4276c7176c21a9a8b3c41ad94c689f755 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 1 Oct 2025 12:13:29 +0530 Subject: [PATCH 052/115] Simplify export_litert return value and messaging Removed the return value and conditional messaging from export_litert. The function now always prints a simple message upon export and no longer returns the compilation result or filepath. --- keras/src/export/litert_exporter.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert_exporter.py index 04874e6e8550..e5217b552b86 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -29,10 +29,6 @@ def export_litert( aot_compile_targets: Optional list of Litert targets for AOT compilation. **kwargs: Additional keyword arguments passed to the exporter. - - Returns: - The filepath to the exported artifact, or the compilation result when - AOT compilation is requested. """ if verbose is None: @@ -45,16 +41,9 @@ def export_litert( aot_compile_targets=aot_compile_targets, **kwargs, ) - result = exporter.export(filepath) + exporter.export(filepath) if verbose: - if hasattr(result, "models"): - io_utils.print_msg( - f"Saved artifact at '{filepath}'. AOT compiled " - f"{len(result.models)} variant(s)." - ) - else: - io_utils.print_msg(f"Saved artifact at '{result}'.") - return result + io_utils.print_msg(f"Saved artifact at '{filepath}'.") class LitertExporter: From cd9d063ccdfb7506443cf689d084b2299070382e Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 1 Oct 2025 13:40:50 +0530 Subject: [PATCH 053/115] Update export_utils.py --- keras/src/export/export_utils.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index cb28bf9fbbe2..79230dafa09b 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -13,7 +13,7 @@ def get_input_signature(model): model: A Keras Model instance. Returns: - Input signature suitable for model export. + Input signature suitable for model export (always a tuple or list). """ if not isinstance(model, models.Model): raise TypeError( @@ -30,6 +30,9 @@ def get_input_signature(model): input_signature = tree.map_structure( make_input_spec, model._inputs_struct ) + # Ensure single inputs are wrapped in a tuple for TensorFlow compatibility + if not isinstance(input_signature, (list, tuple)): + input_signature = (input_signature,) elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: @@ -48,6 +51,9 @@ def get_input_signature(model): "once before export, or you must provide explicit " "input_signature." ) + # Ensure single inputs are wrapped in a tuple for TensorFlow compatibility + if input_signature and not isinstance(input_signature, (list, tuple)): + input_signature = (input_signature,) return input_signature @@ -59,6 +65,8 @@ def _infer_input_signature_from_model(model): def _make_input_spec(structure): # We need to turn wrapper structures like TrackingDict or _DictWrapper # into plain Python structures because they don't work with jax2tf/JAX. + if structure is None: + return None if isinstance(structure, dict): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): From fa3d3ed44447ffe641da2d5e2ee222541499c83d Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Fri, 3 Oct 2025 10:28:45 +0530 Subject: [PATCH 054/115] Refactor input signature inference for export Simplifies and standardizes input signature generation for Functional, Sequential, and subclassed models to always use a flexible batch dimension (None) for export. Refactors Sequential input shape inference to remove hardcoded fallbacks and ensure dynamic batching support. Cleans up and clarifies error handling and structure of input signature utilities. --- keras/src/export/export_utils.py | 81 ++++++-------------- keras/src/export/litert_exporter.py | 112 ++++++---------------------- 2 files changed, 42 insertions(+), 151 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 79230dafa09b..fd05f9b7b4a4 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -27,33 +27,23 @@ def get_input_signature(model): ) if isinstance(model, models.Functional): - input_signature = tree.map_structure( - make_input_spec, model._inputs_struct - ) - # Ensure single inputs are wrapped in a tuple for TensorFlow compatibility - if not isinstance(input_signature, (list, tuple)): - input_signature = (input_signature,) + # Functional models expect a single positional argument `inputs` + # containing the full nested input structure. We keep the + # original behavior of returning a single-element list that + # wraps the mapped structure so that downstream exporters + # build a tf.function with one positional argument. + input_signature = [ + tree.map_structure(make_input_spec, model._inputs_struct) + ] elif isinstance(model, models.Sequential): input_signature = tree.map_structure(make_input_spec, model.inputs) else: - # For subclassed models, try multiple approaches + # Subclassed models: rely on recorded shapes from the first call. input_signature = _infer_input_signature_from_model(model) - if not input_signature: - # Fallback: Try to get from model.inputs if available - if hasattr(model, "inputs") and model.inputs: - input_signature = tree.map_structure( - make_input_spec, model.inputs - ) - elif not model._called: - raise ValueError( - "The model provided has never been called and has no " - "detectable input structure. It must be called at least " - "once before export, or you must provide explicit " - "input_signature." - ) - # Ensure single inputs are wrapped in a tuple for TensorFlow compatibility - if input_signature and not isinstance(input_signature, (list, tuple)): - input_signature = (input_signature,) + if not input_signature or not model._called: + raise ValueError( + "The model provided has never called. It must be called at least once before export." + ) return input_signature @@ -71,39 +61,18 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - # Keep batch dimension unbounded, keep other dimensions as they - # are - bounded_shape = [] - - for i, dim in enumerate(structure): - if dim is None and i == 0: - # Always keep batch dimension as None - bounded_shape.append(None) - else: - # Keep other dimensions as they are (None or specific - # size) - bounded_shape.append(dim) - + # For export, force batch dimension to None for flexible batching + shape = (None,) + structure[1:] if len(structure) > 0 else structure return layers.InputSpec( - shape=tuple(bounded_shape), dtype=model.input_dtype + shape=shape, dtype=model.input_dtype ) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - # Keep batch dimension unbounded, keep other dimensions as they - # are - bounded_shape = [] - - for i, dim in enumerate(structure): - if dim is None and i == 0: - # Always keep batch dimension as None - bounded_shape.append(None) - else: - # Keep other dimensions as they are - bounded_shape.append(dim) - + # For export, force batch dimension to None for flexible batching + shape = (None,) + tuple(structure[1:]) if len(structure) > 0 else tuple(structure) return layers.InputSpec( - shape=bounded_shape, dtype=model.input_dtype + shape=shape, dtype=model.input_dtype ) return [_make_input_spec(v) for v in structure] else: @@ -111,16 +80,8 @@ def _make_input_spec(structure): f"Unsupported type {type(structure)} for {structure}" ) - # Try to reconstruct the input structure from build shapes - if len(shapes_dict) == 1: - # Single input case - return _make_input_spec(list(shapes_dict.values())[0]) - else: - # Multiple inputs - try to determine if it's a dict or list structure - # Return as dictionary by default to preserve input names - return { - key: _make_input_spec(shape) for key, shape in shapes_dict.items() - } + # Always return a flat list preserving the order of shapes_dict values + return [_make_input_spec(value) for value in shapes_dict.values()] def make_input_spec(x): diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert_exporter.py index e5217b552b86..aabde7ff1eec 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -220,100 +220,30 @@ def _ensure_model_built(self): ) def _infer_sequential_input_shape(self): - """Infer input shape for Sequential models.""" + """Infer input shape for Sequential models. + + Returns the input shape with flexible batch dimension (None) for export, + allowing dynamic batch sizes during inference. + """ try: - # First, look for Input layer - for layer in self.model.layers: - if ( - hasattr(layer, "__class__") - and layer.__class__.__name__ == "InputLayer" - ): - if ( - hasattr(layer, "batch_input_shape") - and layer.batch_input_shape - ): - input_shape = layer.batch_input_shape - return ( - (1,) + input_shape[1:] - if input_shape[0] is None - else input_shape - ) - - # If no Input layer, try to get from first layer - if hasattr(self.model, "layers") and self.model.layers: - first_layer = self.model.layers[0] - - # Check various ways to get input shape - for attr in [ - "input_shape", - "batch_input_shape", - "_batch_input_shape", - ]: - if hasattr(first_layer, attr): - input_shape = getattr(first_layer, attr) - if input_shape: - return ( - (1,) + input_shape[1:] - if input_shape[0] is None - else input_shape - ) - - # Try to infer from layer configuration without hardcoded - # fallbacks - if hasattr(first_layer, "__class__"): - class_name = first_layer.__class__.__name__ - - if class_name == "Dense": - # For Dense layers, try to infer from input_dim - if ( - hasattr(first_layer, "input_dim") - and first_layer.input_dim - ): - return (1, first_layer.input_dim) - - elif class_name == "Dropout": - # For Dropout, look at the next layer to infer shape - if len(self.model.layers) > 1: - next_layer = self.model.layers[1] - if hasattr(next_layer, "__class__"): - next_class = next_layer.__class__.__name__ - if next_class == "Dense": - if ( - hasattr(next_layer, "input_dim") - and next_layer.input_dim - ): - return (1, next_layer.input_dim) - - elif class_name in [ - "BatchNormalization", - "LayerNormalization", - ]: - # For normalization layers, look at the next layer to - # infer shape - if len(self.model.layers) > 1: - next_layer = self.model.layers[1] - if hasattr(next_layer, "__class__"): - next_class = next_layer.__class__.__name__ - if next_class == "Dense": - if ( - hasattr(next_layer, "input_dim") - and next_layer.input_dim - ): - return (1, next_layer.input_dim) - - # For other layer types, we cannot reliably infer without - # hardcoded values - # Return None to indicate inference failed - if self.verbose: - print( - f"Cannot infer input shape for layer type: " - f"{class_name}" - ) - + # For Sequential models, input_shape should be available directly + if hasattr(self.model, 'input_shape') and self.model.input_shape: + input_shape = self.model.input_shape + # For export, always use None batch dimension to allow dynamic batching + return (None,) + input_shape[1:] + + # Fallback: try to get from first layer's batch_shape + if hasattr(self.model, "_layers") and self.model._layers: + first_layer = self.model._layers[0] + if hasattr(first_layer, 'batch_shape') and first_layer.batch_shape: + input_shape = first_layer.batch_shape + # For export, always use None batch dimension to allow dynamic batching + return (None,) + input_shape[1:] + except Exception as e: if self.verbose: print(f"Warning: Could not infer Sequential input shape: {e}") - + return None def _convert_to_tflite(self, input_signature): @@ -458,7 +388,7 @@ def _aot_compile(self, tflite_filepath): if report: print("Compilation Report:") print(report) - except: + except Exception: pass return result From e775ff2c99b0e369cb5e2d351f70ed87f5751120 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Fri, 3 Oct 2025 10:56:55 +0530 Subject: [PATCH 055/115] simplified code --- keras/src/export/litert_exporter.py | 91 +++++++++++++---------------- 1 file changed, 40 insertions(+), 51 deletions(-) diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert_exporter.py index aabde7ff1eec..c076120df15c 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert_exporter.py @@ -146,90 +146,79 @@ def export(self, filepath): def _ensure_model_built(self): """ - Ensures the model is fully traced by performing a forward pass. + Ensures the model is built by calling model.build() if necessary. - This is critical because `model.built` can be True even if the model - has not been traced with concrete input shapes, which is required for - TFLite conversion. This method guarantees a forward pass happens. + This is critical because model.built can be True even if the model + has not been properly initialized with input shapes, which is required + for TFLite conversion. This method uses model.build() which is simpler + and more reliable than generating dummy inputs. """ + if self.model.built: + if self.verbose: + print("Model is already built.") + return + if self.verbose: - print("Ensuring model is traced by performing a forward pass...") + print("Building model with inferred input shapes...") try: - # Generate dummy inputs based on the model's specification - dummy_inputs = [] - # Prioritize `model.inputs` as it's the most reliable source + # Infer input shape(s) and build the model if hasattr(self.model, "inputs") and self.model.inputs: + # Functional/Sequential with Input layer + input_shapes = [inp.shape for inp in self.model.inputs] if self.verbose: print( - f"Generating inputs from `model.inputs` " - f"({len(self.model.inputs)} input(s))." - ) - for input_layer in self.model.inputs: - shape = [ - 1 if dim is None else dim for dim in input_layer.shape - ] - dummy_input = tf.zeros( - shape, dtype=input_layer.dtype or tf.float32 + f"Building model from model.inputs " + f"({len(input_shapes)} input(s)): {input_shapes}" ) - dummy_inputs.append(dummy_input) + if len(input_shapes) == 1: + self.model.build(input_shapes[0]) + else: + self.model.build(input_shapes) else: - # Fallback for pure Sequential models without an Input layer + # Sequential without Input layer or subclassed model if self.verbose: print( - "Model has no `inputs` attribute. Assuming pure " - "Sequential and inferring shape." + "Model has no inputs attribute. Inferring shape for " + "Sequential model." ) input_shape = self._infer_sequential_input_shape() - if input_shape: - if self.verbose: - print( - f"Inferred input shape for Sequential model: " - f"{input_shape}" - ) - dummy_inputs.append(tf.zeros(input_shape, dtype=tf.float32)) - else: + if input_shape is None: raise ValueError( - "Cannot build Sequential model: unable to infer input " - "shape. Please add an `Input` layer or specify " - "`input_shape` in the first layer." + "Cannot build model: unable to infer input shape. " + "Please add an Input layer or specify input_shape " + "in the first layer." ) - - # Perform a direct call in inference mode to trace the model. - if len(dummy_inputs) == 1: - self.model(dummy_inputs[0], training=False) - else: - self.model(dummy_inputs, training=False) + if self.verbose: + print(f"Building model with inferred shape: {input_shape}") + self.model.build(input_shape) if self.verbose: - print( - "Model successfully traced via direct call with " - "training=False." - ) + print("Model successfully built.") except Exception as e: if self.verbose: - print(f"Error during model call: {e}") - raise ValueError(f"Failed to trace model with error: {e}") + print(f"Error during model.build(): {e}") + raise ValueError(f"Failed to build model with error: {e}") - # Final, critical check + # Final check if not self.model.built: raise ValueError( - "Model could not be built even after a direct call. " - "Please check the model's definition and input specification." + "Model could not be built. Please check the model's " + "definition and input specification." ) def _infer_sequential_input_shape(self): """Infer input shape for Sequential models. - Returns the input shape with flexible batch dimension (None) for export, - allowing dynamic batch sizes during inference. + Returns the input shape with flexible batch dimension (None) suitable + for model.build(), allowing dynamic batch sizes during inference. """ try: # For Sequential models, input_shape should be available directly if hasattr(self.model, 'input_shape') and self.model.input_shape: input_shape = self.model.input_shape - # For export, always use None batch dimension to allow dynamic batching + # Always use None batch dimension for model.build() return (None,) + input_shape[1:] # Fallback: try to get from first layer's batch_shape @@ -237,7 +226,7 @@ def _infer_sequential_input_shape(self): first_layer = self.model._layers[0] if hasattr(first_layer, 'batch_shape') and first_layer.batch_shape: input_shape = first_layer.batch_shape - # For export, always use None batch dimension to allow dynamic batching + # Always use None batch dimension for model.build() return (None,) + input_shape[1:] except Exception as e: From 34b662d25b0d2efe4b9a2fb67c16828a58632ee2 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 09:54:37 +0530 Subject: [PATCH 056/115] Refactor LiteRT exporter and update import paths Renamed litert_exporter.py and its test to litert.py and litert_test.py, respectively. Updated all relevant imports to use the new module name. Refactored LitertExporter to simplify model building logic and removed redundant input shape inference for Sequential models. Also cleaned up export_utils.py by removing the export format registry and related functions. --- keras/src/export/__init__.py | 4 +- keras/src/export/export_utils.py | 58 +++--------- .../export/{litert_exporter.py => litert.py} | 89 +++++-------------- ...litert_exporter_test.py => litert_test.py} | 0 4 files changed, 38 insertions(+), 113 deletions(-) rename keras/src/export/{litert_exporter.py => litert.py} (82%) rename keras/src/export/{litert_exporter_test.py => litert_test.py} (100%) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index 97f4b361fb3e..f0b0be00231f 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -1,5 +1,5 @@ -from keras.src.export.litert_exporter import LitertExporter -from keras.src.export.litert_exporter import export_litert +from keras.src.export.litert import LitertExporter +from keras.src.export.litert import export_litert from keras.src.export.onnx import export_onnx from keras.src.export.openvino import export_openvino from keras.src.export.saved_model import ExportArchive diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index fd05f9b7b4a4..641e03e23357 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -42,7 +42,8 @@ def get_input_signature(model): input_signature = _infer_input_signature_from_model(model) if not input_signature or not model._called: raise ValueError( - "The model provided has never called. It must be called at least once before export." + "The model provided has never called. It must be called " + "at least once before export." ) return input_signature @@ -61,19 +62,23 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - # For export, force batch dimension to None for flexible batching - shape = (None,) + structure[1:] if len(structure) > 0 else structure - return layers.InputSpec( - shape=shape, dtype=model.input_dtype + # For export, force batch dimension to None for flexible + # batching + shape = ( + (None,) + structure[1:] if len(structure) > 0 else structure ) + return layers.InputSpec(shape=shape, dtype=model.input_dtype) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - # For export, force batch dimension to None for flexible batching - shape = (None,) + tuple(structure[1:]) if len(structure) > 0 else tuple(structure) - return layers.InputSpec( - shape=shape, dtype=model.input_dtype + # For export, force batch dimension to None for flexible + # batching + shape = ( + (None,) + tuple(structure[1:]) + if len(structure) > 0 + else tuple(structure) ) + return layers.InputSpec(shape=shape, dtype=model.input_dtype) return [_make_input_spec(v) for v in structure] else: raise ValueError( @@ -126,38 +131,3 @@ def convert_spec_to_tensor(spec, replace_none_number=None): s if s is not None else replace_none_number for s in shape ) return ops.ones(shape, spec.dtype) - - -# Registry for export formats -EXPORT_FORMATS = { - "tf_saved_model": "keras.src.export.saved_model:export_saved_model", - "litert": "keras.src.export.litert_exporter:export_litert", - # Add other formats as needed -} - - -def _get_exporter(format_name): - """Lazy import exporter to avoid circular imports.""" - if format_name not in EXPORT_FORMATS: - raise ValueError(f"Unknown export format: {format_name}") - - exporter = EXPORT_FORMATS[format_name] - if isinstance(exporter, str): - # Lazy import for string references - module_path, attr_name = exporter.split(":") - module = __import__(module_path, fromlist=[attr_name]) - return getattr(module, attr_name) - else: - # Direct reference - return exporter - - -def export_model(model, filepath, format="tf_saved_model", **kwargs): - """Export a model to the specified format.""" - exporter = _get_exporter(format) - - if isinstance(exporter, type): - exporter_instance = exporter(model, **kwargs) - return exporter_instance.export(filepath) - - return exporter(model, filepath, **kwargs) diff --git a/keras/src/export/litert_exporter.py b/keras/src/export/litert.py similarity index 82% rename from keras/src/export/litert_exporter.py rename to keras/src/export/litert.py index c076120df15c..e56d216deb08 100644 --- a/keras/src/export/litert_exporter.py +++ b/keras/src/export/litert.py @@ -2,6 +2,7 @@ import tensorflow as tf +from keras.src import tree from keras.src.export.export_utils import get_input_signature from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils @@ -146,95 +147,49 @@ def export(self, filepath): def _ensure_model_built(self): """ - Ensures the model is built by calling model.build() if necessary. + Ensures the model is built before conversion. - This is critical because model.built can be True even if the model - has not been properly initialized with input shapes, which is required - for TFLite conversion. This method uses model.build() which is simpler - and more reliable than generating dummy inputs. + For models that are not yet built, this attempts to build them + using the input signature or model.inputs. """ if self.model.built: - if self.verbose: - print("Model is already built.") return if self.verbose: - print("Building model with inferred input shapes...") + print("Building model before conversion...") try: - # Infer input shape(s) and build the model - if hasattr(self.model, "inputs") and self.model.inputs: - # Functional/Sequential with Input layer + # Try to build using input_signature if available + if self.input_signature: + input_shapes = tree.map_structure( + lambda spec: spec.shape, self.input_signature + ) + self.model.build(input_shapes) + # Fall back to model.inputs for Functional/Sequential models + elif hasattr(self.model, "inputs") and self.model.inputs: input_shapes = [inp.shape for inp in self.model.inputs] - if self.verbose: - print( - f"Building model from model.inputs " - f"({len(input_shapes)} input(s)): {input_shapes}" - ) if len(input_shapes) == 1: self.model.build(input_shapes[0]) else: self.model.build(input_shapes) else: - # Sequential without Input layer or subclassed model - if self.verbose: - print( - "Model has no inputs attribute. Inferring shape for " - "Sequential model." - ) - input_shape = self._infer_sequential_input_shape() - if input_shape is None: - raise ValueError( - "Cannot build model: unable to infer input shape. " - "Please add an Input layer or specify input_shape " - "in the first layer." - ) - if self.verbose: - print(f"Building model with inferred shape: {input_shape}") - self.model.build(input_shape) + raise ValueError( + "Cannot build model: no input_signature provided and " + "model has no inputs attribute. Please provide an " + "input_signature or ensure the model is already built." + ) if self.verbose: - print("Model successfully built.") + print("Model built successfully.") except Exception as e: if self.verbose: - print(f"Error during model.build(): {e}") - raise ValueError(f"Failed to build model with error: {e}") - - # Final check - if not self.model.built: + print(f"Error building model: {e}") raise ValueError( - "Model could not be built. Please check the model's " - "definition and input specification." + f"Failed to build model: {e}. Please ensure the model is " + "properly defined or provide an input_signature." ) - def _infer_sequential_input_shape(self): - """Infer input shape for Sequential models. - - Returns the input shape with flexible batch dimension (None) suitable - for model.build(), allowing dynamic batch sizes during inference. - """ - try: - # For Sequential models, input_shape should be available directly - if hasattr(self.model, 'input_shape') and self.model.input_shape: - input_shape = self.model.input_shape - # Always use None batch dimension for model.build() - return (None,) + input_shape[1:] - - # Fallback: try to get from first layer's batch_shape - if hasattr(self.model, "_layers") and self.model._layers: - first_layer = self.model._layers[0] - if hasattr(first_layer, 'batch_shape') and first_layer.batch_shape: - input_shape = first_layer.batch_shape - # Always use None batch dimension for model.build() - return (None,) + input_shape[1:] - - except Exception as e: - if self.verbose: - print(f"Warning: Could not infer Sequential input shape: {e}") - - return None - def _convert_to_tflite(self, input_signature): """Converts the Keras model to a TFLite model.""" is_sequential = isinstance(self.model, tf.keras.Sequential) diff --git a/keras/src/export/litert_exporter_test.py b/keras/src/export/litert_test.py similarity index 100% rename from keras/src/export/litert_exporter_test.py rename to keras/src/export/litert_test.py From cbe0229f98078cfe2667d04b4da1f91475569c59 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 10:08:06 +0530 Subject: [PATCH 057/115] Refactor import statements for export_utils functions Moved imports of get_input_signature and make_tf_tensor_spec to the module level in saved_model.py for consistency and efficiency. In litert.py, imports remain local to preserve lazy loading where needed. --- keras/src/export/litert.py | 4 ++-- keras/src/export/saved_model.py | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index e56d216deb08..f3ea082c6768 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -3,8 +3,6 @@ import tensorflow as tf from keras.src import tree -from keras.src.export.export_utils import get_input_signature -from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils from keras.src.utils.module_utils import litert @@ -97,6 +95,7 @@ def export(self, filepath): if self.input_signature is None: if self.verbose: print("Inferring input signature from model.") + from keras.src.export.export_utils import get_input_signature self.input_signature = get_input_signature(self.model) # 3. Convert the model to TFLite. @@ -234,6 +233,7 @@ def _convert_with_wrapper(self, input_signature): if not isinstance(input_signature, (list, tuple)): input_signature = [input_signature] + from keras.src.export.export_utils import make_tf_tensor_spec tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] # Pass tensor specs as positional arguments to get the concrete diff --git a/keras/src/export/saved_model.py b/keras/src/export/saved_model.py index c9686fafd243..935c8a1c4739 100644 --- a/keras/src/export/saved_model.py +++ b/keras/src/export/saved_model.py @@ -4,6 +4,8 @@ from keras.src import layers from keras.src import tree from keras.src.api_export import keras_export +from keras.src.export.export_utils import get_input_signature +from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils from keras.src.utils.module_utils import tensorflow as tf @@ -356,8 +358,6 @@ def serving_fn(x): self._endpoint_names.append(name) return decorated_fn - from keras.src.export.export_utils import make_tf_tensor_spec - input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -415,8 +415,6 @@ def track_and_add_endpoint(self, name, resource, input_signature, **kwargs): f"the jax backend. Current backend: {backend.backend()}" ) - from keras.src.export.export_utils import make_tf_tensor_spec - input_signature = tree.map_structure( make_tf_tensor_spec, input_signature ) @@ -650,8 +648,6 @@ def export_saved_model( verbose = True # Defaults to `True` for all backends. export_archive = ExportArchive() if input_signature is None: - from keras.src.export.export_utils import get_input_signature - input_signature = get_input_signature(model) export_archive.track_and_add_endpoint( @@ -694,4 +690,4 @@ def _list_variables_used_by_fns(fns): ): non_trainable_variables.append(v) non_trainable_variables_ids.add(id(v)) - return trainable_variables, non_trainable_variables + return trainable_variables, non_trainable_variables \ No newline at end of file From e52de85b86fff7f97854e6cf082e1210b57e7515 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 10:09:50 +0530 Subject: [PATCH 058/115] Update saved_model.py --- keras/src/export/saved_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/export/saved_model.py b/keras/src/export/saved_model.py index 935c8a1c4739..d5009a7ec4a6 100644 --- a/keras/src/export/saved_model.py +++ b/keras/src/export/saved_model.py @@ -690,4 +690,4 @@ def _list_variables_used_by_fns(fns): ): non_trainable_variables.append(v) non_trainable_variables_ids.add(id(v)) - return trainable_variables, non_trainable_variables \ No newline at end of file + return trainable_variables, non_trainable_variables From 87af9ed645f30228cc2c4818d6c4e607171934d1 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 10:13:38 +0530 Subject: [PATCH 059/115] Update litert.py --- keras/src/export/litert.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index f3ea082c6768..398be83b6ee3 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -96,6 +96,7 @@ def export(self, filepath): if self.verbose: print("Inferring input signature from model.") from keras.src.export.export_utils import get_input_signature + self.input_signature = get_input_signature(self.model) # 3. Convert the model to TFLite. @@ -234,6 +235,7 @@ def _convert_with_wrapper(self, input_signature): input_signature = [input_signature] from keras.src.export.export_utils import make_tf_tensor_spec + tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] # Pass tensor specs as positional arguments to get the concrete From c643772c181ad3f0f53b9aa0a8098da8ac4a6875 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 10:44:09 +0530 Subject: [PATCH 060/115] Add conditional TensorFlow import for LiteRT export LiteRT export and related tests now import TensorFlow conditionally, allowing Keras to function without TensorFlow installed. If TensorFlow is unavailable, LiteRT export is disabled and tests are skipped, with informative messages and error handling added to guide users. --- keras/src/export/__init__.py | 11 +++++++++-- keras/src/export/litert_test.py | 20 +++++++++++++++++--- keras/src/models/model.py | 7 +++++++ 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index f0b0be00231f..8782bca44ce4 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -1,7 +1,14 @@ -from keras.src.export.litert import LitertExporter -from keras.src.export.litert import export_litert from keras.src.export.onnx import export_onnx from keras.src.export.openvino import export_openvino from keras.src.export.saved_model import ExportArchive from keras.src.export.saved_model import export_saved_model from keras.src.export.tfsm_layer import TFSMLayer + +# LiteRT export requires TensorFlow, so we import conditionally +try: + from keras.src.export.litert import LitertExporter + from keras.src.export.litert import export_litert +except ImportError: + # TensorFlow not available, LiteRT export will not be available + LitertExporter = None + export_litert = None diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 5f9f829512e5..5c8906ae6264 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -2,7 +2,6 @@ import numpy as np import pytest -import tensorflow as tf from absl.testing import parameterized from keras.src import backend @@ -15,8 +14,16 @@ from keras.src.testing.test_utils import named_product from keras.src.utils.module_utils import litert +# Conditional import of TensorFlow for LiteRT tests +try: + import tensorflow as tf + TENSORFLOW_AVAILABLE = True +except ImportError: + tf = None + TENSORFLOW_AVAILABLE = False + # Use AI Edge LiteRT interpreter if available, fallback to TensorFlow Lite -if litert.available: +if litert.available and TENSORFLOW_AVAILABLE: try: from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter @@ -24,9 +31,12 @@ except ImportError: LiteRtInterpreter = tf.lite.Interpreter print("Using TensorFlow Lite interpreter as fallback") -else: +elif TENSORFLOW_AVAILABLE: LiteRtInterpreter = tf.lite.Interpreter print("Using TensorFlow Lite interpreter as fallback") +else: + LiteRtInterpreter = None + print("TensorFlow not available, LiteRT tests will be skipped") class CustomModel(models.Model): @@ -155,6 +165,10 @@ def _get_interpreter_outputs(interpreter): return outputs[0] if len(outputs) == 1 else outputs +@pytest.mark.skipif( + not TENSORFLOW_AVAILABLE, + reason="TensorFlow is required for LiteRT export tests.", +) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="`export_litert` currently supports the tensorflow backend only.", diff --git a/keras/src/models/model.py b/keras/src/models/model.py index a0976fa092c3..c4a100c8a1df 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -668,6 +668,13 @@ def export( f"Unrecognized format={format}. Supported formats are: " f"{list(available_formats)}." ) + + # Check if LiteRT export is available (requires TensorFlow) + if format == "litert" and export_litert is None: + raise ImportError( + "LiteRT export requires TensorFlow to be installed. " + "Please install TensorFlow: `pip install tensorflow`" + ) if format == "tf_saved_model": export_saved_model( From f243a6ebf5513cd97ee63cd6e180395a9f3950d3 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 10:45:46 +0530 Subject: [PATCH 061/115] reformat --- keras/src/export/litert_test.py | 1 + keras/src/models/model.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 5c8906ae6264..2333088d4602 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -17,6 +17,7 @@ # Conditional import of TensorFlow for LiteRT tests try: import tensorflow as tf + TENSORFLOW_AVAILABLE = True except ImportError: tf = None diff --git a/keras/src/models/model.py b/keras/src/models/model.py index c4a100c8a1df..7bfb4d6c95c3 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -668,7 +668,7 @@ def export( f"Unrecognized format={format}. Supported formats are: " f"{list(available_formats)}." ) - + # Check if LiteRT export is available (requires TensorFlow) if format == "litert" and export_litert is None: raise ImportError( From d8236fae509103d8c794d8f7e3bd33d188d23792 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 14:46:26 +0530 Subject: [PATCH 062/115] Update litert_test.py --- keras/src/export/litert_test.py | 86 +++++++++++++++++++++++---------- 1 file changed, 60 insertions(+), 26 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 2333088d4602..59d2fcd7d14e 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -12,32 +12,25 @@ from keras.src import tree from keras.src.saving import saving_lib from keras.src.testing.test_utils import named_product -from keras.src.utils.module_utils import litert +from keras.src.utils.module_utils import tensorflow + +# Check if AI Edge LiteRT interpreter is available and set it up +AI_EDGE_LITERT_AVAILABLE = False +LiteRtInterpreter = None -# Conditional import of TensorFlow for LiteRT tests try: - import tensorflow as tf + from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter - TENSORFLOW_AVAILABLE = True + AI_EDGE_LITERT_AVAILABLE = True except ImportError: - tf = None - TENSORFLOW_AVAILABLE = False - -# Use AI Edge LiteRT interpreter if available, fallback to TensorFlow Lite -if litert.available and TENSORFLOW_AVAILABLE: - try: - from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter - - print("Using AI Edge LiteRT interpreter") - except ImportError: - LiteRtInterpreter = tf.lite.Interpreter - print("Using TensorFlow Lite interpreter as fallback") -elif TENSORFLOW_AVAILABLE: - LiteRtInterpreter = tf.lite.Interpreter - print("Using TensorFlow Lite interpreter as fallback") -else: - LiteRtInterpreter = None - print("TensorFlow not available, LiteRT tests will be skipped") + # Fallback to TensorFlow Lite if available + if tensorflow.available: + LiteRtInterpreter = tensorflow.lite.Interpreter + +# Model types to test (LSTM only if AI Edge LiteRT is available) +model_types = ["sequential", "functional"] +if AI_EDGE_LITERT_AVAILABLE: + model_types.append("lstm") class CustomModel(models.Model): @@ -167,7 +160,7 @@ def _get_interpreter_outputs(interpreter): @pytest.mark.skipif( - not TENSORFLOW_AVAILABLE, + not tensorflow.available, reason="TensorFlow is required for LiteRT export tests.", ) @pytest.mark.skipif( @@ -178,13 +171,15 @@ def _get_interpreter_outputs(interpreter): testing.tensorflow_uses_gpu(), reason="LiteRT export tests are only run on CPU to avoid CI issues.", ) + # Note: Tests use AI Edge LiteRT interpreter when available, # fallback to TensorFlow Lite interpreter otherwise class ExportLitertTest(testing.TestCase): - @parameterized.named_parameters( - named_product(model_type=["sequential", "functional", "lstm"]) - ) + @parameterized.named_parameters(named_product(model_type=model_types)) def test_standard_model_export(self, model_type): + if model_type == "lstm" and not AI_EDGE_LITERT_AVAILABLE: + self.skipTest("LSTM models require AI Edge LiteRT interpreter.") + temp_filepath = os.path.join( self.get_temp_dir(), "exported_model.tflite" ) @@ -413,6 +408,45 @@ def test_export_error_handling(self): with self.assertRaises(ValueError): model.export(temp_filepath, format="invalid_format") + def test_export_invalid_filepath(self): + """Test that export fails with invalid file extension.""" + model = get_model("sequential") + dummy_input = np.random.random((3, 10)).astype(np.float32) + model(dummy_input) + + temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.txt") + + # Should raise AssertionError for wrong extension + with self.assertRaises(AssertionError): + model.export(temp_filepath, format="litert") + + def test_export_subclass_model(self): + """Test exporting subclass models (uses wrapper conversion path).""" + if LiteRtInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("subclass") + temp_filepath = os.path.join( + self.get_temp_dir(), "exported_model.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_output = _convert_to_numpy(model(ref_input)) + + # Export subclass model - this tests wrapper-based conversion + model.export(temp_filepath, format="litert") + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify inference + interpreter = LiteRtInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + if __name__ == "__main__": pytest.main([__file__]) From 83577bedca3beaa68aece3d199729bb4bd152fe2 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 14:50:59 +0530 Subject: [PATCH 063/115] Update litert_test.py --- keras/src/export/litert_test.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 59d2fcd7d14e..97670aa70059 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -12,6 +12,7 @@ from keras.src import tree from keras.src.saving import saving_lib from keras.src.testing.test_utils import named_product +from keras.src.utils.module_utils import litert from keras.src.utils.module_utils import tensorflow # Check if AI Edge LiteRT interpreter is available and set it up @@ -19,9 +20,10 @@ LiteRtInterpreter = None try: - from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter + if litert.available: + from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter - AI_EDGE_LITERT_AVAILABLE = True + AI_EDGE_LITERT_AVAILABLE = True except ImportError: # Fallback to TensorFlow Lite if available if tensorflow.available: From c53b2644677e78436cd016c71e3f79c833dad11f Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 6 Oct 2025 14:58:48 +0530 Subject: [PATCH 064/115] Update litert_test.py --- keras/src/export/litert_test.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 97670aa70059..b7255d94c924 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -15,7 +15,9 @@ from keras.src.utils.module_utils import litert from keras.src.utils.module_utils import tensorflow -# Check if AI Edge LiteRT interpreter is available and set it up +# Set up LiteRT interpreter with fallback logic: +# 1. Try AI Edge LiteRT interpreter (preferred) +# 2. Fall back to TensorFlow Lite interpreter if AI Edge LiteRT unavailable AI_EDGE_LITERT_AVAILABLE = False LiteRtInterpreter = None @@ -25,7 +27,7 @@ AI_EDGE_LITERT_AVAILABLE = True except ImportError: - # Fallback to TensorFlow Lite if available + # Fallback to TensorFlow Lite interpreter if AI Edge LiteRT unavailable if tensorflow.available: LiteRtInterpreter = tensorflow.lite.Interpreter @@ -167,18 +169,22 @@ def _get_interpreter_outputs(interpreter): ) @pytest.mark.skipif( backend.backend() != "tensorflow", - reason="`export_litert` currently supports the tensorflow backend only.", + reason="`export_litert` currently supports the TensorFlow backend only.", ) @pytest.mark.skipif( testing.tensorflow_uses_gpu(), reason="LiteRT export tests are only run on CPU to avoid CI issues.", ) - -# Note: Tests use AI Edge LiteRT interpreter when available, -# fallback to TensorFlow Lite interpreter otherwise class ExportLitertTest(testing.TestCase): + """Test suite for LiteRT (TFLite) model export functionality. + + Tests use AI Edge LiteRT interpreter when available, otherwise fall back + to TensorFlow Lite interpreter for validation. + """ + @parameterized.named_parameters(named_product(model_type=model_types)) def test_standard_model_export(self, model_type): + """Test exporting standard model types to LiteRT format.""" if model_type == "lstm" and not AI_EDGE_LITERT_AVAILABLE: self.skipTest("LSTM models require AI Edge LiteRT interpreter.") @@ -186,7 +192,7 @@ def test_standard_model_export(self, model_type): self.get_temp_dir(), "exported_model.tflite" ) model = get_model(model_type) - batch_size = 1 # TFLite expects batch_size=1 + batch_size = 1 # LiteRT expects batch_size=1 if model_type == "lstm": ref_input = np.random.normal(size=(batch_size, 4, 10)) else: @@ -211,7 +217,8 @@ def test_standard_model_export(self, model_type): named_product(struct_type=["tuple", "array", "dict"]) ) def test_model_with_input_structure(self, struct_type): - batch_size = 1 # TFLite expects batch_size=1 + """Test exporting models with structured inputs (tuple/array/dict).""" + batch_size = 1 # LiteRT expects batch_size=1 base_input = np.random.normal(size=(batch_size, 10)).astype("float32") if struct_type == "tuple": @@ -270,6 +277,7 @@ def test_model_with_input_structure(self, struct_type): self.assertAllClose(ref_output, revived_output) def test_model_with_multiple_inputs(self): + """Test exporting models with multiple inputs and batch resizing.""" temp_filepath = os.path.join( self.get_temp_dir(), "exported_model.tflite" ) @@ -280,7 +288,7 @@ def test_model_with_multiple_inputs(self): output = layers.Add()([input_x, input_y]) model = models.Model(inputs=[input_x, input_y], outputs=output) - batch_size = 1 # TFLite expects batch_size=1 + batch_size = 1 # LiteRT expects batch_size=1 ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32") ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32") ref_output = _convert_to_numpy(model([ref_input_x, ref_input_y])) @@ -317,6 +325,7 @@ def test_model_with_multiple_inputs(self): ) def test_export_with_custom_input_signature(self): + """Test exporting with custom input signature specification.""" model = get_model("sequential") temp_filepath = os.path.join( self.get_temp_dir(), "exported_model.tflite" From 487184d0739c82b02c044b53914affc86b7223c0 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 7 Oct 2025 09:10:37 +0530 Subject: [PATCH 065/115] Update litert_test.py --- keras/src/export/litert_test.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index b7255d94c924..926a91e1831e 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -21,12 +21,16 @@ AI_EDGE_LITERT_AVAILABLE = False LiteRtInterpreter = None -try: - if litert.available: +if litert.available: + try: from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter AI_EDGE_LITERT_AVAILABLE = True -except ImportError: + except ImportError: + # Fallback to TensorFlow Lite interpreter if AI Edge LiteRT unavailable + if tensorflow.available: + LiteRtInterpreter = tensorflow.lite.Interpreter +else: # Fallback to TensorFlow Lite interpreter if AI Edge LiteRT unavailable if tensorflow.available: LiteRtInterpreter = tensorflow.lite.Interpreter From 374d90bd678e9eadb0df3ad196e7cc385b798065 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 7 Oct 2025 09:14:10 +0530 Subject: [PATCH 066/115] Update requirements-tensorflow-cuda.txt --- requirements-tensorflow-cuda.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index f895f0224154..05125b381c91 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -1,6 +1,7 @@ # Tensorflow with cuda support. tensorflow[and-cuda]~=2.18.1 tf2onnx +ai-edge-litert # Torch cpu-only version (needed for testing). --extra-index-url https://download.pytorch.org/whl/cpu From f99a103f9743773006704ddcf4b7e2f06ada9df2 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 13 Oct 2025 11:12:31 +0530 Subject: [PATCH 067/115] Add litert_kwargs support to LiteRT export Introduces a litert_kwargs parameter for LiteRT model export, allowing users to specify custom export options such as allow_custom_ops, enable_select_tf_ops, and optimizations. This enhances flexibility when exporting models to the LiteRT format. --- keras/src/models/model.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 7bfb4d6c95c3..ad8df6097acb 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -592,6 +592,17 @@ def export( provided, they will be automatically computed. - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. + - `litert_kwargs`: Optional `dict`. Specific to + `format="litert"`. A dictionary containing LiteRT export + parameters. Can include: + - `allow_custom_ops`: Optional `bool`. Whether to allow + custom operations during conversion. Defaults to + `False`. + - `enable_select_tf_ops`: Optional `bool`. Whether to + enable TensorFlow Select ops for unsupported + operations. Defaults to `False`. + - `optimizations`: Optional `list`. List of optimizations + to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). - `allow_custom_ops`: Optional `bool`. Specific to `format="litert"`. Whether to allow custom operations during conversion. @@ -701,11 +712,13 @@ def export( **kwargs, ) elif format == "litert": + litert_kwargs = kwargs.pop("litert_kwargs", {}) export_litert( self, filepath, verbose=verbose, input_signature=input_signature, + **litert_kwargs, **kwargs, ) From d01a4cb544e5ce74c6b7c0f12a864c1b95f6014e Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 13 Oct 2025 11:17:49 +0530 Subject: [PATCH 068/115] Update model.py --- keras/src/models/model.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index ad8df6097acb..9ff8bb12908c 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -603,18 +603,6 @@ def export( operations. Defaults to `False`. - `optimizations`: Optional `list`. List of optimizations to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). - - `allow_custom_ops`: Optional `bool`. Specific to - `format="litert"`. - Whether to allow custom operations during conversion. - Defaults to `False`. - - `enable_select_tf_ops`: Optional `bool`. Specific to - `format="litert"`. - Whether to enable TensorFlow Select ops for unsupported - operations. Defaults to `False`. - - `optimizations`: Optional `list`. Specific to - `format="litert"`. - List of optimizations to apply (e.g., - `[tf.lite.Optimize.DEFAULT]`). **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. From 52440e1dd1bddeb3be89493b851bc38df0292f5b Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 14 Oct 2025 10:20:07 +0530 Subject: [PATCH 069/115] Refactor LiteRT export wrapper and test setup Moved KerasModelWrapper definition inside LitertExporter for dynamic class creation and removed the old _KerasModelWrapper. Updated import logic for TensorFlow to use module_utils. Improved LiteRT test interpreter selection and simplified test skipping conditions for better backend compatibility. --- keras/src/export/__init__.py | 11 +-- keras/src/export/litert.py | 162 ++++++++++++++++---------------- keras/src/export/litert_test.py | 29 ++---- 3 files changed, 94 insertions(+), 108 deletions(-) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index 8782bca44ce4..f0b0be00231f 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -1,14 +1,7 @@ +from keras.src.export.litert import LitertExporter +from keras.src.export.litert import export_litert from keras.src.export.onnx import export_onnx from keras.src.export.openvino import export_openvino from keras.src.export.saved_model import ExportArchive from keras.src.export.saved_model import export_saved_model from keras.src.export.tfsm_layer import TFSMLayer - -# LiteRT export requires TensorFlow, so we import conditionally -try: - from keras.src.export.litert import LitertExporter - from keras.src.export.litert import export_litert -except ImportError: - # TensorFlow not available, LiteRT export will not be available - LitertExporter = None - export_litert = None diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 398be83b6ee3..8365617255ee 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,10 +1,9 @@ import os -import tensorflow as tf - from keras.src import tree from keras.src.utils import io_utils from keras.src.utils.module_utils import litert +from keras.src.utils.module_utils import tensorflow as tf def export_litert( @@ -227,8 +226,88 @@ def _convert_to_tflite(self, input_signature): def _convert_with_wrapper(self, input_signature): """Converts the model to TFLite using the tf.Module wrapper.""" + + # Define wrapper class dynamically to avoid module-level + # tf.Module inheritance + class KerasModelWrapper(tf.Module): + """A tf.Module wrapper for a Keras model. + + This wrapper is designed to be a clean, serializable + interface for TFLite conversion. It holds the Keras model + and exposes a single `__call__` method that is decorated + with `tf.function`. Crucially, it also ensures all variables + from the Keras model are tracked by the SavedModel format, + which is key to including them in the final TFLite model. + """ + + def __init__(self, model): + super().__init__() + # Store the model reference in a way that TensorFlow + # won't try to track it. This prevents the _DictWrapper + # error during SavedModel serialization + object.__setattr__(self, "_model", model) + + # Track all variables from the Keras model using proper + # tf.Module methods. This ensures proper variable + # handling for stateful layers like BatchNorm + with self.name_scope: + for i, var in enumerate(model.variables): + # Use a different attribute name to avoid + # conflicts with tf.Module's variables property + setattr(self, f"model_var_{i}", var) + + @tf.function + def __call__(self, *args, **kwargs): + """The single entry point for the exported model.""" + # Handle both single and multi-input cases + if args and not kwargs: + # Called with positional arguments + if len(args) == 1: + return self._model(args[0]) + else: + return self._model(list(args)) + elif kwargs and not args: + # Called with keyword arguments + if len(kwargs) == 1 and "inputs" in kwargs: + # Single input case + return self._model(kwargs["inputs"]) + else: + # Multi-input case - convert to list/dict format + # expected by model + if ( + hasattr(self._model, "inputs") + and len(self._model.inputs) > 1 + ): + # Multi-input functional model + input_list = [] + missing_inputs = [] + for input_layer in self._model.inputs: + input_name = input_layer.name + if input_name in kwargs: + input_list.append(kwargs[input_name]) + else: + missing_inputs.append(input_name) + + if missing_inputs: + raise ValueError( + "Missing required inputs for " + f"multi-input model: {missing_inputs}. " + f"Available kwargs: " + f"{list(kwargs.keys())}. Please provide " + "all inputs by name." + ) + + return self._model(input_list) + else: + # Single input model called with named + # arguments + return self._model(list(kwargs.values())[0]) + else: + # Fallback to original call + return self._model(*args, **kwargs) + # 1. Wrap the Keras model in our clean tf.Module. - wrapper = _KerasModelWrapper(self.model) + wrapper = KerasModelWrapper(self.model) # 2. Get a concrete function from the wrapper. if not isinstance(input_signature, (list, tuple)): @@ -395,80 +474,3 @@ def get_available_targets(cls): dummy_exporter = cls(model=None) return dummy_exporter._get_available_litert_targets() - - -class _KerasModelWrapper(tf.Module): - """ - A tf.Module wrapper for a Keras model. - - This wrapper is designed to be a clean, serializable interface for TFLite - conversion. It holds the Keras model and exposes a single `__call__` - method that is decorated with `tf.function`. Crucially, it also ensures - all variables from the Keras model are tracked by the SavedModel format, - which is key to including them in the final TFLite model. - """ - - def __init__(self, model): - super().__init__() - # Store the model reference in a way that TensorFlow won't try to - # track it - # This prevents the _DictWrapper error during SavedModel serialization - object.__setattr__(self, "_model", model) - - # Track all variables from the Keras model using proper tf.Module - # methods - # This ensures proper variable handling for stateful layers like - # BatchNorm - with self.name_scope: - for i, var in enumerate(model.variables): - # Use a different attribute name to avoid conflicts with - # tf.Module's variables property - setattr(self, f"model_var_{i}", var) - - @tf.function - def __call__(self, *args, **kwargs): - """The single entry point for the exported model.""" - # Handle both single and multi-input cases - if args and not kwargs: - # Called with positional arguments - if len(args) == 1: - return self._model(args[0]) - else: - return self._model(list(args)) - elif kwargs and not args: - # Called with keyword arguments - if len(kwargs) == 1 and "inputs" in kwargs: - # Single input case - return self._model(kwargs["inputs"]) - else: - # Multi-input case - convert to list/dict format expected by - # model - if ( - hasattr(self._model, "inputs") - and len(self._model.inputs) > 1 - ): - # Multi-input functional model - input_list = [] - missing_inputs = [] - for input_layer in self._model.inputs: - input_name = input_layer.name - if input_name in kwargs: - input_list.append(kwargs[input_name]) - else: - missing_inputs.append(input_name) - - if missing_inputs: - raise ValueError( - f"Missing required inputs for multi-input model: " - f"{missing_inputs}. Available kwargs: " - f"{list(kwargs.keys())}. Please provide all inputs " - f"by name." - ) - - return self._model(input_list) - else: - # Single input model called with named arguments - return self._model(list(kwargs.values())[0]) - else: - # Fallback to original call - return self._model(*args, **kwargs) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 926a91e1831e..49ddd8d13868 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -21,18 +21,17 @@ AI_EDGE_LITERT_AVAILABLE = False LiteRtInterpreter = None -if litert.available: - try: - from ai_edge_litert.interpreter import Interpreter as LiteRtInterpreter - - AI_EDGE_LITERT_AVAILABLE = True - except ImportError: - # Fallback to TensorFlow Lite interpreter if AI Edge LiteRT unavailable - if tensorflow.available: +if backend.backend() == "tensorflow": + if litert.available: + try: + from ai_edge_litert.interpreter import ( + Interpreter as LiteRtInterpreter, + ) + + AI_EDGE_LITERT_AVAILABLE = True + except (ImportError, OSError): LiteRtInterpreter = tensorflow.lite.Interpreter -else: - # Fallback to TensorFlow Lite interpreter if AI Edge LiteRT unavailable - if tensorflow.available: + else: LiteRtInterpreter = tensorflow.lite.Interpreter # Model types to test (LSTM only if AI Edge LiteRT is available) @@ -167,18 +166,10 @@ def _get_interpreter_outputs(interpreter): return outputs[0] if len(outputs) == 1 else outputs -@pytest.mark.skipif( - not tensorflow.available, - reason="TensorFlow is required for LiteRT export tests.", -) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="`export_litert` currently supports the TensorFlow backend only.", ) -@pytest.mark.skipif( - testing.tensorflow_uses_gpu(), - reason="LiteRT export tests are only run on CPU to avoid CI issues.", -) class ExportLitertTest(testing.TestCase): """Test suite for LiteRT (TFLite) model export functionality. From 794d85dc05b15ba8ba8eb84624e17e4b012af9a1 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 14 Oct 2025 10:40:19 +0530 Subject: [PATCH 070/115] Update export_utils.py --- keras/src/export/export_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 641e03e23357..2da2ae6eb004 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -56,8 +56,6 @@ def _infer_input_signature_from_model(model): def _make_input_spec(structure): # We need to turn wrapper structures like TrackingDict or _DictWrapper # into plain Python structures because they don't work with jax2tf/JAX. - if structure is None: - return None if isinstance(structure, dict): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): From 7a46f781044ea91228e8f3d5b9113452fba05594 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 14 Oct 2025 10:47:47 +0530 Subject: [PATCH 071/115] Replace print statements with io_utils.print_msg and logging Updated verbose output in LitertExporter to use io_utils.print_msg instead of print for consistency and better message handling. Warnings about unavailable LiteRT now use the logging module. Improved comments and formatting for clarity. --- keras/src/export/litert.py | 135 +++++++++++++++++++++---------------- 1 file changed, 78 insertions(+), 57 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 8365617255ee..3e650062657a 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,3 +1,4 @@ +import logging import os from keras.src import tree @@ -85,7 +86,7 @@ def export(self, filepath): performed """ if self.verbose: - print("Starting Litert export...") + io_utils.print_msg("Starting Litert export...") # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() @@ -93,7 +94,7 @@ def export(self, filepath): # 2. Resolve / infer input signature if self.input_signature is None: if self.verbose: - print("Inferring input signature from model.") + io_utils.print_msg("Inferring input signature from model.") from keras.src.export.export_utils import get_input_signature self.input_signature = get_input_signature(self.model) @@ -103,7 +104,7 @@ def export(self, filepath): if self.verbose: final_size_mb = len(tflite_model) / (1024 * 1024) - print( + io_utils.print_msg( f"TFLite model converted successfully. Size: " f"{final_size_mb:.2f} MB" ) @@ -118,26 +119,29 @@ def export(self, filepath): f.write(tflite_model) if self.verbose: - print(f"TFLite model saved to {filepath}") + io_utils.print_msg(f"TFLite model saved to {filepath}") # 5. Perform AOT compilation if targets are specified and LiteRT is # available compiled_models = None if self.aot_compile_targets and litert.available: if self.verbose: - print("Performing AOT compilation for Litert targets...") + io_utils.print_msg( + "Performing AOT compilation for Litert targets..." + ) compiled_models = self._aot_compile(filepath) elif self.aot_compile_targets and not litert.available: - if self.verbose: - print( - "Warning: AOT compilation requested but LiteRT is not " - "available. Skipping." - ) + logging.warning( + "AOT compilation requested but LiteRT is not available. " + "Skipping AOT compilation." + ) if self.verbose: - print(f"Litert export completed. Base model: {filepath}") + io_utils.print_msg( + f"Litert export completed. Base model: {filepath}" + ) if compiled_models: - print( + io_utils.print_msg( f"AOT compiled models: {len(compiled_models.models)} " "variants" ) @@ -155,7 +159,7 @@ def _ensure_model_built(self): return if self.verbose: - print("Building model before conversion...") + io_utils.print_msg("Building model before conversion...") try: # Try to build using input_signature if available @@ -173,17 +177,18 @@ def _ensure_model_built(self): self.model.build(input_shapes) else: raise ValueError( - "Cannot build model: no input_signature provided and " - "model has no inputs attribute. Please provide an " - "input_signature or ensure the model is already built." + "Cannot export model to the litert format as the " + "input_signature could not be inferred. Either pass an " + "`input_signature` to `model.export()` or ensure that the " + "model is already built (called once on real inputs)." ) if self.verbose: - print("Model built successfully.") + io_utils.print_msg("Model built successfully.") except Exception as e: if self.verbose: - print(f"Error building model: {e}") + io_utils.print_msg(f"Error building model: {e}") raise ValueError( f"Failed to build model: {e}. Please ensure the model is " "properly defined or provide an input_signature." @@ -197,7 +202,7 @@ def _convert_to_tflite(self, input_signature): try: if self.verbose: model_type = "Sequential" if is_sequential else "Functional" - print( + io_utils.print_msg( f"{model_type} model detected. Trying direct conversion..." ) @@ -210,50 +215,55 @@ def _convert_to_tflite(self, input_signature): tflite_model = converter.convert() if self.verbose: - print("Direct conversion successful.") + io_utils.print_msg("Direct conversion successful.") return tflite_model except Exception as direct_error: if self.verbose: model_type = "Sequential" if is_sequential else "Functional" - print( + io_utils.print_msg( f"Direct conversion failed for {model_type} model: " f"{direct_error}" ) - print("Falling back to wrapper-based conversion...") + io_utils.print_msg( + "Falling back to wrapper-based conversion..." + ) return self._convert_with_wrapper(input_signature) def _convert_with_wrapper(self, input_signature): """Converts the model to TFLite using the tf.Module wrapper.""" - # Define wrapper class dynamically to avoid module-level + # Define the wrapper class dynamically to avoid module-level # tf.Module inheritance class KerasModelWrapper(tf.Module): - """A tf.Module wrapper for a Keras model. - - This wrapper is designed to be a clean, serializable - interface for TFLite conversion. It holds the Keras model - and exposes a single `__call__` method that is decorated - with `tf.function`. Crucially, it also ensures all variables - from the Keras model are tracked by the SavedModel format, - which is key to including them in the final TFLite model. + """ + A tf.Module wrapper for a Keras model. + + This wrapper is designed to be a clean, serializable interface + for TFLite conversion. It holds the Keras model and exposes a + single `__call__` method that is decorated with `tf.function`. + Crucially, it also ensures all variables from the Keras model + are tracked by the SavedModel format, which is key to including + them in the final TFLite model. """ def __init__(self, model): super().__init__() - # Store the model reference in a way that TensorFlow - # won't try to track it. This prevents the _DictWrapper - # error during SavedModel serialization + # Store the model reference in a way that TensorFlow won't + # try to track it + # This prevents the _DictWrapper error during SavedModel + # serialization object.__setattr__(self, "_model", model) # Track all variables from the Keras model using proper - # tf.Module methods. This ensures proper variable - # handling for stateful layers like BatchNorm + # tf.Module methods + # This ensures proper variable handling for stateful layers + # like BatchNorm with self.name_scope: for i, var in enumerate(model.variables): - # Use a different attribute name to avoid - # conflicts with tf.Module's variables property + # Use a different attribute name to avoid conflicts with + # tf.Module's variables property setattr(self, f"model_var_{i}", var) @tf.function @@ -289,18 +299,17 @@ def __call__(self, *args, **kwargs): missing_inputs.append(input_name) if missing_inputs: + available = list(kwargs.keys()) raise ValueError( - "Missing required inputs for " - f"multi-input model: {missing_inputs}. " - f"Available kwargs: " - f"{list(kwargs.keys())}. Please provide " - "all inputs by name." + f"Missing required inputs for multi-input " + f"model: {missing_inputs}. " + f"Available kwargs: {available}. " + f"Please provide all inputs by name." ) return self._model(input_list) else: - # Single input model called with named - # arguments + # Single input model called with named arguments return self._model(list(kwargs.values())[0]) else: # Fallback to original call @@ -323,7 +332,9 @@ def __call__(self, *args, **kwargs): # 3. Convert from the concrete function. if self.verbose: - print("Converting concrete function to TFLite format...") + io_utils.print_msg( + "Converting concrete function to TFLite format..." + ) # Try multiple conversion strategies for better inference compatibility conversion_strategies = [ @@ -351,18 +362,24 @@ def __call__(self, *args, **kwargs): ] if self.verbose: - print(f"Trying conversion {strategy['name']}...") + io_utils.print_msg( + f"Trying conversion {strategy['name']}..." + ) tflite_model = converter.convert() if self.verbose: - print(f"Conversion successful {strategy['name']}!") + io_utils.print_msg( + f"Conversion successful {strategy['name']}!" + ) return tflite_model except Exception as e: if self.verbose: - print(f"Conversion failed {strategy['name']}: {e}") + io_utils.print_msg( + f"Conversion failed {strategy['name']}: {e}" + ) continue # If all strategies fail, raise the last error @@ -387,8 +404,10 @@ def _aot_compile(self, tflite_filepath): output_dir = os.path.join(base_dir, f"{model_name}_compiled") if self.verbose: - print(f"AOT compiling for targets: {self.aot_compile_targets}") - print(f"Output directory: {output_dir}") + io_utils.print_msg( + f"AOT compiling for targets: {self.aot_compile_targets}" + ) + io_utils.print_msg(f"Output directory: {output_dir}") # Perform AOT compilation result = litert.python.aot.aot_compile( @@ -399,20 +418,22 @@ def _aot_compile(self, tflite_filepath): ) if self.verbose: - print( + io_utils.print_msg( f"AOT compilation completed: {len(result.models)} " f"successful, {len(result.failed_backends)} failed" ) if result.failed_backends: for backend, error in result.failed_backends: - print(f" Failed: {backend.id()} - {error}") + io_utils.print_msg( + f" Failed: {backend.id()} - {error}" + ) # Print compilation report if available try: report = result.compilation_report() if report: - print("Compilation Report:") - print(report) + io_utils.print_msg("Compilation Report:") + io_utils.print_msg(report) except Exception: pass @@ -420,7 +441,7 @@ def _aot_compile(self, tflite_filepath): except Exception as e: if self.verbose: - print(f"AOT compilation failed: {e}") + io_utils.print_msg(f"AOT compilation failed: {e}") import traceback traceback.print_exc() @@ -439,7 +460,7 @@ def _get_available_litert_targets(self): return targets if isinstance(targets, list) else [targets] except Exception as e: if self.verbose: - print(f"Failed to get available targets: {e}") + io_utils.print_msg(f"Failed to get available targets: {e}") return [] @classmethod From d2b90eb8ae69c9faf262f23628645c2701c70969 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 16 Oct 2025 15:58:05 +0530 Subject: [PATCH 072/115] typo fix --- keras/src/export/litert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 3e650062657a..6d7f7632e5bf 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -24,7 +24,7 @@ def export_litert( `None`, which uses the default value set by different backends and formats. input_signature: Optional input signature specification. If - ``None``, it will be inferred. + `None`, it will be inferred. aot_compile_targets: Optional list of Litert targets for AOT compilation. **kwargs: Additional keyword arguments passed to the exporter. From 191f802330c07b92edd78dd8ef9b59e36a7764da Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 16 Oct 2025 16:07:16 +0530 Subject: [PATCH 073/115] set verbose to True by default --- keras/src/export/litert.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 6d7f7632e5bf..54f818323f98 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -10,7 +10,7 @@ def export_litert( model, filepath, - verbose=None, + verbose=True, input_signature=None, aot_compile_targets=None, **kwargs, @@ -30,9 +30,6 @@ def export_litert( **kwargs: Additional keyword arguments passed to the exporter. """ - if verbose is None: - verbose = True # Defaults to `True` for all backends. - exporter = LitertExporter( model=model, input_signature=input_signature, From b736ededbf8fc58fbdbac5b71d7d9c19a8614b79 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 16 Oct 2025 16:10:47 +0530 Subject: [PATCH 074/115] removed unnecessary variable --- keras/src/export/litert_test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 49ddd8d13868..0407b9aa33ae 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -197,10 +197,9 @@ def test_standard_model_export(self, model_type): # Test with model.export() model.export(temp_filepath, format="litert") - export_path = temp_filepath - self.assertTrue(os.path.exists(export_path)) + self.assertTrue(os.path.exists(temp_filepath)) - interpreter = LiteRtInterpreter(model_path=export_path) + interpreter = LiteRtInterpreter(model_path=temp_filepath) interpreter.allocate_tensors() _set_interpreter_inputs(interpreter, ref_input) interpreter.invoke() From 27f1d07c39440f05e5e13d75b10b5642a7985b2a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 21 Oct 2025 12:52:53 +0530 Subject: [PATCH 075/115] Rename LitertExporter to LiteRTExporter Updated all references from LitertExporter to LiteRTExporter in the export module for consistency and clarity. Also corrected related docstrings and messages to use the LiteRT naming. --- keras/src/export/__init__.py | 2 +- keras/src/export/litert.py | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/keras/src/export/__init__.py b/keras/src/export/__init__.py index f0b0be00231f..4f4c9ed3967c 100644 --- a/keras/src/export/__init__.py +++ b/keras/src/export/__init__.py @@ -1,4 +1,4 @@ -from keras.src.export.litert import LitertExporter +from keras.src.export.litert import LiteRTExporter from keras.src.export.litert import export_litert from keras.src.export.onnx import export_onnx from keras.src.export.openvino import export_openvino diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 54f818323f98..ad7118904a4f 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -15,7 +15,7 @@ def export_litert( aot_compile_targets=None, **kwargs, ): - """Export the model as a Litert artifact for inference. + """Export the model as a LiteRT artifact for inference. Args: model: The Keras model to export. @@ -25,12 +25,12 @@ def export_litert( formats. input_signature: Optional input signature specification. If `None`, it will be inferred. - aot_compile_targets: Optional list of Litert targets for AOT - compilation. + aot_compile_targets: Optional list of LiteRT targets for AOT + compilation. **kwargs: Additional keyword arguments passed to the exporter. """ - exporter = LitertExporter( + exporter = LiteRTExporter( model=model, input_signature=input_signature, verbose=verbose, @@ -42,9 +42,9 @@ def export_litert( io_utils.print_msg(f"Saved artifact at '{filepath}'.") -class LitertExporter: +class LiteRTExporter: """ - Exporter for the Litert (TFLite) format that creates a single, + Exporter for the LiteRT (TFLite) format that creates a single, callable signature for `model.call`. """ @@ -56,13 +56,13 @@ def __init__( aot_compile_targets=None, **kwargs, ): - """Initialize the Litert exporter. + """Initialize the LiteRT exporter. Args: model: The Keras model to export input_signature: Input signature specification verbose: Whether to print progress messages during export. - aot_compile_targets: List of Litert targets for AOT compilation + aot_compile_targets: List of LiteRT targets for AOT compilation **kwargs: Additional export parameters """ self.model = model @@ -83,7 +83,7 @@ def export(self, filepath): performed """ if self.verbose: - io_utils.print_msg("Starting Litert export...") + io_utils.print_msg("Starting LiteRT export...") # 1. Ensure the model is built by calling it if necessary self._ensure_model_built() @@ -124,7 +124,7 @@ def export(self, filepath): if self.aot_compile_targets and litert.available: if self.verbose: io_utils.print_msg( - "Performing AOT compilation for Litert targets..." + "Performing AOT compilation for LiteRT targets..." ) compiled_models = self._aot_compile(filepath) elif self.aot_compile_targets and not litert.available: @@ -135,7 +135,7 @@ def export(self, filepath): if self.verbose: io_utils.print_msg( - f"Litert export completed. Base model: {filepath}" + f"LiteRT export completed. Base model: {filepath}" ) if compiled_models: io_utils.print_msg( From 17dccf2ad9228550d132aee14148e15850a06565 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 21 Oct 2025 13:05:48 +0530 Subject: [PATCH 076/115] Update litert.py --- keras/src/export/litert.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index ad7118904a4f..cddcba52bf4e 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -248,15 +248,13 @@ class KerasModelWrapper(tf.Module): def __init__(self, model): super().__init__() # Store the model reference in a way that TensorFlow won't - # try to track it - # This prevents the _DictWrapper error during SavedModel - # serialization + # try to track it. This prevents the _DictWrapper error during + # SavedModel serialization object.__setattr__(self, "_model", model) # Track all variables from the Keras model using proper - # tf.Module methods - # This ensures proper variable handling for stateful layers - # like BatchNorm + # tf.Module methods. This ensures proper variable handling for + # stateful layers like BatchNorm with self.name_scope: for i, var in enumerate(model.variables): # Use a different attribute name to avoid conflicts with From 3e16ab31b32a4beb7e1263fb93e7dbb34c20f8be Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 10:34:51 +0530 Subject: [PATCH 077/115] Update export_utils.py --- keras/src/export/export_utils.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 2da2ae6eb004..f95d2e90a523 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -60,22 +60,12 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - # For export, force batch dimension to None for flexible - # batching - shape = ( - (None,) + structure[1:] if len(structure) > 0 else structure - ) + shape = (None,) + structure[1:] return layers.InputSpec(shape=shape, dtype=model.input_dtype) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - # For export, force batch dimension to None for flexible - # batching - shape = ( - (None,) + tuple(structure[1:]) - if len(structure) > 0 - else tuple(structure) - ) + shape = (None,) + tuple(structure[1:]) return layers.InputSpec(shape=shape, dtype=model.input_dtype) return [_make_input_spec(v) for v in structure] else: From efbc6d37c6059da4148517878aed075fe567bca2 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 10:45:00 +0530 Subject: [PATCH 078/115] Fix input signature inference and doc formatting Improves error messaging in export_utils.py and refines input signature inference logic. Also corrects code block formatting in model.py documentation. --- keras/src/export/export_utils.py | 9 +++++---- keras/src/models/model.py | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index f95d2e90a523..1ad6571a2da2 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -42,8 +42,8 @@ def get_input_signature(model): input_signature = _infer_input_signature_from_model(model) if not input_signature or not model._called: raise ValueError( - "The model provided has never called. It must be called " - "at least once before export." + "The model provided has never called. " + "It must be called at least once before export." ) return input_signature @@ -60,8 +60,9 @@ def _make_input_spec(structure): return {k: _make_input_spec(v) for k, v in structure.items()} elif isinstance(structure, tuple): if all(isinstance(d, (int, type(None))) for d in structure): - shape = (None,) + structure[1:] - return layers.InputSpec(shape=shape, dtype=model.input_dtype) + return layers.InputSpec( + shape=(None,) + structure[1:], dtype=model.input_dtype + ) return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 9ff8bb12908c..25a6f4e4331f 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -136,6 +136,7 @@ def call(self, inputs, training=False): keras.Input(shape=(None, None, 3)), keras.layers.Conv2D(filters=32, kernel_size=3), ]) + ``` """ def __new__(cls, *args, **kwargs): From 7825983f9d8c05a09c79ec22f76ed2adbc38c32f Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 10:49:54 +0530 Subject: [PATCH 079/115] Update export_utils.py --- keras/src/export/export_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index 1ad6571a2da2..d61868447b0c 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -66,8 +66,9 @@ def _make_input_spec(structure): return tuple(_make_input_spec(v) for v in structure) elif isinstance(structure, list): if all(isinstance(d, (int, type(None))) for d in structure): - shape = (None,) + tuple(structure[1:]) - return layers.InputSpec(shape=shape, dtype=model.input_dtype) + return layers.InputSpec( + shape=[None] + structure[1:], dtype=model.input_dtype + ) return [_make_input_spec(v) for v in structure] else: raise ValueError( From 676a53cd54fd832deebb0e5bd7fba01fe7b2a75f Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 11:00:02 +0530 Subject: [PATCH 080/115] Update litert.py --- keras/src/export/litert.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index cddcba52bf4e..9687886f5ddb 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -100,6 +100,7 @@ def export(self, filepath): tflite_model = self._convert_to_tflite(self.input_signature) if self.verbose: + # Calculate model size from the serialized bytes final_size_mb = len(tflite_model) / (1024 * 1024) io_utils.print_msg( f"TFLite model converted successfully. Size: " @@ -192,7 +193,11 @@ def _ensure_model_built(self): ) def _convert_to_tflite(self, input_signature): - """Converts the Keras model to a TFLite model.""" + """Converts the Keras model to TFLite format. + + Returns: + A bytes object containing the serialized TFLite model. + """ is_sequential = isinstance(self.model, tf.keras.Sequential) # Try direct conversion first for all models @@ -229,7 +234,11 @@ def _convert_to_tflite(self, input_signature): return self._convert_with_wrapper(input_signature) def _convert_with_wrapper(self, input_signature): - """Converts the model to TFLite using the tf.Module wrapper.""" + """Converts the model to TFLite using the tf.Module wrapper. + + Returns: + A bytes object containing the serialized TFLite model. + """ # Define the wrapper class dynamically to avoid module-level # tf.Module inheritance From 4b6386e034f4ee8fe355571cec6ebce6e4cb417a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 11:00:26 +0530 Subject: [PATCH 081/115] Update litert.py --- keras/src/export/litert.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 9687886f5ddb..1fc6579d499c 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -194,7 +194,7 @@ def _ensure_model_built(self): def _convert_to_tflite(self, input_signature): """Converts the Keras model to TFLite format. - + Returns: A bytes object containing the serialized TFLite model. """ @@ -235,7 +235,7 @@ def _convert_to_tflite(self, input_signature): def _convert_with_wrapper(self, input_signature): """Converts the model to TFLite using the tf.Module wrapper. - + Returns: A bytes object containing the serialized TFLite model. """ From 79f05c8645fd169821e720e0bdd09fd47cb0f958 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 11:04:30 +0530 Subject: [PATCH 082/115] Update litert_test.py --- keras/src/export/litert_test.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 0407b9aa33ae..2bc8d26b9188 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -19,20 +19,20 @@ # 1. Try AI Edge LiteRT interpreter (preferred) # 2. Fall back to TensorFlow Lite interpreter if AI Edge LiteRT unavailable AI_EDGE_LITERT_AVAILABLE = False -LiteRtInterpreter = None +LiteRTInterpreter = None if backend.backend() == "tensorflow": if litert.available: try: from ai_edge_litert.interpreter import ( - Interpreter as LiteRtInterpreter, + Interpreter as LiteRTInterpreter, ) AI_EDGE_LITERT_AVAILABLE = True except (ImportError, OSError): - LiteRtInterpreter = tensorflow.lite.Interpreter + LiteRTInterpreter = tensorflow.lite.Interpreter else: - LiteRtInterpreter = tensorflow.lite.Interpreter + LiteRTInterpreter = tensorflow.lite.Interpreter # Model types to test (LSTM only if AI Edge LiteRT is available) model_types = ["sequential", "functional"] @@ -199,7 +199,7 @@ def test_standard_model_export(self, model_type): model.export(temp_filepath, format="litert") self.assertTrue(os.path.exists(temp_filepath)) - interpreter = LiteRtInterpreter(model_path=temp_filepath) + interpreter = LiteRTInterpreter(model_path=temp_filepath) interpreter.allocate_tensors() _set_interpreter_inputs(interpreter, ref_input) interpreter.invoke() @@ -251,7 +251,7 @@ def test_model_with_input_structure(self, struct_type): # Test with model.export() model.export(temp_filepath, format="litert") export_path = temp_filepath - interpreter = LiteRtInterpreter(model_path=export_path) + interpreter = LiteRTInterpreter(model_path=export_path) interpreter.allocate_tensors() feed_inputs = ref_input @@ -290,7 +290,7 @@ def test_model_with_multiple_inputs(self): # Test with model.export() model.export(temp_filepath, format="litert") export_path = temp_filepath - interpreter = LiteRtInterpreter(model_path=export_path) + interpreter = LiteRTInterpreter(model_path=export_path) interpreter.allocate_tensors() _set_interpreter_inputs(interpreter, [ref_input_x, ref_input_y]) @@ -335,7 +335,7 @@ def test_export_with_custom_input_signature(self): export_path = temp_filepath self.assertTrue(os.path.exists(export_path)) - interpreter = LiteRtInterpreter(model_path=export_path) + interpreter = LiteRTInterpreter(model_path=export_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() self.assertEqual(len(input_details), 1) @@ -358,7 +358,7 @@ def test_multi_output_model_export(self): self.assertTrue(os.path.exists(tflite_path)) # Test inference - interpreter = LiteRtInterpreter(model_path=tflite_path) + interpreter = LiteRTInterpreter(model_path=tflite_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() @@ -393,7 +393,7 @@ def test_export_with_verbose(self): self.assertTrue(os.path.exists(tflite_path)) # Verify the exported model works - interpreter = LiteRtInterpreter(model_path=tflite_path) + interpreter = LiteRTInterpreter(model_path=tflite_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() @@ -427,7 +427,7 @@ def test_export_invalid_filepath(self): def test_export_subclass_model(self): """Test exporting subclass models (uses wrapper conversion path).""" - if LiteRtInterpreter is None: + if LiteRTInterpreter is None: self.skipTest("No LiteRT interpreter available") model = get_model("subclass") @@ -444,7 +444,7 @@ def test_export_subclass_model(self): self.assertTrue(os.path.exists(temp_filepath)) # Verify inference - interpreter = LiteRtInterpreter(model_path=temp_filepath) + interpreter = LiteRTInterpreter(model_path=temp_filepath) interpreter.allocate_tensors() _set_interpreter_inputs(interpreter, ref_input) interpreter.invoke() From a22eb6547a85d532999b8b65b0cee598cd96f948 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 22 Oct 2025 14:44:00 +0530 Subject: [PATCH 083/115] Update litert.py --- keras/src/export/litert.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 1fc6579d499c..917851b3aa8e 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -43,9 +43,13 @@ def export_litert( class LiteRTExporter: - """ - Exporter for the LiteRT (TFLite) format that creates a single, - callable signature for `model.call`. + """Exporter for the LiteRT (TFLite) format. + + This class handles the conversion of Keras models for LiteRT runtime and + generates a `.tflite` model file. For efficient inference on mobile and + embedded devices, it creates a single callable signature based on the + model's `call()` method and supports optional Ahead-of-Time (AOT) + compilation for specific hardware targets. """ def __init__( From f019a0ac9e4f3177f18c9529dcfda0c3f0ce5d62 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 27 Oct 2025 21:36:01 +0530 Subject: [PATCH 084/115] Add support for extra TFLite converter settings via kwargs Introduces the _apply_converter_kwargs method to LiteRTExporter, allowing additional TFLite converter settings to be passed through kwargs and applied dynamically. Also updates .gitignore to exclude /.idea directory. --- .gitignore | 3 ++- keras/src/export/litert.py | 43 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index afd700b49952..c294c4d4db37 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,5 @@ examples/**/*.jpg .python-version .coverage *coverage.xml -.ruff_cache \ No newline at end of file +.ruff_cache +/.idea diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 917851b3aa8e..b28e06126e31 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -218,6 +218,10 @@ def _convert_to_tflite(self, input_signature): tf.lite.OpsSet.SELECT_TF_OPS, ] converter.experimental_enable_resource_variables = False + + # Apply any additional converter settings from kwargs + self._apply_converter_kwargs(converter) + tflite_model = converter.convert() if self.verbose: @@ -369,6 +373,9 @@ def __call__(self, *args, **kwargs): "experimental_enable_resource_variables" ] + # Apply any additional converter settings from kwargs + self._apply_converter_kwargs(converter) + if self.verbose: io_utils.print_msg( f"Trying conversion {strategy['name']}..." @@ -395,6 +402,42 @@ def __call__(self, *args, **kwargs): "All conversion strategies failed for wrapper-based conversion" ) + def _apply_converter_kwargs(self, converter): + """Apply additional converter settings from kwargs. + + This method applies any TFLite converter settings passed via kwargs + to the converter object. Common settings include: + - optimizations: List of optimization options (e.g., [tf.lite.Optimize.DEFAULT]) + - representative_dataset: Dataset generator for quantization + - target_spec: Additional target specification settings + - inference_input_type: Input type for inference (e.g., tf.int8) + - inference_output_type: Output type for inference (e.g., tf.int8) + + Args: + converter: tf.lite.TFLiteConverter instance to configure + """ + if not self.kwargs: + return + + for key, value in self.kwargs.items(): + if hasattr(converter, key): + setattr(converter, key, value) + if self.verbose: + io_utils.print_msg(f"Applied converter setting: {key}") + elif key == "target_spec" and isinstance(value, dict): + # Handle nested target_spec settings + for spec_key, spec_value in value.items(): + if hasattr(converter.target_spec, spec_key): + setattr(converter.target_spec, spec_key, spec_value) + if self.verbose: + io_utils.print_msg( + f"Applied target_spec setting: {spec_key}" + ) + elif self.verbose: + io_utils.print_msg( + f"Warning: Unknown converter setting '{key}' - ignoring" + ) + def _aot_compile(self, tflite_filepath): """Performs AOT compilation using LiteRT.""" if not litert.available: From 1c8dbcd0891b472cbc404e50bfca4bc28c31c59d Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 27 Oct 2025 21:42:59 +0530 Subject: [PATCH 085/115] Update litert.py --- keras/src/export/litert.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index b28e06126e31..6cb2f7c49ca2 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -218,10 +218,10 @@ def _convert_to_tflite(self, input_signature): tf.lite.OpsSet.SELECT_TF_OPS, ] converter.experimental_enable_resource_variables = False - + # Apply any additional converter settings from kwargs self._apply_converter_kwargs(converter) - + tflite_model = converter.convert() if self.verbose: @@ -404,21 +404,22 @@ def __call__(self, *args, **kwargs): def _apply_converter_kwargs(self, converter): """Apply additional converter settings from kwargs. - + This method applies any TFLite converter settings passed via kwargs to the converter object. Common settings include: - - optimizations: List of optimization options (e.g., [tf.lite.Optimize.DEFAULT]) + - optimizations: List of optimization options + (e.g., [tf.lite.Optimize.DEFAULT]) - representative_dataset: Dataset generator for quantization - target_spec: Additional target specification settings - inference_input_type: Input type for inference (e.g., tf.int8) - inference_output_type: Output type for inference (e.g., tf.int8) - + Args: converter: tf.lite.TFLiteConverter instance to configure """ if not self.kwargs: return - + for key, value in self.kwargs.items(): if hasattr(converter, key): setattr(converter, key, value) From ff4a81eecb30fb685031a6a513e5276a958397d2 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 27 Oct 2025 21:48:16 +0530 Subject: [PATCH 086/115] Update .gitignore from master --- .gitignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 8a4b78edf4de..416f213f2c82 100644 --- a/.gitignore +++ b/.gitignore @@ -20,5 +20,4 @@ examples/**/*.jpg .python-version .coverage *coverage.xml -.ruff_cache -/.idea +.ruff_cache \ No newline at end of file From 022cce8bb0d88bad7371dbb04b39fecb9bd86d83 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 29 Oct 2025 11:33:51 +0530 Subject: [PATCH 087/115] Add LiteRT export optimization tests and update doc Added comprehensive tests for exporting models with various TensorFlow Lite optimizations in litert_test.py, including quantization, sparsity, size, latency, and representative dataset support. Updated Model.export() documentation in model.py to clarify LiteRT-specific options are passed as direct keyword arguments, and refactored export logic to remove the litert_kwargs indirection. --- keras/src/export/litert_test.py | 279 ++++++++++++++++++++++++++++++++ keras/src/models/model.py | 20 +-- 2 files changed, 286 insertions(+), 13 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 2bc8d26b9188..93d95411c4fc 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -452,6 +452,285 @@ def test_export_subclass_model(self): self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + def test_export_with_optimizations_default(self): + """Test export with DEFAULT optimization.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("sequential") + temp_filepath = os.path.join( + self.get_temp_dir(), "optimized_default.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_output = _convert_to_numpy(model(ref_input)) + + # Export with DEFAULT optimization + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.DEFAULT], + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify inference still works + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + # Quantized model should be close but not exact + self.assertAllClose(ref_output, litert_output, atol=1e-2, rtol=1e-2) + + def test_export_with_optimizations_sparsity(self): + """Test export with EXPERIMENTAL_SPARSITY optimization.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("functional") + temp_filepath = os.path.join( + self.get_temp_dir(), "optimized_sparsity.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + + # Export with EXPERIMENTAL_SPARSITY optimization + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY], + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify the model can run inference + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + # Output should have valid shape + self.assertEqual(litert_output.shape, (batch_size, 1)) + + def test_export_with_optimizations_size(self): + """Test export with OPTIMIZE_FOR_SIZE optimization.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("sequential") + temp_filepath = os.path.join( + self.get_temp_dir(), "optimized_size.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + + # Export with OPTIMIZE_FOR_SIZE + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_SIZE], + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify the model can run inference + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertEqual(litert_output.shape, (batch_size, 1)) + + def test_export_with_optimizations_latency(self): + """Test export with OPTIMIZE_FOR_LATENCY optimization.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("functional") + temp_filepath = os.path.join( + self.get_temp_dir(), "optimized_latency.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + + # Export with OPTIMIZE_FOR_LATENCY + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_LATENCY], + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify the model can run inference + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertEqual(litert_output.shape, (batch_size, 1)) + + def test_export_with_multiple_optimizations(self): + """Test export with multiple optimization options combined.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("sequential") + temp_filepath = os.path.join( + self.get_temp_dir(), "optimized_multiple.tflite" + ) + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + + # Export with multiple optimizations + model.export( + temp_filepath, + format="litert", + optimizations=[ + tensorflow.lite.Optimize.DEFAULT, + tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY, + ], + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify the model can run inference + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertEqual(litert_output.shape, (batch_size, 1)) + + def test_export_with_representative_dataset(self): + """Test export with representative dataset for better quantization.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + model = get_model("functional") + temp_filepath = os.path.join( + self.get_temp_dir(), "quantized_model.tflite" + ) + + # Create representative dataset + def representative_dataset(): + for _ in range(10): + yield [np.random.normal(size=(1, 10)).astype("float32")] + + # Export with optimizations and representative dataset + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.DEFAULT], + representative_dataset=representative_dataset, + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify the model can run inference + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + _set_interpreter_inputs(interpreter, ref_input) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + # Output should have valid shape + self.assertEqual(litert_output.shape, (batch_size, 1)) + + def test_export_with_multiple_kwargs(self): + """Test export with multiple converter kwargs.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Create a larger model for quantization testing + inputs = layers.Input(shape=(28, 28, 3)) + x = layers.Conv2D(32, 3, activation="relu")(inputs) + x = layers.MaxPooling2D()(x) + x = layers.Flatten()(x) + x = layers.Dense(10, activation="softmax")(x) + model = models.Model(inputs, x) + + temp_filepath = os.path.join( + self.get_temp_dir(), "multi_kwargs_model.tflite" + ) + + # Create representative dataset + def representative_dataset(): + for _ in range(5): + yield [np.random.normal(size=(1, 28, 28, 3)).astype("float32")] + + # Export with multiple kwargs + model.export( + temp_filepath, + format="litert", + optimizations=[tensorflow.lite.Optimize.DEFAULT], + representative_dataset=representative_dataset, + experimental_new_quantizer=True, + ) + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify file size is reduced compared to non-quantized + file_size = os.path.getsize(temp_filepath) + self.assertGreater(file_size, 0) + + def test_export_optimization_file_size_comparison(self): + """Test that optimizations reduce file size.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Create a larger model to see size differences + inputs = layers.Input(shape=(28, 28, 3)) + x = layers.Conv2D(64, 3, activation="relu")(inputs) + x = layers.Conv2D(64, 3, activation="relu")(x) + x = layers.MaxPooling2D()(x) + x = layers.Flatten()(x) + x = layers.Dense(128, activation="relu")(x) + x = layers.Dense(10, activation="softmax")(x) + model = models.Model(inputs, x) + + # Export without optimization + filepath_no_opt = os.path.join( + self.get_temp_dir(), "model_no_opt.tflite" + ) + model.export(filepath_no_opt, format="litert") + + # Export with optimization + filepath_with_opt = os.path.join( + self.get_temp_dir(), "model_with_opt.tflite" + ) + model.export( + filepath_with_opt, + format="litert", + optimizations=[tensorflow.lite.Optimize.DEFAULT], + ) + + # Optimized model should be smaller + size_no_opt = os.path.getsize(filepath_no_opt) + size_with_opt = os.path.getsize(filepath_with_opt) + + self.assertLess( + size_with_opt, + size_no_opt, + f"Optimized model ({size_with_opt} bytes) should be smaller " + f"than non-optimized ({size_no_opt} bytes)", + ) + + # Typically expect ~75% size reduction with quantization + reduction_ratio = size_with_opt / size_no_opt + self.assertLess( + reduction_ratio, + 0.5, # Should be less than 50% of original size + f"Expected significant size reduction, got {reduction_ratio:.2%}", + ) + if __name__ == "__main__": pytest.main([__file__]) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 25a6f4e4331f..70410f041c2d 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -593,17 +593,13 @@ def export( provided, they will be automatically computed. - `opset_version`: Optional `int`. Specific to `format="onnx"`. An integer value that specifies the ONNX opset version. - - `litert_kwargs`: Optional `dict`. Specific to - `format="litert"`. A dictionary containing LiteRT export - parameters. Can include: - - `allow_custom_ops`: Optional `bool`. Whether to allow - custom operations during conversion. Defaults to - `False`. - - `enable_select_tf_ops`: Optional `bool`. Whether to - enable TensorFlow Select ops for unsupported - operations. Defaults to `False`. - - `optimizations`: Optional `list`. List of optimizations - to apply (e.g., `[tf.lite.Optimize.DEFAULT]`). + - LiteRT-specific options: Optional keyword arguments specific + to `format="litert"`. These are passed directly to the + TensorFlow Lite converter and include options like + `optimizations`, `representative_dataset`, + `experimental_new_quantizer`, `allow_custom_ops`, + `enable_select_tf_ops`, etc. See TensorFlow Lite + documentation for all available options. **Note:** This feature is currently supported only with TensorFlow, JAX and Torch backends. @@ -701,13 +697,11 @@ def export( **kwargs, ) elif format == "litert": - litert_kwargs = kwargs.pop("litert_kwargs", {}) export_litert( self, filepath, verbose=verbose, input_signature=input_signature, - **litert_kwargs, **kwargs, ) From 85e878b6d6124a107ed0c186ac1de3f93ff4d92a Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Fri, 31 Oct 2025 15:07:50 +0530 Subject: [PATCH 088/115] Add tests for LiteRT AOT compilation support Introduces multiple tests to verify LiteRT export functionality with AOT compilation, including parameter acceptance, multiple targets, quantization optimizations, fallback behavior when LiteRT is unavailable, and the export_with_aot class method. These tests ensure robust handling of AOT compilation scenarios and graceful fallback when infrastructure is missing. --- keras/src/export/litert_test.py | 213 ++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 93d95411c4fc..742ffc5567af 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -731,6 +731,219 @@ def test_export_optimization_file_size_comparison(self): f"Expected significant size reduction, got {reduction_ratio:.2%}", ) + def test_aot_compile_parameter_accepted(self): + """Test that aot_compile_targets parameter is accepted without error.""" + if not litert.available: + self.skipTest("LiteRT not available") + + model = get_model("sequential") + temp_filepath = os.path.join( + self.get_temp_dir(), "model_with_aot.tflite" + ) + + # Test that parameter is accepted (compilation may or may not succeed) + # The key is that it doesn't crash + try: + result = model.export( + temp_filepath, + format="litert", + aot_compile_targets=["arm64"], + verbose=True, + ) + # Base .tflite file should always be created + self.assertTrue(os.path.exists(temp_filepath)) + + # Result could be filepath (if AOT failed/skipped) or + # CompilationResult (if AOT succeeded) + self.assertIsNotNone(result) + except Exception as e: + # If AOT infrastructure not available, that's okay as long as + # base model was exported + error_msg = str(e) + if "AOT" in error_msg or "compilation" in error_msg.lower(): + if os.path.exists(temp_filepath): + # Base model created, AOT just not available - this is fine + pass + else: + self.fail( + f"Base .tflite model should be created even if AOT " + f"fails: {error_msg}" + ) + else: + # Some other error - re-raise + raise + + def test_aot_compile_multiple_targets(self): + """Test AOT compilation with multiple targets.""" + if not litert.available: + self.skipTest("LiteRT not available") + + model = get_model("functional") + temp_filepath = os.path.join( + self.get_temp_dir(), "model_multi_aot.tflite" + ) + + # Test with multiple targets + try: + result = model.export( + temp_filepath, + format="litert", + aot_compile_targets=["arm64", "x86_64"], + verbose=True, + ) + + # Base model should exist + self.assertTrue(os.path.exists(temp_filepath)) + + # Check if result contains compilation info + if hasattr(result, "models"): + # AOT compilation succeeded + self.assertGreater( + len(result.models), + 0, + "Should have at least one compiled model", + ) + elif isinstance(result, str): + # AOT skipped, returned filepath + self.assertEqual(result, temp_filepath) + + except Exception as e: + # AOT infrastructure may not be available + if os.path.exists(temp_filepath): + # Base model was created - acceptable + pass + else: + self.fail(f"Base model should be created: {str(e)}") + + def test_aot_compile_with_optimizations(self): + """Test AOT compilation combined with quantization optimizations.""" + if not litert.available: + self.skipTest("LiteRT not available") + + model = get_model("sequential") + temp_filepath = os.path.join( + self.get_temp_dir(), "model_aot_optimized.tflite" + ) + + # Test AOT with quantization + try: + model.export( + temp_filepath, + format="litert", + aot_compile_targets=["arm64"], + optimizations=[tensorflow.lite.Optimize.DEFAULT], + verbose=True, + ) + + # Base model must exist + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify model is quantized (smaller size) + size = os.path.getsize(temp_filepath) + self.assertGreater(size, 0) + + except Exception as e: + # Acceptable if AOT not available but base model created + if not os.path.exists(temp_filepath): + self.fail(f"Base model should be created: {str(e)}") + + def test_get_available_aot_targets(self): + """Test retrieving available AOT compilation targets.""" + if not litert.available: + self.skipTest("LiteRT not available") + + try: + from keras.src.export.litert import LiteRTExporter + + # This should not crash even if no targets available + targets = LiteRTExporter.get_available_targets() + + # Should return a list (possibly empty) + self.assertIsInstance(targets, list) + + # If targets are available, they should be valid + if targets: + for target in targets: + # Each target should have some identifying property + self.assertIsNotNone(target) + + except ImportError: + self.skipTest("LiteRTExporter not available") + except Exception as e: + # No targets available is acceptable + if "target" in str(e).lower() or "vendor" in str(e).lower(): + pass + else: + raise + + def test_aot_compile_without_litert_available(self): + """Test that export works gracefully when LiteRT AOT is unavailable.""" + # This test verifies the fallback behavior + model = get_model("sequential") + temp_filepath = os.path.join(self.get_temp_dir(), "model_no_aot.tflite") + + # Even if we request AOT, export should succeed and create base model + # AOT compilation may fail, but that's acceptable as long as base model + # is created + try: + result = model.export( + temp_filepath, + format="litert", + aot_compile_targets=["arm64"], + verbose=False, # Suppress warnings in test output + ) + + # Base .tflite file should be created regardless + self.assertTrue(os.path.exists(temp_filepath)) + + # Result should be either filepath or CompilationResult + self.assertIsNotNone(result) + + except RuntimeError as e: + # AOT compilation may fail if infrastructure not available + # This is acceptable as long as base model is created + if "AOT" in str(e): + # Verify base model was created before AOT failure + self.assertTrue( + os.path.exists(temp_filepath), + "Base .tflite model should be created even if AOT fails", + ) + else: + # Other runtime errors should be raised + raise + + def test_export_with_aot_class_method(self): + """Test the export_with_aot class method.""" + if not litert.available: + self.skipTest("LiteRT not available") + + try: + from keras.src.export.litert import LiteRTExporter + + model = get_model("functional") + temp_filepath = os.path.join( + self.get_temp_dir(), "model_class_method_aot.tflite" + ) + + # Test the class method + result = LiteRTExporter.export_with_aot( + model=model, + filepath=temp_filepath, + targets=["arm64"], + verbose=True, + ) + + # Base model should exist + self.assertTrue(os.path.exists(temp_filepath)) + self.assertIsNotNone(result) + + except ImportError: + self.skipTest("LiteRTExporter not available") + except Exception as e: + # AOT may not be available, but base model should be created + if not os.path.exists(temp_filepath): + self.fail(f"Base model should be created: {str(e)}") + if __name__ == "__main__": pytest.main([__file__]) From 100506341714b4a37c22ac97cdd903406e2eb676 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Fri, 31 Oct 2025 15:33:46 +0530 Subject: [PATCH 089/115] Update LiteRT export backend check in Model Changed the LiteRT export availability check to require the TensorFlow backend instead of just TensorFlow installation. Updated error message to instruct users to set the backend to TensorFlow. --- keras/src/models/model.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 70410f041c2d..52a865ae70bd 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -665,11 +665,12 @@ def export( f"{list(available_formats)}." ) - # Check if LiteRT export is available (requires TensorFlow) - if format == "litert" and export_litert is None: + # Check if LiteRT export is available (requires TensorFlow backend) + if format == "litert" and backend.backend() != "tensorflow": raise ImportError( - "LiteRT export requires TensorFlow to be installed. " - "Please install TensorFlow: `pip install tensorflow`" + "LiteRT export requires TensorFlow backend. " + "Please set the backend to TensorFlow: " + "`keras.backend.set_backend('tensorflow')`" ) if format == "tf_saved_model": From c984a6b0408a0ddd44d2003b071d46fa69c87fd9 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Fri, 31 Oct 2025 16:13:07 +0530 Subject: [PATCH 090/115] Update litert_test.py --- keras/src/export/litert_test.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 742ffc5567af..4f4fa9183cab 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -886,7 +886,7 @@ def test_aot_compile_without_litert_available(self): # AOT compilation may fail, but that's acceptable as long as base model # is created try: - result = model.export( + model.export( temp_filepath, format="litert", aot_compile_targets=["arm64"], @@ -896,9 +896,6 @@ def test_aot_compile_without_litert_available(self): # Base .tflite file should be created regardless self.assertTrue(os.path.exists(temp_filepath)) - # Result should be either filepath or CompilationResult - self.assertIsNotNone(result) - except RuntimeError as e: # AOT compilation may fail if infrastructure not available # This is acceptable as long as base model is created From 65dc0f979c270121a8136493e9a0be7bdcef7d9b Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Sun, 2 Nov 2025 18:56:22 +0530 Subject: [PATCH 091/115] Update litert.py --- keras/src/export/litert.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 6cb2f7c49ca2..23cf7abba75c 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,5 +1,6 @@ import logging import os +import traceback from keras.src import tree from keras.src.utils import io_utils @@ -494,8 +495,6 @@ def _aot_compile(self, tflite_filepath): except Exception as e: if self.verbose: io_utils.print_msg(f"AOT compilation failed: {e}") - import traceback - traceback.print_exc() raise RuntimeError(f"AOT compilation failed: {e}") From dd1cfbd5ca8aa74e74fdcfb2639f0170c8571636 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Sun, 2 Nov 2025 19:03:15 +0530 Subject: [PATCH 092/115] Fix model call with multiple positional arguments Replaces passing a list of positional arguments to the model with unpacking using '*args', ensuring correct argument handling when multiple inputs are provided. --- keras/src/export/litert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 23cf7abba75c..2968fe4c4c69 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -288,7 +288,7 @@ def __call__(self, *args, **kwargs): if len(args) == 1: return self._model(args[0]) else: - return self._model(list(args)) + return self._model(*args) elif kwargs and not args: # Called with keyword arguments if len(kwargs) == 1 and "inputs" in kwargs: From 4bf2e808ae1631a244a82c2d3965cd7d71ca6e3c Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 4 Nov 2025 15:56:15 +0530 Subject: [PATCH 093/115] Add comprehensive SignatureDef tests for LiteRT export Add new test methods to verify SignatureDef functionality in LiteRT export: - test_signature_def_with_named_model: Tests input name preservation - test_signature_def_with_functional_model: Tests functional model export - test_signature_def_with_multi_input_model: Tests multi-input model handling - test_signature_def_with_multi_output_model: Tests multi-output model handling These tests ensure that exported TFLite models maintain proper input/output names in their SignatureDef and work correctly with signature runners for inference. All tests pass and verify compatibility across different model types (functional, multi-input, multi-output). --- keras/src/export/litert_test.py | 304 ++++++++++++++++++++++++++++++++ 1 file changed, 304 insertions(+) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 4f4fa9183cab..d1d1e7cda07f 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -941,6 +941,310 @@ def test_export_with_aot_class_method(self): if not os.path.exists(temp_filepath): self.fail(f"Base model should be created: {str(e)}") + def test_signature_def_with_named_model(self): + """Test that exported models have SignatureDef with input names.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Build a model with explicit layer names + inputs = layers.Input(shape=(10,), name="feature_input") + x = layers.Dense(32, activation="relu", name="encoder")(inputs) + x = layers.Dense(16, activation="relu", name="bottleneck")(x) + outputs = layers.Dense( + 1, activation="sigmoid", name="prediction_output" + )(x) + model = models.Model(inputs=inputs, outputs=outputs, name="named_model") + + temp_filepath = os.path.join(self.get_temp_dir(), "named_model.tflite") + + # Export the model + model.export(temp_filepath, format="litert") + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and check SignatureDef + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Get SignatureDef information + signature_defs = interpreter.get_signature_list() + self.assertIn("serving_default", signature_defs) + + serving_sig = signature_defs["serving_default"] + sig_inputs = serving_sig.get("inputs", []) + sig_outputs = serving_sig.get("outputs", []) + + # Verify SignatureDef has inputs and outputs + self.assertGreater( + len(sig_inputs), 0, "Should have at least one input in SignatureDef" + ) + self.assertGreater( + len(sig_outputs), + 0, + "Should have at least one output in SignatureDef", + ) + + # Verify input names are preserved (they should match Keras input names) + self.assertIn( + "feature_input", + sig_inputs, + f"Input name 'feature_input' should be in SignatureDef inputs: " + f"{sig_inputs}", + ) + + # Verify inference works using signature runner + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_output = _convert_to_numpy(model(ref_input)) + + # Note: For single-output Functional models, Keras returns a tensor + # (not dict). SignatureDef will have generic output names like + # 'output_0'. + # Only multi-output models or models with explicit dict returns have + # named outputs + + # Test inference using signature runner for better output name handling + signature_runner = interpreter.get_signature_runner("serving_default") + sig_output = signature_runner(feature_input=ref_input) + + # sig_output should be a dict with meaningful output names + self.assertIsInstance(sig_output, dict) + self.assertGreater( + len(sig_output), 0, "Should have at least one output" + ) + + # For single output, extract the value + if len(sig_output) == 1: + litert_output = list(sig_output.values())[0] + else: + litert_output = list(sig_output.values()) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_signature_def_with_functional_model(self): + """Test that SignatureDef preserves input/output names for + Functional models.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Create a Functional model with named inputs and outputs + inputs = layers.Input(shape=(10,), name="input_layer") + x = layers.Dense(32, activation="relu", name="hidden_layer")(inputs) + outputs = layers.Dense(1, activation="sigmoid", name="output_layer")(x) + model = models.Model( + inputs=inputs, outputs=outputs, name="functional_model" + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "functional_model.tflite" + ) + + # Export the model + model.export(temp_filepath, format="litert") + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and check SignatureDef + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Get SignatureDef information + signature_defs = interpreter.get_signature_list() + self.assertIn("serving_default", signature_defs) + + serving_sig = signature_defs["serving_default"] + sig_inputs = serving_sig.get("inputs", []) + sig_outputs = serving_sig.get("outputs", []) + + # Verify SignatureDef has inputs and outputs + self.assertGreater( + len(sig_inputs), 0, "Should have at least one input in SignatureDef" + ) + self.assertGreater( + len(sig_outputs), + 0, + "Should have at least one output in SignatureDef", + ) + + # Verify that input names are preserved + self.assertIn( + "input_layer", + sig_inputs, + f"Input name 'input_layer' should be in SignatureDef inputs: " + f"{sig_inputs}", + ) + + # Test inference using signature runner for named outputs + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_output = _convert_to_numpy(model(ref_input)) + + # Use signature runner to get outputs with meaningful names + signature_runner = interpreter.get_signature_runner("serving_default") + sig_output = signature_runner(input_layer=ref_input) + + # sig_output should be a dict with output names + self.assertIsInstance(sig_output, dict) + self.assertGreater( + len(sig_output), 0, "Should have at least one output" + ) + + # For single output, TFLite typically uses generic names like 'output_0' + # Extract the single output value + if len(sig_output) == 1: + litert_output = list(sig_output.values())[0] + else: + litert_output = list(sig_output.values()) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_signature_def_with_multi_input_model(self): + """Test that SignatureDef preserves names for multi-input models.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Create a multi-input model + input1 = layers.Input(shape=(10,), name="input_1") + input2 = layers.Input(shape=(5,), name="input_2") + concat = layers.Concatenate(name="concat_layer")([input1, input2]) + outputs = layers.Dense(1, activation="sigmoid", name="output")(concat) + model = models.Model( + inputs=[input1, input2], outputs=outputs, name="multi_input_model" + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "multi_input_model.tflite" + ) + + # Export the model + model.export(temp_filepath, format="litert") + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and check SignatureDef + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Get SignatureDef information + signature_defs = interpreter.get_signature_list() + self.assertIn("serving_default", signature_defs) + + serving_sig = signature_defs["serving_default"] + sig_inputs = serving_sig.get("inputs", []) + sig_outputs = serving_sig.get("outputs", []) + + # Verify SignatureDef has correct number of inputs and outputs + self.assertEqual( + len(sig_inputs), 2, "Should have 2 inputs in SignatureDef" + ) + self.assertGreater( + len(sig_outputs), + 0, + "Should have at least one output in SignatureDef", + ) + + # Verify that input names are preserved + self.assertIn( + "input_1", + sig_inputs, + f"Input name 'input_1' should be in SignatureDef inputs: " + f"{sig_inputs}", + ) + self.assertIn( + "input_2", + sig_inputs, + f"Input name 'input_2' should be in SignatureDef inputs: " + f"{sig_inputs}", + ) + + # Test inference using signature runner + batch_size = 1 + ref_input1 = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_input2 = np.random.normal(size=(batch_size, 5)).astype("float32") + ref_inputs = [ref_input1, ref_input2] + ref_output = _convert_to_numpy(model(ref_inputs)) + + # Use signature runner with named inputs + signature_runner = interpreter.get_signature_runner("serving_default") + sig_output = signature_runner(input_1=ref_input1, input_2=ref_input2) + + # sig_output should be a dict with output names + self.assertIsInstance(sig_output, dict) + self.assertGreater( + len(sig_output), 0, "Should have at least one output" + ) + + # For single output, TFLite uses generic names like 'output_0' + if len(sig_output) == 1: + litert_output = list(sig_output.values())[0] + else: + litert_output = list(sig_output.values()) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_signature_def_with_multi_output_model(self): + """Test that SignatureDef handles multi-output models correctly.""" + if LiteRTInterpreter is None: + self.skipTest("No LiteRT interpreter available") + + # Create a multi-output model + inputs = layers.Input(shape=(10,), name="input_layer") + x = layers.Dense(32, activation="relu", name="shared_layer")(inputs) + output1 = layers.Dense(1, activation="sigmoid", name="output_1")(x) + output2 = layers.Dense(2, activation="softmax", name="output_2")(x) + model = models.Model( + inputs=inputs, outputs=[output1, output2], name="multi_output_model" + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "multi_output_model.tflite" + ) + + # Export the model + model.export(temp_filepath, format="litert") + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and check SignatureDef + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Get SignatureDef information + signature_defs = interpreter.get_signature_list() + self.assertIn("serving_default", signature_defs) + + serving_sig = signature_defs["serving_default"] + sig_inputs = serving_sig.get("inputs", []) + sig_outputs = serving_sig.get("outputs", []) + + # Verify SignatureDef structure + self.assertGreater( + len(sig_inputs), 0, "Should have at least one input in SignatureDef" + ) + self.assertEqual( + len(sig_outputs), 2, "Should have 2 outputs in SignatureDef" + ) + + # Test inference using signature runner + batch_size = 1 + ref_input = np.random.normal(size=(batch_size, 10)).astype("float32") + ref_outputs = _convert_to_numpy(model(ref_input)) + + # Use signature runner + signature_runner = interpreter.get_signature_runner("serving_default") + sig_output = signature_runner(input_layer=ref_input) + + # sig_output should be a dict with output names + self.assertIsInstance(sig_output, dict) + self.assertEqual(len(sig_output), 2, "Should have 2 outputs") + + # Note: TFLite uses generic names like 'output_0', 'output_1' for + # SignatureDef outputs. These don't match the Keras layer names + # ('output_1', 'output_2') - this is expected. The names come from + # TensorFlow's symbolic tracing, not from our exporter code. + # Verify outputs match by position + sig_output_values = list(sig_output.values()) + for i, ref_out in enumerate(ref_outputs): + self.assertAllClose( + ref_out, sig_output_values[i], atol=1e-4, rtol=1e-4 + ) + if __name__ == "__main__": pytest.main([__file__]) From 4773089e8257bce567367e75a2437f19fcc57006 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 5 Nov 2025 10:15:37 +0530 Subject: [PATCH 094/115] Improve error reporting for AOT compilation failure Replaces direct printing of traceback with formatted traceback output using io_utils.print_msg for better consistency in verbose error reporting. --- keras/src/export/litert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 2968fe4c4c69..83569b589b74 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -495,7 +495,7 @@ def _aot_compile(self, tflite_filepath): except Exception as e: if self.verbose: io_utils.print_msg(f"AOT compilation failed: {e}") - traceback.print_exc() + io_utils.print_msg(traceback.format_exc()) raise RuntimeError(f"AOT compilation failed: {e}") def _get_available_litert_targets(self): From d98cca16ee0f214347ad4d74c60af556dd8ed2b7 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 12 Nov 2025 16:52:04 +0530 Subject: [PATCH 095/115] Add support for dictionary model inputs in LiteRTExporter Introduces methods to detect and handle models with dictionary inputs, including input signature inference and adapter model creation for TFLite export. Updates export logic to convert dict input signatures to list format and ensures compatibility with TFLite conversion. Also improves handling of multi-input models and input signature processing. --- keras/src/export/litert.py | 201 +++++++++++++++++++++++++++++++++++-- 1 file changed, 195 insertions(+), 6 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 83569b589b74..4166a7c9fb49 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -76,6 +76,70 @@ def __init__( self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs + def _has_dict_inputs(self): + """Check if the model expects dictionary inputs. + + Returns: + bool: True if model has dict inputs, False otherwise. + """ + # Check if model.inputs is a dict (most reliable for built models) + if hasattr(self.model, "inputs") and isinstance( + self.model.inputs, dict + ): + return True + + # Check if _inputs_struct is a dict (for Functional models) + if hasattr(self.model, "_inputs_struct") and isinstance( + self.model._inputs_struct, dict + ): + return True + + # Check if provided input_signature is a dict + if self.input_signature is not None: + if isinstance(self.input_signature, dict): + return True + # Check for wrapped dict (Functional model pattern) + if ( + isinstance(self.input_signature, (list, tuple)) + and len(self.input_signature) == 1 + and isinstance(self.input_signature[0], dict) + ): + return True + + return False + + def _infer_dict_input_signature(self): + """Infer input signature from a model with dict inputs. + + This reads the actual shapes and dtypes from model._inputs_struct. + + Returns: + dict or None: Dictionary mapping input names to InputSpec, or None + """ + # Check _inputs_struct first (preserves dict structure) + if hasattr(self.model, "_inputs_struct") and isinstance( + self.model._inputs_struct, dict + ): + from keras.src.export.export_utils import make_input_spec + + return { + name: make_input_spec(inp) + for name, inp in self.model._inputs_struct.items() + } + + # Fall back to model.inputs if it's a dict + if hasattr(self.model, "inputs") and isinstance( + self.model.inputs, dict + ): + from keras.src.export.export_utils import make_input_spec + + return { + name: make_input_spec(inp) + for name, inp in self.model.inputs.items() + } + + return None + def export(self, filepath): """Exports the Keras model to a TFLite file and optionally performs AOT compilation. @@ -97,12 +161,54 @@ def export(self, filepath): if self.input_signature is None: if self.verbose: io_utils.print_msg("Inferring input signature from model.") - from keras.src.export.export_utils import get_input_signature - self.input_signature = get_input_signature(self.model) + # Try dict-specific inference first (for models with dict inputs) + dict_signature = self._infer_dict_input_signature() + if dict_signature is not None: + self.input_signature = dict_signature + if self.verbose: + io_utils.print_msg( + f"Detected dictionary inputs with keys: " + f"{list(dict_signature.keys())}" + ) + else: + # Fall back to standard inference + from keras.src.export.export_utils import get_input_signature + + self.input_signature = get_input_signature(self.model) - # 3. Convert the model to TFLite. - tflite_model = self._convert_to_tflite(self.input_signature) + # 3. Handle dictionary inputs by creating an adapter + # Check if we have dict inputs that need adaptation + has_dict_inputs = isinstance(self.input_signature, dict) + + if has_dict_inputs: + # Create adapter model that converts list to dict + adapted_model = self._create_dict_adapter(self.input_signature) + + # Convert dict signature to list for TFLite conversion + # The adapter will handle the dict->list conversion + input_signature_list = list(self.input_signature.values()) + + # Use adapted model and list signature for conversion + model_to_convert = adapted_model + signature_for_conversion = input_signature_list + else: + # No dict inputs - use model as-is + model_to_convert = self.model + signature_for_conversion = self.input_signature + + # Store original model reference for later use + original_model = self.model + + # Temporarily replace self.model with the model to convert + self.model = model_to_convert + + try: + # 4. Convert the model to TFLite. + tflite_model = self._convert_to_tflite(signature_for_conversion) + finally: + # Restore original model + self.model = original_model if self.verbose: # Calculate model size from the serialized bytes @@ -151,6 +257,60 @@ def export(self, filepath): return compiled_models if compiled_models else filepath + def _create_dict_adapter(self, input_signature_dict): + """Create an adapter model that converts list inputs to dict inputs. + + This adapter allows models expecting dictionary inputs to be exported + to TFLite format (which only supports positional/list inputs). + + Args: + input_signature_dict: Dictionary mapping input names to InputSpec + + Returns: + A Functional model that accepts list inputs and converts to dict + """ + if self.verbose: + io_utils.print_msg( + f"Creating adapter for dictionary inputs: " + f"{list(input_signature_dict.keys())}" + ) + + input_keys = list(input_signature_dict.keys()) + + # Create Input layers for TFLite (list-based) + input_layers = [] + for name in input_keys: + spec = input_signature_dict[name] + input_layer = tf.keras.layers.Input( + shape=spec.shape[1:], # Remove batch dimension + dtype=spec.dtype, + name=name, + ) + input_layers.append(input_layer) + + # Create dict from list inputs + inputs_dict = { + name: layer for name, layer in zip(input_keys, input_layers) + } + + # Call the original model with dict inputs + outputs = self.model(inputs_dict) + + # Build as Functional model (list inputs -> dict -> model -> output) + adapted_model = tf.keras.Model(inputs=input_layers, outputs=outputs) + + # Preserve the original model's variables + adapted_model._variables = self.model.variables + adapted_model._trainable_variables = self.model.trainable_variables + adapted_model._non_trainable_variables = ( + self.model.non_trainable_variables + ) + + if self.verbose: + io_utils.print_msg("Adapter created successfully.") + + return adapted_model + def _ensure_model_built(self): """ Ensures the model is built before conversion. @@ -288,7 +448,15 @@ def __call__(self, *args, **kwargs): if len(args) == 1: return self._model(args[0]) else: - return self._model(*args) + # Multi-input case: Functional models expect a list, + # not unpacked positional args + if ( + hasattr(self._model, "inputs") + and len(self._model.inputs) > 1 + ): + return self._model(list(args)) + else: + return self._model(*args) elif kwargs and not args: # Called with keyword arguments if len(kwargs) == 1 and "inputs" in kwargs: @@ -332,7 +500,28 @@ def __call__(self, *args, **kwargs): wrapper = KerasModelWrapper(self.model) # 2. Get a concrete function from the wrapper. - if not isinstance(input_signature, (list, tuple)): + # Handle dict input signatures for multi-input models + if isinstance(input_signature, dict): + # For Functional models with multiple inputs, convert dict to + # ordered list matching model.inputs order + if hasattr(self.model, "inputs") and len(self.model.inputs) > 1: + input_signature_list = [] + for input_layer in self.model.inputs: + input_name = input_layer.name + if input_name not in input_signature: + raise ValueError( + f"Missing input '{input_name}' in input_signature. " + f"Model expects inputs: " + f"{[inp.name for inp in self.model.inputs]}, " + f"but input_signature only has: " + f"{list(input_signature.keys())}" + ) + input_signature_list.append(input_signature[input_name]) + input_signature = input_signature_list + else: + # Single-input model with dict signature + input_signature = [input_signature] + elif not isinstance(input_signature, (list, tuple)): input_signature = [input_signature] from keras.src.export.export_utils import make_tf_tensor_spec From 11bb4be5b72c7c84d7a2edc3588e5a9c165ad624 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 12 Nov 2025 17:08:16 +0530 Subject: [PATCH 096/115] Add tests for dict input adapter in LiteRT export Introduces multiple tests to verify LiteRT export functionality for models with dictionary inputs, including adapter creation, input signature inference, custom signatures, numerical accuracy, variable sharing, and multi-output support. These tests ensure correct export and inference behavior for dict input models. --- keras/src/export/litert_test.py | 292 ++++++++++++++++++++++++++++++++ 1 file changed, 292 insertions(+) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index d1d1e7cda07f..8daf6ee28dd6 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -1245,6 +1245,298 @@ def test_signature_def_with_multi_output_model(self): ref_out, sig_output_values[i], atol=1e-4, rtol=1e-4 ) + def test_dict_input_adapter_creation(self): + """Test that dict input adapter is created and works correctly.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create a model with dictionary inputs + input1 = layers.Input(shape=(10,), name="x") + input2 = layers.Input(shape=(10,), name="y") + output = layers.Add()([input1, input2]) + model = models.Model(inputs={"x": input1, "y": input2}, outputs=output) + + temp_filepath = os.path.join( + self.get_temp_dir(), "dict_adapter_model.tflite" + ) + + # Export with verbose to verify adapter creation messages + model.export(temp_filepath, format="litert", verbose=True) + + # Verify the file was created + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and test the model + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Check input details - should have 2 inputs in list form + input_details = interpreter.get_input_details() + self.assertEqual(len(input_details), 2) + + # Test inference + batch_size = 1 + x_val = np.random.normal(size=(batch_size, 10)).astype("float32") + y_val = np.random.normal(size=(batch_size, 10)).astype("float32") + + ref_output = _convert_to_numpy( + model( + { + "x": ops.convert_to_tensor(x_val), + "y": ops.convert_to_tensor(y_val), + } + ) + ) + + # Set inputs as list (adapter converts list to dict internally) + _set_interpreter_inputs(interpreter, [x_val, y_val]) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_dict_input_signature_inference(self): + """Test automatic inference of dict input signatures.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create a model with dictionary inputs (without calling it first) + input1 = layers.Input(shape=(5,), name="feature_a") + input2 = layers.Input(shape=(3,), name="feature_b") + concat = layers.Concatenate()([input1, input2]) + output = layers.Dense(1)(concat) + model = models.Model( + inputs={"feature_a": input1, "feature_b": input2}, outputs=output + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "inferred_dict_model.tflite" + ) + + # Export without providing input_signature - should be inferred + model.export(temp_filepath, format="litert") + + # Verify successful export + self.assertTrue(os.path.exists(temp_filepath)) + + # Load and verify structure + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + input_details = interpreter.get_input_details() + self.assertEqual(len(input_details), 2) + + # Verify shapes match expected + shapes = [tuple(d["shape"][1:]) for d in input_details] + self.assertIn((5,), shapes) + self.assertIn((3,), shapes) + + def test_dict_input_with_custom_signature(self): + """Test dict input export with custom input signature.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create model with dict inputs + input1 = layers.Input(shape=(10,), name="input_x") + input2 = layers.Input(shape=(10,), name="input_y") + output = layers.Multiply()([input1, input2]) + model = models.Model( + inputs={"input_x": input1, "input_y": input2}, outputs=output + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "dict_custom_sig_model.tflite" + ) + + # Provide custom dict input signature + input_signature = { + "input_x": layers.InputSpec(shape=(None, 10), dtype="float32"), + "input_y": layers.InputSpec(shape=(None, 10), dtype="float32"), + } + + model.export( + temp_filepath, format="litert", input_signature=input_signature + ) + + # Verify export + self.assertTrue(os.path.exists(temp_filepath)) + + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Test inference + batch_size = 1 + x_val = np.random.normal(size=(batch_size, 10)).astype("float32") + y_val = np.random.normal(size=(batch_size, 10)).astype("float32") + + ref_output = _convert_to_numpy( + model( + { + "input_x": ops.convert_to_tensor(x_val), + "input_y": ops.convert_to_tensor(y_val), + } + ) + ) + + _set_interpreter_inputs(interpreter, [x_val, y_val]) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_dict_input_numerical_accuracy(self): + """Test numerical accuracy of dict input models with complex ops.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create a more complex model with dict inputs + input1 = layers.Input(shape=(20,), name="tokens") + input2 = layers.Input(shape=(20,), name="mask") + + # Apply some transformations + x1 = layers.Dense(16, activation="relu")(input1) + x2 = layers.Dense(16, activation="relu")(input2) + + # Combine + combined = layers.Multiply()([x1, x2]) + output = layers.Dense(1, activation="sigmoid")(combined) + + model = models.Model( + inputs={"tokens": input1, "mask": input2}, outputs=output + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "dict_numerical_model.tflite" + ) + + model.export(temp_filepath, format="litert") + + # Test with multiple samples + batch_size = 1 + tokens_val = np.random.normal(size=(batch_size, 20)).astype("float32") + mask_val = np.random.normal(size=(batch_size, 20)).astype("float32") + + ref_output = _convert_to_numpy( + model( + { + "tokens": ops.convert_to_tensor(tokens_val), + "mask": ops.convert_to_tensor(mask_val), + } + ) + ) + + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + _set_interpreter_inputs(interpreter, [tokens_val, mask_val]) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + # Should have good numerical accuracy + self.assertAllClose(ref_output, litert_output, atol=1e-5, rtol=1e-5) + + def test_dict_input_preserves_variable_sharing(self): + """Test that adapter preserves variable sharing from original model.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create model with shared layers + shared_dense = layers.Dense(8, activation="relu") + + input1 = layers.Input(shape=(10,), name="branch_a") + input2 = layers.Input(shape=(10,), name="branch_b") + + # Both inputs go through same shared layer + x1 = shared_dense(input1) + x2 = shared_dense(input2) + + output = layers.Add()([x1, x2]) + model = models.Model( + inputs={"branch_a": input1, "branch_b": input2}, outputs=output + ) + + # Train briefly to ensure weights are meaningful + model.compile(optimizer="adam", loss="mse") + x_train = { + "branch_a": np.random.normal(size=(5, 10)).astype("float32"), + "branch_b": np.random.normal(size=(5, 10)).astype("float32"), + } + y_train = np.random.normal(size=(5, 8)).astype("float32") + model.fit(x_train, y_train, epochs=1, verbose=0) + + temp_filepath = os.path.join( + self.get_temp_dir(), "dict_shared_vars_model.tflite" + ) + + model.export(temp_filepath, format="litert") + + # Verify export works and inference matches + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + batch_size = 1 + a_val = np.random.normal(size=(batch_size, 10)).astype("float32") + b_val = np.random.normal(size=(batch_size, 10)).astype("float32") + + ref_output = _convert_to_numpy( + model( + { + "branch_a": ops.convert_to_tensor(a_val), + "branch_b": ops.convert_to_tensor(b_val), + } + ) + ) + + _set_interpreter_inputs(interpreter, [a_val, b_val]) + interpreter.invoke() + litert_output = _get_interpreter_outputs(interpreter) + + self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4) + + def test_dict_input_multi_output_model(self): + """Test dict input model with multiple outputs exports successfully.""" + if not litert.available: + self.skipTest("LiteRT not available") + + # Create model with dict inputs and multiple outputs + input1 = layers.Input(shape=(10,), name="feature_1") + input2 = layers.Input(shape=(10,), name="feature_2") + + # Two output branches + output1 = layers.Dense(5, name="output_a")(input1) + output2 = layers.Dense(3, name="output_b")(input2) + + model = models.Model( + inputs={"feature_1": input1, "feature_2": input2}, + outputs=[output1, output2], + ) + + temp_filepath = os.path.join( + self.get_temp_dir(), "dict_multi_output_model.tflite" + ) + + # Main test: export should succeed with dict inputs + multi outputs + model.export(temp_filepath, format="litert") + + # Verify file was created + self.assertTrue(os.path.exists(temp_filepath)) + + # Verify structure + interpreter = LiteRTInterpreter(model_path=temp_filepath) + interpreter.allocate_tensors() + + # Should have 2 inputs (from dict) + input_details = interpreter.get_input_details() + self.assertEqual(len(input_details), 2) + + # Should have 2 outputs + output_details = interpreter.get_output_details() + self.assertEqual(len(output_details), 2) + + # Verify shapes + output_shapes = [tuple(d["shape"][1:]) for d in output_details] + self.assertIn((5,), output_shapes) + self.assertIn((3,), output_shapes) + if __name__ == "__main__": pytest.main([__file__]) From 0f9f214854120d62073300041114c4b38ab4aebd Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 12 Nov 2025 19:16:11 +0530 Subject: [PATCH 097/115] Update litert_test.py --- keras/src/export/litert_test.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 8daf6ee28dd6..ee8991a1f96b 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -1247,8 +1247,6 @@ def test_signature_def_with_multi_output_model(self): def test_dict_input_adapter_creation(self): """Test that dict input adapter is created and works correctly.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create a model with dictionary inputs input1 = layers.Input(shape=(10,), name="x") @@ -1297,8 +1295,6 @@ def test_dict_input_adapter_creation(self): def test_dict_input_signature_inference(self): """Test automatic inference of dict input signatures.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create a model with dictionary inputs (without calling it first) input1 = layers.Input(shape=(5,), name="feature_a") @@ -1333,8 +1329,6 @@ def test_dict_input_signature_inference(self): def test_dict_input_with_custom_signature(self): """Test dict input export with custom input signature.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create model with dict inputs input1 = layers.Input(shape=(10,), name="input_x") @@ -1386,8 +1380,6 @@ def test_dict_input_with_custom_signature(self): def test_dict_input_numerical_accuracy(self): """Test numerical accuracy of dict input models with complex ops.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create a more complex model with dict inputs input1 = layers.Input(shape=(20,), name="tokens") @@ -1436,8 +1428,6 @@ def test_dict_input_numerical_accuracy(self): def test_dict_input_preserves_variable_sharing(self): """Test that adapter preserves variable sharing from original model.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create model with shared layers shared_dense = layers.Dense(8, activation="relu") @@ -1494,8 +1484,6 @@ def test_dict_input_preserves_variable_sharing(self): def test_dict_input_multi_output_model(self): """Test dict input model with multiple outputs exports successfully.""" - if not litert.available: - self.skipTest("LiteRT not available") # Create model with dict inputs and multiple outputs input1 = layers.Input(shape=(10,), name="feature_1") @@ -1536,7 +1524,3 @@ def test_dict_input_multi_output_model(self): output_shapes = [tuple(d["shape"][1:]) for d in output_details] self.assertIn((5,), output_shapes) self.assertIn((3,), output_shapes) - - -if __name__ == "__main__": - pytest.main([__file__]) From ddf911f3eb9ad41381771a298b688f97fb57c58b Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 12 Nov 2025 19:17:40 +0530 Subject: [PATCH 098/115] Simplify LiteRT export error and remove verbose param Streamlined the ImportError message for LiteRT export to remove backend switching instructions. Also removed the unused 'verbose' parameter from the export_litert call. --- keras/src/models/model.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/keras/src/models/model.py b/keras/src/models/model.py index 52a865ae70bd..43c3fb8f6167 100644 --- a/keras/src/models/model.py +++ b/keras/src/models/model.py @@ -667,11 +667,7 @@ def export( # Check if LiteRT export is available (requires TensorFlow backend) if format == "litert" and backend.backend() != "tensorflow": - raise ImportError( - "LiteRT export requires TensorFlow backend. " - "Please set the backend to TensorFlow: " - "`keras.backend.set_backend('tensorflow')`" - ) + raise ImportError("LiteRT export requires TensorFlow backend.") if format == "tf_saved_model": export_saved_model( @@ -701,7 +697,6 @@ def export( export_litert( self, filepath, - verbose=verbose, input_signature=input_signature, **kwargs, ) From 2a46ab3b18b89882b957754e08420e10eb570412 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Wed, 12 Nov 2025 19:18:13 +0530 Subject: [PATCH 099/115] Update litert.py --- keras/src/export/litert.py | 218 +++++++------------------------------ 1 file changed, 39 insertions(+), 179 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 4166a7c9fb49..edf5dffba468 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -11,7 +11,6 @@ def export_litert( model, filepath, - verbose=True, input_signature=None, aot_compile_targets=None, **kwargs, @@ -21,9 +20,6 @@ def export_litert( Args: model: The Keras model to export. filepath: The path to save the exported artifact. - verbose: `bool`. Whether to print a message during export. Defaults to - `None`, which uses the default value set by different backends and - formats. input_signature: Optional input signature specification. If `None`, it will be inferred. aot_compile_targets: Optional list of LiteRT targets for AOT @@ -34,13 +30,11 @@ def export_litert( exporter = LiteRTExporter( model=model, input_signature=input_signature, - verbose=verbose, aot_compile_targets=aot_compile_targets, **kwargs, ) exporter.export(filepath) - if verbose: - io_utils.print_msg(f"Saved artifact at '{filepath}'.") + io_utils.print_msg(f"Saved artifact at '{filepath}'.") class LiteRTExporter: @@ -57,7 +51,6 @@ def __init__( self, model, input_signature=None, - verbose=False, aot_compile_targets=None, **kwargs, ): @@ -65,14 +58,13 @@ def __init__( Args: model: The Keras model to export - input_signature: Input signature specification - verbose: Whether to print progress messages during export. + input_signature: Input signature specification (e.g., TensorFlow + TensorSpec or list of TensorSpec) aot_compile_targets: List of LiteRT targets for AOT compilation **kwargs: Additional export parameters """ self.model = model self.input_signature = input_signature - self.verbose = verbose self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs @@ -151,26 +143,15 @@ def export(self, filepath): Path to exported model or compiled models if AOT compilation is performed """ - if self.verbose: - io_utils.print_msg("Starting LiteRT export...") - - # 1. Ensure the model is built by calling it if necessary + # 1. Ensure the model is built self._ensure_model_built() # 2. Resolve / infer input signature if self.input_signature is None: - if self.verbose: - io_utils.print_msg("Inferring input signature from model.") - # Try dict-specific inference first (for models with dict inputs) dict_signature = self._infer_dict_input_signature() if dict_signature is not None: self.input_signature = dict_signature - if self.verbose: - io_utils.print_msg( - f"Detected dictionary inputs with keys: " - f"{list(dict_signature.keys())}" - ) else: # Fall back to standard inference from keras.src.export.export_utils import get_input_signature @@ -210,14 +191,6 @@ def export(self, filepath): # Restore original model self.model = original_model - if self.verbose: - # Calculate model size from the serialized bytes - final_size_mb = len(tflite_model) / (1024 * 1024) - io_utils.print_msg( - f"TFLite model converted successfully. Size: " - f"{final_size_mb:.2f} MB" - ) - # 4. Save the initial TFLite model to the specified file path. assert filepath.endswith(".tflite"), ( "The LiteRT export requires the filepath to end with '.tflite'. " @@ -227,17 +200,10 @@ def export(self, filepath): with open(filepath, "wb") as f: f.write(tflite_model) - if self.verbose: - io_utils.print_msg(f"TFLite model saved to {filepath}") - # 5. Perform AOT compilation if targets are specified and LiteRT is # available compiled_models = None if self.aot_compile_targets and litert.available: - if self.verbose: - io_utils.print_msg( - "Performing AOT compilation for LiteRT targets..." - ) compiled_models = self._aot_compile(filepath) elif self.aot_compile_targets and not litert.available: logging.warning( @@ -245,16 +211,6 @@ def export(self, filepath): "Skipping AOT compilation." ) - if self.verbose: - io_utils.print_msg( - f"LiteRT export completed. Base model: {filepath}" - ) - if compiled_models: - io_utils.print_msg( - f"AOT compiled models: {len(compiled_models.models)} " - "variants" - ) - return compiled_models if compiled_models else filepath def _create_dict_adapter(self, input_signature_dict): @@ -269,11 +225,10 @@ def _create_dict_adapter(self, input_signature_dict): Returns: A Functional model that accepts list inputs and converts to dict """ - if self.verbose: - io_utils.print_msg( - f"Creating adapter for dictionary inputs: " - f"{list(input_signature_dict.keys())}" - ) + io_utils.print_msg( + f"Creating adapter for dictionary inputs: " + f"{list(input_signature_dict.keys())}" + ) input_keys = list(input_signature_dict.keys()) @@ -306,9 +261,6 @@ def _create_dict_adapter(self, input_signature_dict): self.model.non_trainable_variables ) - if self.verbose: - io_utils.print_msg("Adapter created successfully.") - return adapted_model def _ensure_model_built(self): @@ -321,9 +273,6 @@ def _ensure_model_built(self): if self.model.built: return - if self.verbose: - io_utils.print_msg("Building model before conversion...") - try: # Try to build using input_signature if available if self.input_signature: @@ -346,12 +295,7 @@ def _ensure_model_built(self): "model is already built (called once on real inputs)." ) - if self.verbose: - io_utils.print_msg("Model built successfully.") - except Exception as e: - if self.verbose: - io_utils.print_msg(f"Error building model: {e}") raise ValueError( f"Failed to build model: {e}. Please ensure the model is " "properly defined or provide an input_signature." @@ -363,16 +307,8 @@ def _convert_to_tflite(self, input_signature): Returns: A bytes object containing the serialized TFLite model. """ - is_sequential = isinstance(self.model, tf.keras.Sequential) - # Try direct conversion first for all models try: - if self.verbose: - model_type = "Sequential" if is_sequential else "Functional" - io_utils.print_msg( - f"{model_type} model detected. Trying direct conversion..." - ) - converter = tf.lite.TFLiteConverter.from_keras_model(self.model) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, @@ -385,21 +321,9 @@ def _convert_to_tflite(self, input_signature): tflite_model = converter.convert() - if self.verbose: - io_utils.print_msg("Direct conversion successful.") return tflite_model - except Exception as direct_error: - if self.verbose: - model_type = "Sequential" if is_sequential else "Functional" - io_utils.print_msg( - f"Direct conversion failed for {model_type} model: " - f"{direct_error}" - ) - io_utils.print_msg( - "Falling back to wrapper-based conversion..." - ) - + except Exception: return self._convert_with_wrapper(input_signature) def _convert_with_wrapper(self, input_signature): @@ -533,10 +457,6 @@ def __call__(self, *args, **kwargs): concrete_func = wrapper.__call__.get_concrete_function(*tensor_specs) # 3. Convert from the concrete function. - if self.verbose: - io_utils.print_msg( - "Converting concrete function to TFLite format..." - ) # Try multiple conversion strategies for better inference compatibility conversion_strategies = [ @@ -566,25 +486,11 @@ def __call__(self, *args, **kwargs): # Apply any additional converter settings from kwargs self._apply_converter_kwargs(converter) - if self.verbose: - io_utils.print_msg( - f"Trying conversion {strategy['name']}..." - ) - tflite_model = converter.convert() - if self.verbose: - io_utils.print_msg( - f"Conversion successful {strategy['name']}!" - ) - return tflite_model - except Exception as e: - if self.verbose: - io_utils.print_msg( - f"Conversion failed {strategy['name']}: {e}" - ) + except Exception: continue # If all strategies fail, raise the last error @@ -613,18 +519,12 @@ def _apply_converter_kwargs(self, converter): for key, value in self.kwargs.items(): if hasattr(converter, key): setattr(converter, key, value) - if self.verbose: - io_utils.print_msg(f"Applied converter setting: {key}") elif key == "target_spec" and isinstance(value, dict): # Handle nested target_spec settings for spec_key, spec_value in value.items(): if hasattr(converter.target_spec, spec_key): setattr(converter.target_spec, spec_key, spec_value) - if self.verbose: - io_utils.print_msg( - f"Applied target_spec setting: {spec_key}" - ) - elif self.verbose: + else: io_utils.print_msg( f"Warning: Unknown converter setting '{key}' - ignoring" ) @@ -634,58 +534,34 @@ def _aot_compile(self, tflite_filepath): if not litert.available: raise RuntimeError("LiteRT is not available for AOT compilation") - try: - # Create a LiteRT model from the TFLite file - litert_model = litert.python.aot.core.types.Model.create_from_path( - tflite_filepath - ) - - # Determine output directory - base_dir = os.path.dirname(tflite_filepath) - model_name = os.path.splitext(os.path.basename(tflite_filepath))[0] - output_dir = os.path.join(base_dir, f"{model_name}_compiled") - - if self.verbose: - io_utils.print_msg( - f"AOT compiling for targets: {self.aot_compile_targets}" - ) - io_utils.print_msg(f"Output directory: {output_dir}") - - # Perform AOT compilation - result = litert.python.aot.aot_compile( - input_model=litert_model, - output_dir=output_dir, - target=self.aot_compile_targets, - keep_going=True, # Continue even if some targets fail - ) - - if self.verbose: - io_utils.print_msg( - f"AOT compilation completed: {len(result.models)} " - f"successful, {len(result.failed_backends)} failed" - ) - if result.failed_backends: - for backend, error in result.failed_backends: - io_utils.print_msg( - f" Failed: {backend.id()} - {error}" - ) + # Create a LiteRT model from the TFLite file + litert_model = litert.python.aot.core.types.Model.create_from_path( + tflite_filepath + ) - # Print compilation report if available - try: - report = result.compilation_report() - if report: - io_utils.print_msg("Compilation Report:") - io_utils.print_msg(report) - except Exception: - pass + # Determine output directory + base_dir = os.path.dirname(tflite_filepath) + model_name = os.path.splitext(os.path.basename(tflite_filepath))[0] + output_dir = os.path.join(base_dir, f"{model_name}_compiled") + + # Perform AOT compilation + result = litert.python.aot.aot_compile( + input_model=litert_model, + output_dir=output_dir, + target=self.aot_compile_targets, + keep_going=True, # Continue even if some targets fail + ) - return result + # Print compilation report if available + try: + report = result.compilation_report() + if report: + io_utils.print_msg("Compilation Report:") + io_utils.print_msg(report) + except Exception: + pass - except Exception as e: - if self.verbose: - io_utils.print_msg(f"AOT compilation failed: {e}") - io_utils.print_msg(traceback.format_exc()) - raise RuntimeError(f"AOT compilation failed: {e}") + return result def _get_available_litert_targets(self): """Get available LiteRT targets for AOT compilation.""" @@ -698,15 +574,11 @@ def _get_available_litert_targets(self): litert.python.aot.vendors.import_vendor.AllRegisteredTarget() ) return targets if isinstance(targets, list) else [targets] - except Exception as e: - if self.verbose: - io_utils.print_msg(f"Failed to get available targets: {e}") + except Exception: return [] @classmethod - def export_with_aot( - cls, model, filepath, targets=None, verbose=True, **kwargs - ): + def export_with_aot(cls, model, filepath, targets=None, **kwargs): """ Convenience method to export a Keras model with AOT compilation. @@ -715,23 +587,11 @@ def export_with_aot( filepath: Output file path targets: List of LiteRT targets for AOT compilation (e.g., ['qualcomm', 'mediatek']) - verbose: Whether to print verbose output **kwargs: Additional arguments for the exporter Returns: CompilationResult if AOT compilation is performed, otherwise the filepath """ - exporter = cls( - model=model, verbose=verbose, aot_compile_targets=targets, **kwargs - ) + exporter = cls(model=model, aot_compile_targets=targets, **kwargs) return exporter.export(filepath) - - @classmethod - def get_available_targets(cls): - """Get list of available LiteRT AOT compilation targets.""" - if not litert.available: - return [] - - dummy_exporter = cls(model=None) - return dummy_exporter._get_available_litert_targets() From 26ac1608960f27d388420fe055ca11698f4eea14 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 13 Nov 2025 16:48:37 +0530 Subject: [PATCH 100/115] Remove model build step from LiteRTExporter Eliminated the _ensure_model_built method and its invocation from LiteRTExporter. The exporter now skips explicit model building before input signature resolution, simplifying the export process. --- keras/src/export/litert.py | 45 +------------------------------------- 1 file changed, 1 insertion(+), 44 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index edf5dffba468..748475c02179 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,8 +1,6 @@ import logging import os -import traceback -from keras.src import tree from keras.src.utils import io_utils from keras.src.utils.module_utils import litert from keras.src.utils.module_utils import tensorflow as tf @@ -143,10 +141,7 @@ def export(self, filepath): Path to exported model or compiled models if AOT compilation is performed """ - # 1. Ensure the model is built - self._ensure_model_built() - - # 2. Resolve / infer input signature + # 1. Resolve / infer input signature if self.input_signature is None: # Try dict-specific inference first (for models with dict inputs) dict_signature = self._infer_dict_input_signature() @@ -263,44 +258,6 @@ def _create_dict_adapter(self, input_signature_dict): return adapted_model - def _ensure_model_built(self): - """ - Ensures the model is built before conversion. - - For models that are not yet built, this attempts to build them - using the input signature or model.inputs. - """ - if self.model.built: - return - - try: - # Try to build using input_signature if available - if self.input_signature: - input_shapes = tree.map_structure( - lambda spec: spec.shape, self.input_signature - ) - self.model.build(input_shapes) - # Fall back to model.inputs for Functional/Sequential models - elif hasattr(self.model, "inputs") and self.model.inputs: - input_shapes = [inp.shape for inp in self.model.inputs] - if len(input_shapes) == 1: - self.model.build(input_shapes[0]) - else: - self.model.build(input_shapes) - else: - raise ValueError( - "Cannot export model to the litert format as the " - "input_signature could not be inferred. Either pass an " - "`input_signature` to `model.export()` or ensure that the " - "model is already built (called once on real inputs)." - ) - - except Exception as e: - raise ValueError( - f"Failed to build model: {e}. Please ensure the model is " - "properly defined or provide an input_signature." - ) - def _convert_to_tflite(self, input_signature): """Converts the Keras model to TFLite format. From 7c5cb3f37be6b07c154b1cb76166e85a31ba9f95 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 13 Nov 2025 17:57:09 +0530 Subject: [PATCH 101/115] Remove LiteRT AOT compilation support Eliminates Ahead-of-Time (AOT) compilation functionality from the LiteRT exporter and related tests. The exporter no longer accepts or processes aot_compile_targets, and all AOT-related methods and test cases have been removed for simplification and maintenance. AOT compilation was opensouce earlier, now removed by google as opensource feature. --- keras/src/export/litert.py | 102 ++------------- keras/src/export/litert_test.py | 211 +------------------------------- 2 files changed, 8 insertions(+), 305 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 748475c02179..4c56bb8d264f 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -10,7 +10,6 @@ def export_litert( model, filepath, input_signature=None, - aot_compile_targets=None, **kwargs, ): """Export the model as a LiteRT artifact for inference. @@ -20,15 +19,12 @@ def export_litert( filepath: The path to save the exported artifact. input_signature: Optional input signature specification. If `None`, it will be inferred. - aot_compile_targets: Optional list of LiteRT targets for AOT - compilation. **kwargs: Additional keyword arguments passed to the exporter. """ exporter = LiteRTExporter( model=model, input_signature=input_signature, - aot_compile_targets=aot_compile_targets, **kwargs, ) exporter.export(filepath) @@ -41,15 +37,13 @@ class LiteRTExporter: This class handles the conversion of Keras models for LiteRT runtime and generates a `.tflite` model file. For efficient inference on mobile and embedded devices, it creates a single callable signature based on the - model's `call()` method and supports optional Ahead-of-Time (AOT) - compilation for specific hardware targets. + model's `call()` method. """ def __init__( self, model, input_signature=None, - aot_compile_targets=None, **kwargs, ): """Initialize the LiteRT exporter. @@ -58,12 +52,10 @@ def __init__( model: The Keras model to export input_signature: Input signature specification (e.g., TensorFlow TensorSpec or list of TensorSpec) - aot_compile_targets: List of LiteRT targets for AOT compilation **kwargs: Additional export parameters """ self.model = model self.input_signature = input_signature - self.aot_compile_targets = aot_compile_targets self.kwargs = kwargs def _has_dict_inputs(self): @@ -131,15 +123,13 @@ def _infer_dict_input_signature(self): return None def export(self, filepath): - """Exports the Keras model to a TFLite file and optionally performs AOT - compilation. + """Exports the Keras model to a TFLite file. Args: filepath: Output path for the exported model Returns: - Path to exported model or compiled models if AOT compilation is - performed + Path to exported model """ # 1. Resolve / infer input signature if self.input_signature is None: @@ -195,18 +185,7 @@ def export(self, filepath): with open(filepath, "wb") as f: f.write(tflite_model) - # 5. Perform AOT compilation if targets are specified and LiteRT is - # available - compiled_models = None - if self.aot_compile_targets and litert.available: - compiled_models = self._aot_compile(filepath) - elif self.aot_compile_targets and not litert.available: - logging.warning( - "AOT compilation requested but LiteRT is not available. " - "Skipping AOT compilation." - ) - - return compiled_models if compiled_models else filepath + return filepath def _create_dict_adapter(self, input_signature_dict): """Create an adapter model that converts list inputs to dict inputs. @@ -474,81 +453,14 @@ def _apply_converter_kwargs(self, converter): return for key, value in self.kwargs.items(): - if hasattr(converter, key): - setattr(converter, key, value) - elif key == "target_spec" and isinstance(value, dict): + if key == "target_spec" and isinstance(value, dict): # Handle nested target_spec settings for spec_key, spec_value in value.items(): if hasattr(converter.target_spec, spec_key): setattr(converter.target_spec, spec_key, spec_value) + elif hasattr(converter, key): + setattr(converter, key, value) else: io_utils.print_msg( f"Warning: Unknown converter setting '{key}' - ignoring" ) - - def _aot_compile(self, tflite_filepath): - """Performs AOT compilation using LiteRT.""" - if not litert.available: - raise RuntimeError("LiteRT is not available for AOT compilation") - - # Create a LiteRT model from the TFLite file - litert_model = litert.python.aot.core.types.Model.create_from_path( - tflite_filepath - ) - - # Determine output directory - base_dir = os.path.dirname(tflite_filepath) - model_name = os.path.splitext(os.path.basename(tflite_filepath))[0] - output_dir = os.path.join(base_dir, f"{model_name}_compiled") - - # Perform AOT compilation - result = litert.python.aot.aot_compile( - input_model=litert_model, - output_dir=output_dir, - target=self.aot_compile_targets, - keep_going=True, # Continue even if some targets fail - ) - - # Print compilation report if available - try: - report = result.compilation_report() - if report: - io_utils.print_msg("Compilation Report:") - io_utils.print_msg(report) - except Exception: - pass - - return result - - def _get_available_litert_targets(self): - """Get available LiteRT targets for AOT compilation.""" - if not litert.available: - return [] - - try: - # Get all registered targets - targets = ( - litert.python.aot.vendors.import_vendor.AllRegisteredTarget() - ) - return targets if isinstance(targets, list) else [targets] - except Exception: - return [] - - @classmethod - def export_with_aot(cls, model, filepath, targets=None, **kwargs): - """ - Convenience method to export a Keras model with AOT compilation. - - Args: - model: Keras model to export - filepath: Output file path - targets: List of LiteRT targets for AOT compilation (e.g., - ['qualcomm', 'mediatek']) - **kwargs: Additional arguments for the exporter - - Returns: - CompilationResult if AOT compilation is performed, otherwise the - filepath - """ - exporter = cls(model=model, aot_compile_targets=targets, **kwargs) - return exporter.export(filepath) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index ee8991a1f96b..189121b41f80 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -731,216 +731,6 @@ def test_export_optimization_file_size_comparison(self): f"Expected significant size reduction, got {reduction_ratio:.2%}", ) - def test_aot_compile_parameter_accepted(self): - """Test that aot_compile_targets parameter is accepted without error.""" - if not litert.available: - self.skipTest("LiteRT not available") - - model = get_model("sequential") - temp_filepath = os.path.join( - self.get_temp_dir(), "model_with_aot.tflite" - ) - - # Test that parameter is accepted (compilation may or may not succeed) - # The key is that it doesn't crash - try: - result = model.export( - temp_filepath, - format="litert", - aot_compile_targets=["arm64"], - verbose=True, - ) - # Base .tflite file should always be created - self.assertTrue(os.path.exists(temp_filepath)) - - # Result could be filepath (if AOT failed/skipped) or - # CompilationResult (if AOT succeeded) - self.assertIsNotNone(result) - except Exception as e: - # If AOT infrastructure not available, that's okay as long as - # base model was exported - error_msg = str(e) - if "AOT" in error_msg or "compilation" in error_msg.lower(): - if os.path.exists(temp_filepath): - # Base model created, AOT just not available - this is fine - pass - else: - self.fail( - f"Base .tflite model should be created even if AOT " - f"fails: {error_msg}" - ) - else: - # Some other error - re-raise - raise - - def test_aot_compile_multiple_targets(self): - """Test AOT compilation with multiple targets.""" - if not litert.available: - self.skipTest("LiteRT not available") - - model = get_model("functional") - temp_filepath = os.path.join( - self.get_temp_dir(), "model_multi_aot.tflite" - ) - - # Test with multiple targets - try: - result = model.export( - temp_filepath, - format="litert", - aot_compile_targets=["arm64", "x86_64"], - verbose=True, - ) - - # Base model should exist - self.assertTrue(os.path.exists(temp_filepath)) - - # Check if result contains compilation info - if hasattr(result, "models"): - # AOT compilation succeeded - self.assertGreater( - len(result.models), - 0, - "Should have at least one compiled model", - ) - elif isinstance(result, str): - # AOT skipped, returned filepath - self.assertEqual(result, temp_filepath) - - except Exception as e: - # AOT infrastructure may not be available - if os.path.exists(temp_filepath): - # Base model was created - acceptable - pass - else: - self.fail(f"Base model should be created: {str(e)}") - - def test_aot_compile_with_optimizations(self): - """Test AOT compilation combined with quantization optimizations.""" - if not litert.available: - self.skipTest("LiteRT not available") - - model = get_model("sequential") - temp_filepath = os.path.join( - self.get_temp_dir(), "model_aot_optimized.tflite" - ) - - # Test AOT with quantization - try: - model.export( - temp_filepath, - format="litert", - aot_compile_targets=["arm64"], - optimizations=[tensorflow.lite.Optimize.DEFAULT], - verbose=True, - ) - - # Base model must exist - self.assertTrue(os.path.exists(temp_filepath)) - - # Verify model is quantized (smaller size) - size = os.path.getsize(temp_filepath) - self.assertGreater(size, 0) - - except Exception as e: - # Acceptable if AOT not available but base model created - if not os.path.exists(temp_filepath): - self.fail(f"Base model should be created: {str(e)}") - - def test_get_available_aot_targets(self): - """Test retrieving available AOT compilation targets.""" - if not litert.available: - self.skipTest("LiteRT not available") - - try: - from keras.src.export.litert import LiteRTExporter - - # This should not crash even if no targets available - targets = LiteRTExporter.get_available_targets() - - # Should return a list (possibly empty) - self.assertIsInstance(targets, list) - - # If targets are available, they should be valid - if targets: - for target in targets: - # Each target should have some identifying property - self.assertIsNotNone(target) - - except ImportError: - self.skipTest("LiteRTExporter not available") - except Exception as e: - # No targets available is acceptable - if "target" in str(e).lower() or "vendor" in str(e).lower(): - pass - else: - raise - - def test_aot_compile_without_litert_available(self): - """Test that export works gracefully when LiteRT AOT is unavailable.""" - # This test verifies the fallback behavior - model = get_model("sequential") - temp_filepath = os.path.join(self.get_temp_dir(), "model_no_aot.tflite") - - # Even if we request AOT, export should succeed and create base model - # AOT compilation may fail, but that's acceptable as long as base model - # is created - try: - model.export( - temp_filepath, - format="litert", - aot_compile_targets=["arm64"], - verbose=False, # Suppress warnings in test output - ) - - # Base .tflite file should be created regardless - self.assertTrue(os.path.exists(temp_filepath)) - - except RuntimeError as e: - # AOT compilation may fail if infrastructure not available - # This is acceptable as long as base model is created - if "AOT" in str(e): - # Verify base model was created before AOT failure - self.assertTrue( - os.path.exists(temp_filepath), - "Base .tflite model should be created even if AOT fails", - ) - else: - # Other runtime errors should be raised - raise - - def test_export_with_aot_class_method(self): - """Test the export_with_aot class method.""" - if not litert.available: - self.skipTest("LiteRT not available") - - try: - from keras.src.export.litert import LiteRTExporter - - model = get_model("functional") - temp_filepath = os.path.join( - self.get_temp_dir(), "model_class_method_aot.tflite" - ) - - # Test the class method - result = LiteRTExporter.export_with_aot( - model=model, - filepath=temp_filepath, - targets=["arm64"], - verbose=True, - ) - - # Base model should exist - self.assertTrue(os.path.exists(temp_filepath)) - self.assertIsNotNone(result) - - except ImportError: - self.skipTest("LiteRTExporter not available") - except Exception as e: - # AOT may not be available, but base model should be created - if not os.path.exists(temp_filepath): - self.fail(f"Base model should be created: {str(e)}") - def test_signature_def_with_named_model(self): """Test that exported models have SignatureDef with input names.""" if LiteRTInterpreter is None: @@ -1172,6 +962,7 @@ def test_signature_def_with_multi_input_model(self): ) # For single output, TFLite uses generic names like 'output_0' + # Extract the single output value if len(sig_output) == 1: litert_output = list(sig_output.values())[0] else: From 537880f50568ddccc0299c9d1f2cf162de288a56 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 13 Nov 2025 18:08:17 +0530 Subject: [PATCH 102/115] Refactor import statements in litert.py Moved import statements for export_utils functions to the top of the file and removed redundant inline imports. This improves code clarity and reduces repeated imports within methods. --- keras/src/export/litert.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 4c56bb8d264f..a32eec1dd513 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,8 +1,7 @@ -import logging -import os - +from keras.src.export.export_utils import get_input_signature +from keras.src.export.export_utils import make_input_spec +from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils -from keras.src.utils.module_utils import litert from keras.src.utils.module_utils import tensorflow as tf @@ -102,8 +101,6 @@ def _infer_dict_input_signature(self): if hasattr(self.model, "_inputs_struct") and isinstance( self.model._inputs_struct, dict ): - from keras.src.export.export_utils import make_input_spec - return { name: make_input_spec(inp) for name, inp in self.model._inputs_struct.items() @@ -113,8 +110,6 @@ def _infer_dict_input_signature(self): if hasattr(self.model, "inputs") and isinstance( self.model.inputs, dict ): - from keras.src.export.export_utils import make_input_spec - return { name: make_input_spec(inp) for name, inp in self.model.inputs.items() @@ -139,8 +134,6 @@ def export(self, filepath): self.input_signature = dict_signature else: # Fall back to standard inference - from keras.src.export.export_utils import get_input_signature - self.input_signature = get_input_signature(self.model) # 3. Handle dictionary inputs by creating an adapter @@ -384,8 +377,6 @@ def __call__(self, *args, **kwargs): elif not isinstance(input_signature, (list, tuple)): input_signature = [input_signature] - from keras.src.export.export_utils import make_tf_tensor_spec - tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] # Pass tensor specs as positional arguments to get the concrete From 211b44d57aff2bffac1444e7a88265b80eb71cfa Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 13 Nov 2025 18:11:44 +0530 Subject: [PATCH 103/115] Raise ValueError for invalid TFLite file extension Replaces assertion with ValueError when the export filepath does not end with '.tflite' in LiteRTExporter. Updates corresponding test to expect ValueError instead of AssertionError for incorrect file extension. --- keras/src/export/litert.py | 9 +++++---- keras/src/export/litert_test.py | 4 ++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index a32eec1dd513..5afad56ab1b5 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -170,10 +170,11 @@ def export(self, filepath): self.model = original_model # 4. Save the initial TFLite model to the specified file path. - assert filepath.endswith(".tflite"), ( - "The LiteRT export requires the filepath to end with '.tflite'. " - f"Got: {filepath}" - ) + if not filepath.endswith(".tflite"): + raise ValueError( + "The LiteRT export requires the filepath to end with " + "'.tflite'. Got: {filepath}" + ) with open(filepath, "wb") as f: f.write(tflite_model) diff --git a/keras/src/export/litert_test.py b/keras/src/export/litert_test.py index 189121b41f80..c0c06d70a167 100644 --- a/keras/src/export/litert_test.py +++ b/keras/src/export/litert_test.py @@ -421,8 +421,8 @@ def test_export_invalid_filepath(self): temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.txt") - # Should raise AssertionError for wrong extension - with self.assertRaises(AssertionError): + # Should raise ValueError for wrong extension + with self.assertRaises(ValueError): model.export(temp_filepath, format="litert") def test_export_subclass_model(self): From 4199c69af7343e1294a165198ef1298487cf2740 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 15:12:25 +0530 Subject: [PATCH 104/115] Refactor tracked collection conversion and add _get_save_spec Moved tracked collection conversion logic into a dedicated method with no automatic dependency tracking to avoid TensorFlow wrappers. Added a compatibility shim for TensorFlow saving utilities by implementing the _get_save_spec method, which generates TensorSpec objects for model input signatures. Needed for liteRT export. --- keras/src/backend/tensorflow/layer.py | 72 ++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 8 deletions(-) diff --git a/keras/src/backend/tensorflow/layer.py b/keras/src/backend/tensorflow/layer.py index 2e0c4cd2c144..030cb48ef7c7 100644 --- a/keras/src/backend/tensorflow/layer.py +++ b/keras/src/backend/tensorflow/layer.py @@ -62,17 +62,73 @@ def _trackable_children(self, save_type="checkpoint", **kwargs): self.test_function = test_function self.predict_function = predict_function - for tracked_attr in self._tracked: - tracked_item = getattr(self, tracked_attr) - if isinstance(tracked_item, tracking.TrackedList): - children[tracked_attr] = list(tracked_item) - if isinstance(tracked_item, tracking.TrackedDict): - children[tracked_attr] = dict(tracked_item) - if isinstance(tracked_item, tracking.TrackedSet): - children[tracked_attr] = list(tracked_item) + # Convert Keras tracked collections to plain Python structures + # without creating TensorFlow trackable dependencies + self._convert_tracked_collections(children) return children + @tf.__internal__.tracking.no_automatic_dependency_tracking + def _convert_tracked_collections(self, children): + """Convert TrackedList/Dict/Set to plain Python structures. + + The decorator prevents TensorFlow from automatically wrapping + these conversions in _DictWrapper objects. + """ + for tracked_attr in self._tracked: + tracked_item = getattr(self, tracked_attr) + if isinstance(tracked_item, tracking.TrackedList): + children[tracked_attr] = list(tracked_item) + if isinstance(tracked_item, tracking.TrackedDict): + children[tracked_attr] = dict(tracked_item) + if isinstance(tracked_item, tracking.TrackedSet): + children[tracked_attr] = list(tracked_item) + + def _get_save_spec(self, dynamic_batch=True): + """Compatibility shim for TensorFlow saving utilities. + + TensorFlow's SavedModel / TFLite export paths (e.g., + tf.lite.TFLiteConverter.from_keras_model) expect a `_get_save_spec` + method on models. This method generates TensorSpec objects + describing the model's input signature. + + Args: + dynamic_batch: whether to set the batch dimension to `None`. + + Returns: + A TensorSpec, list or dict mirroring the model inputs, or + `None` when specs cannot be inferred. + """ + # Prefer the base implementation if available + try: + return super()._get_save_spec(dynamic_batch) + except AttributeError: + # Fall back to building specs from `self.inputs` + inputs = getattr(self, "inputs", None) + if inputs is None: + return None + + def _make_spec(t): + # t is a tf.Tensor-like object + shape = list(t.shape) + if dynamic_batch and len(shape) > 0: + shape[0] = None + # Convert to tuple for TensorSpec + try: + name = getattr(t, "name", None) + return tf.TensorSpec( + shape=tuple(shape), dtype=t.dtype, name=name + ) + except (ImportError, ModuleNotFoundError): + return None + + # Handle dict/list/single tensor inputs + if isinstance(inputs, dict): + return {k: _make_spec(v) for k, v in inputs.items()} + if isinstance(inputs, (list, tuple)): + return [_make_spec(t) for t in inputs] + return _make_spec(inputs) + @property def _default_save_signature(self): """For SavedModel support: returns the default serving signature.""" From d376afb130a91db53b9b2051f21a9c809ab37330 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 16:39:19 +0530 Subject: [PATCH 105/115] Refactor TFLite conversion fallback and add verbose option Refactored the fallback TFLite conversion method to use a direct tf.function approach instead of a tf.Module wrapper, simplifying the conversion logic. Added a 'verbose' parameter to export_litert and LiteRTExporter for progress messaging. Improved converter kwargs handling to only apply known TFLite settings. --- keras/src/export/litert.py | 175 ++++++++++++------------------------- 1 file changed, 56 insertions(+), 119 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 5afad56ab1b5..819f1d64a5a4 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -8,6 +8,7 @@ def export_litert( model, filepath, + verbose=True, input_signature=None, **kwargs, ): @@ -16,6 +17,7 @@ def export_litert( Args: model: The Keras model to export. filepath: The path to save the exported artifact. + verbose: Whether to print export progress messages. Defaults to True. input_signature: Optional input signature specification. If `None`, it will be inferred. **kwargs: Additional keyword arguments passed to the exporter. @@ -24,6 +26,7 @@ def export_litert( exporter = LiteRTExporter( model=model, input_signature=input_signature, + verbose=verbose, **kwargs, ) exporter.export(filepath) @@ -42,6 +45,7 @@ class LiteRTExporter: def __init__( self, model, + verbose=True, input_signature=None, **kwargs, ): @@ -49,11 +53,14 @@ def __init__( Args: model: The Keras model to export + verbose: Whether to print export progress messages. + Defaults to True. input_signature: Input signature specification (e.g., TensorFlow TensorSpec or list of TensorSpec) **kwargs: Additional export parameters """ self.model = model + self.verbose = verbose self.input_signature = input_signature self.kwargs = kwargs @@ -257,107 +264,18 @@ def _convert_to_tflite(self, input_signature): return self._convert_with_wrapper(input_signature) def _convert_with_wrapper(self, input_signature): - """Converts the model to TFLite using the tf.Module wrapper. + """Converts the model to TFLite using SavedModel as intermediate. + + This fallback method is used when direct Keras conversion fails. + It uses TensorFlow's SavedModel format as an intermediate step. Returns: A bytes object containing the serialized TFLite model. """ - - # Define the wrapper class dynamically to avoid module-level - # tf.Module inheritance - class KerasModelWrapper(tf.Module): - """ - A tf.Module wrapper for a Keras model. - - This wrapper is designed to be a clean, serializable interface - for TFLite conversion. It holds the Keras model and exposes a - single `__call__` method that is decorated with `tf.function`. - Crucially, it also ensures all variables from the Keras model - are tracked by the SavedModel format, which is key to including - them in the final TFLite model. - """ - - def __init__(self, model): - super().__init__() - # Store the model reference in a way that TensorFlow won't - # try to track it. This prevents the _DictWrapper error during - # SavedModel serialization - object.__setattr__(self, "_model", model) - - # Track all variables from the Keras model using proper - # tf.Module methods. This ensures proper variable handling for - # stateful layers like BatchNorm - with self.name_scope: - for i, var in enumerate(model.variables): - # Use a different attribute name to avoid conflicts with - # tf.Module's variables property - setattr(self, f"model_var_{i}", var) - - @tf.function - def __call__(self, *args, **kwargs): - """The single entry point for the exported model.""" - # Handle both single and multi-input cases - if args and not kwargs: - # Called with positional arguments - if len(args) == 1: - return self._model(args[0]) - else: - # Multi-input case: Functional models expect a list, - # not unpacked positional args - if ( - hasattr(self._model, "inputs") - and len(self._model.inputs) > 1 - ): - return self._model(list(args)) - else: - return self._model(*args) - elif kwargs and not args: - # Called with keyword arguments - if len(kwargs) == 1 and "inputs" in kwargs: - # Single input case - return self._model(kwargs["inputs"]) - else: - # Multi-input case - convert to list/dict format - # expected by model - if ( - hasattr(self._model, "inputs") - and len(self._model.inputs) > 1 - ): - # Multi-input functional model - input_list = [] - missing_inputs = [] - for input_layer in self._model.inputs: - input_name = input_layer.name - if input_name in kwargs: - input_list.append(kwargs[input_name]) - else: - missing_inputs.append(input_name) - - if missing_inputs: - available = list(kwargs.keys()) - raise ValueError( - f"Missing required inputs for multi-input " - f"model: {missing_inputs}. " - f"Available kwargs: {available}. " - f"Please provide all inputs by name." - ) - - return self._model(input_list) - else: - # Single input model called with named arguments - return self._model(list(kwargs.values())[0]) - else: - # Fallback to original call - return self._model(*args, **kwargs) - - # 1. Wrap the Keras model in our clean tf.Module. - wrapper = KerasModelWrapper(self.model) - - # 2. Get a concrete function from the wrapper. - # Handle dict input signatures for multi-input models + # Normalize input_signature to list format for concrete function if isinstance(input_signature, dict): - # For Functional models with multiple inputs, convert dict to - # ordered list matching model.inputs order + # For multi-input models with dict signature, convert to + # ordered list if hasattr(self.model, "inputs") and len(self.model.inputs) > 1: input_signature_list = [] for input_layer in self.model.inputs: @@ -378,30 +296,26 @@ def __call__(self, *args, **kwargs): elif not isinstance(input_signature, (list, tuple)): input_signature = [input_signature] + # Convert to TensorSpec tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] - # Pass tensor specs as positional arguments to get the concrete - # function. - concrete_func = wrapper.__call__.get_concrete_function(*tensor_specs) + # Get concrete function from the model + @tf.function + def model_fn(*args): + return self.model(*args) - # 3. Convert from the concrete function. + concrete_func = model_fn.get_concrete_function(*tensor_specs) - # Try multiple conversion strategies for better inference compatibility + # Try conversion with different strategies conversion_strategies = [ - { - "experimental_enable_resource_variables": False, - "name": "without resource variables", - }, - { - "experimental_enable_resource_variables": True, - "name": "with resource variables", - }, + {"experimental_enable_resource_variables": False}, + {"experimental_enable_resource_variables": True}, ] for strategy in conversion_strategies: try: converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], trackable_obj=wrapper + [concrete_func], self.model ) converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, @@ -415,28 +329,35 @@ def __call__(self, *args, **kwargs): self._apply_converter_kwargs(converter) tflite_model = converter.convert() - return tflite_model except Exception: continue - # If all strategies fail, raise the last error + # If all strategies fail, raise an error raise RuntimeError( - "All conversion strategies failed for wrapper-based conversion" + "Failed to convert model to TFLite. " + "Both direct Keras conversion and concrete function " + "conversion failed." ) def _apply_converter_kwargs(self, converter): """Apply additional converter settings from kwargs. - This method applies any TFLite converter settings passed via kwargs - to the converter object. Common settings include: + This method applies TFLite converter settings passed via kwargs. + Only known LiteRT/TFLite converter settings are applied. Other kwargs + (like format-specific settings for other export formats) are ignored. + + Known LiteRT converter settings include: - optimizations: List of optimization options - (e.g., [tf.lite.Optimize.DEFAULT]) - representative_dataset: Dataset generator for quantization - - target_spec: Additional target specification settings - - inference_input_type: Input type for inference (e.g., tf.int8) - - inference_output_type: Output type for inference (e.g., tf.int8) + - experimental_new_quantizer: Enable experimental quantizer + - allow_custom_ops: Allow custom operations + - enable_select_tf_ops: Enable select TF ops + - target_spec: Target specification settings + - inference_input_type: Input type for inference + - inference_output_type: Output type for inference + - experimental_enable_resource_variables: Enable resource variables Args: converter: tf.lite.TFLiteConverter instance to configure @@ -444,13 +365,29 @@ def _apply_converter_kwargs(self, converter): if not self.kwargs: return + # Known TFLite converter attributes that can be set + known_converter_attrs = { + "optimizations", + "representative_dataset", + "experimental_new_quantizer", + "allow_custom_ops", + "enable_select_tf_ops", + "target_spec", + "inference_input_type", + "inference_output_type", + "experimental_enable_resource_variables", + } + for key, value in self.kwargs.items(): if key == "target_spec" and isinstance(value, dict): # Handle nested target_spec settings for spec_key, spec_value in value.items(): if hasattr(converter.target_spec, spec_key): setattr(converter.target_spec, spec_key, spec_value) + elif key in known_converter_attrs and hasattr(converter, key): + setattr(converter, key, value) elif hasattr(converter, key): + # Allow any attribute that exists on the converter setattr(converter, key, value) else: io_utils.print_msg( From 66acb8f673b5352758d56f769e71bf8d179d3fc9 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 16:43:49 +0530 Subject: [PATCH 106/115] Remove verbose argument from LiteRT export functions Eliminates the 'verbose' parameter from export_litert and LiteRTExporter, simplifying the API and reducing unnecessary options for export progress messages. --- keras/src/export/litert.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 819f1d64a5a4..fce8d567d57a 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -8,7 +8,6 @@ def export_litert( model, filepath, - verbose=True, input_signature=None, **kwargs, ): @@ -17,7 +16,6 @@ def export_litert( Args: model: The Keras model to export. filepath: The path to save the exported artifact. - verbose: Whether to print export progress messages. Defaults to True. input_signature: Optional input signature specification. If `None`, it will be inferred. **kwargs: Additional keyword arguments passed to the exporter. @@ -26,7 +24,6 @@ def export_litert( exporter = LiteRTExporter( model=model, input_signature=input_signature, - verbose=verbose, **kwargs, ) exporter.export(filepath) @@ -45,7 +42,6 @@ class LiteRTExporter: def __init__( self, model, - verbose=True, input_signature=None, **kwargs, ): @@ -53,14 +49,11 @@ def __init__( Args: model: The Keras model to export - verbose: Whether to print export progress messages. - Defaults to True. input_signature: Input signature specification (e.g., TensorFlow TensorSpec or list of TensorSpec) **kwargs: Additional export parameters """ self.model = model - self.verbose = verbose self.input_signature = input_signature self.kwargs = kwargs From 3c2a4be4e0345f199f0bd63ae163b76217f1ea50 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 16:50:17 +0530 Subject: [PATCH 107/115] Enable resource variables for TFLite conversion Update LiteRTExporter to always enable resource variables during TFLite conversion, as Keras 3 only supports resource variables. Simplify conversion logic by removing strategy loop and error handling for unsupported conversion paths. --- keras/src/export/litert.py | 49 +++++++++++++------------------------- 1 file changed, 16 insertions(+), 33 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index fce8d567d57a..e7de3ffa0003 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -244,7 +244,8 @@ def _convert_to_tflite(self, input_signature): tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS, ] - converter.experimental_enable_resource_variables = False + # Keras 3 only supports resource variables + converter.experimental_enable_resource_variables = True # Apply any additional converter settings from kwargs self._apply_converter_kwargs(converter) @@ -299,40 +300,22 @@ def model_fn(*args): concrete_func = model_fn.get_concrete_function(*tensor_specs) - # Try conversion with different strategies - conversion_strategies = [ - {"experimental_enable_resource_variables": False}, - {"experimental_enable_resource_variables": True}, + # Convert using concrete function + converter = tf.lite.TFLiteConverter.from_concrete_functions( + [concrete_func], self.model + ) + converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, ] + # Keras 3 only supports resource variables + converter.experimental_enable_resource_variables = True - for strategy in conversion_strategies: - try: - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], self.model - ) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, - ] - converter.experimental_enable_resource_variables = strategy[ - "experimental_enable_resource_variables" - ] - - # Apply any additional converter settings from kwargs - self._apply_converter_kwargs(converter) - - tflite_model = converter.convert() - return tflite_model - - except Exception: - continue - - # If all strategies fail, raise an error - raise RuntimeError( - "Failed to convert model to TFLite. " - "Both direct Keras conversion and concrete function " - "conversion failed." - ) + # Apply any additional converter settings from kwargs + self._apply_converter_kwargs(converter) + + tflite_model = converter.convert() + return tflite_model def _apply_converter_kwargs(self, converter): """Apply additional converter settings from kwargs. From 071c819c5f2710c63862b8c4ec003a94181093dc Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 16:58:21 +0530 Subject: [PATCH 108/115] Remove unused _has_dict_inputs method from LiteRTExporter Deleted the _has_dict_inputs method from the LiteRTExporter class in litert.py as it is no longer used. This helps clean up the code and improve maintainability. --- keras/src/export/litert.py | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index e7de3ffa0003..11c04d43ffe8 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -57,38 +57,6 @@ def __init__( self.input_signature = input_signature self.kwargs = kwargs - def _has_dict_inputs(self): - """Check if the model expects dictionary inputs. - - Returns: - bool: True if model has dict inputs, False otherwise. - """ - # Check if model.inputs is a dict (most reliable for built models) - if hasattr(self.model, "inputs") and isinstance( - self.model.inputs, dict - ): - return True - - # Check if _inputs_struct is a dict (for Functional models) - if hasattr(self.model, "_inputs_struct") and isinstance( - self.model._inputs_struct, dict - ): - return True - - # Check if provided input_signature is a dict - if self.input_signature is not None: - if isinstance(self.input_signature, dict): - return True - # Check for wrapped dict (Functional model pattern) - if ( - isinstance(self.input_signature, (list, tuple)) - and len(self.input_signature) == 1 - and isinstance(self.input_signature[0], dict) - ): - return True - - return False - def _infer_dict_input_signature(self): """Infer input signature from a model with dict inputs. From f17422c5d35f916740debd3e7a17b739d479312f Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 17:12:03 +0530 Subject: [PATCH 109/115] Refactor converter kwargs handling in LiteRTExporter Simplifies and enforces stricter validation for converter kwargs in LiteRTExporter. Unknown attributes now raise ValueError instead of being ignored, and the method no longer maintains a list of known attributes, relying on attribute existence checks. --- keras/src/export/litert.py | 51 ++++++++------------------------------ 1 file changed, 10 insertions(+), 41 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 11c04d43ffe8..76f9998079a7 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -288,52 +288,21 @@ def model_fn(*args): def _apply_converter_kwargs(self, converter): """Apply additional converter settings from kwargs. - This method applies TFLite converter settings passed via kwargs. - Only known LiteRT/TFLite converter settings are applied. Other kwargs - (like format-specific settings for other export formats) are ignored. - - Known LiteRT converter settings include: - - optimizations: List of optimization options - - representative_dataset: Dataset generator for quantization - - experimental_new_quantizer: Enable experimental quantizer - - allow_custom_ops: Allow custom operations - - enable_select_tf_ops: Enable select TF ops - - target_spec: Target specification settings - - inference_input_type: Input type for inference - - inference_output_type: Output type for inference - - experimental_enable_resource_variables: Enable resource variables - Args: converter: tf.lite.TFLiteConverter instance to configure - """ - if not self.kwargs: - return - - # Known TFLite converter attributes that can be set - known_converter_attrs = { - "optimizations", - "representative_dataset", - "experimental_new_quantizer", - "allow_custom_ops", - "enable_select_tf_ops", - "target_spec", - "inference_input_type", - "inference_output_type", - "experimental_enable_resource_variables", - } - for key, value in self.kwargs.items(): - if key == "target_spec" and isinstance(value, dict): + Raises: + ValueError: If any kwarg is not a valid converter attribute + """ + for attr, value in self.kwargs.items(): + if attr == "target_spec" and isinstance(value, dict): # Handle nested target_spec settings for spec_key, spec_value in value.items(): if hasattr(converter.target_spec, spec_key): setattr(converter.target_spec, spec_key, spec_value) - elif key in known_converter_attrs and hasattr(converter, key): - setattr(converter, key, value) - elif hasattr(converter, key): - # Allow any attribute that exists on the converter - setattr(converter, key, value) + else: + raise ValueError(f"Unknown target_spec attribute '{spec_key}'") + elif hasattr(converter, attr): + setattr(converter, attr, value) else: - io_utils.print_msg( - f"Warning: Unknown converter setting '{key}' - ignoring" - ) + raise ValueError(f"Unknown converter attribute '{attr}'") From a550fccdd2e52d5e21228c5f0933643a1b4642e7 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Mon, 17 Nov 2025 17:31:52 +0530 Subject: [PATCH 110/115] Update litert.py --- keras/src/export/litert.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 76f9998079a7..94cdd473f969 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -301,7 +301,9 @@ def _apply_converter_kwargs(self, converter): if hasattr(converter.target_spec, spec_key): setattr(converter.target_spec, spec_key, spec_value) else: - raise ValueError(f"Unknown target_spec attribute '{spec_key}'") + raise ValueError( + f"Unknown target_spec attribute '{spec_key}'" + ) elif hasattr(converter, attr): setattr(converter, attr, value) else: From b8267c6e9aaab1dc979366a6892a3815b1dd61d9 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 18 Nov 2025 18:43:20 +0530 Subject: [PATCH 111/115] Remove SavedModel fallback in TFLite conversion Eliminates the fallback method that used SavedModel as an intermediate step for TFLite conversion. Now, if direct conversion fails, a RuntimeError is raised with a helpful message, simplifying the export logic and error handling. --- keras/src/export/litert.py | 69 ++++---------------------------------- 1 file changed, 6 insertions(+), 63 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 94cdd473f969..a90935e2ef93 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,6 +1,5 @@ from keras.src.export.export_utils import get_input_signature from keras.src.export.export_utils import make_input_spec -from keras.src.export.export_utils import make_tf_tensor_spec from keras.src.utils import io_utils from keras.src.utils.module_utils import tensorflow as tf @@ -222,68 +221,12 @@ def _convert_to_tflite(self, input_signature): return tflite_model - except Exception: - return self._convert_with_wrapper(input_signature) - - def _convert_with_wrapper(self, input_signature): - """Converts the model to TFLite using SavedModel as intermediate. - - This fallback method is used when direct Keras conversion fails. - It uses TensorFlow's SavedModel format as an intermediate step. - - Returns: - A bytes object containing the serialized TFLite model. - """ - # Normalize input_signature to list format for concrete function - if isinstance(input_signature, dict): - # For multi-input models with dict signature, convert to - # ordered list - if hasattr(self.model, "inputs") and len(self.model.inputs) > 1: - input_signature_list = [] - for input_layer in self.model.inputs: - input_name = input_layer.name - if input_name not in input_signature: - raise ValueError( - f"Missing input '{input_name}' in input_signature. " - f"Model expects inputs: " - f"{[inp.name for inp in self.model.inputs]}, " - f"but input_signature only has: " - f"{list(input_signature.keys())}" - ) - input_signature_list.append(input_signature[input_name]) - input_signature = input_signature_list - else: - # Single-input model with dict signature - input_signature = [input_signature] - elif not isinstance(input_signature, (list, tuple)): - input_signature = [input_signature] - - # Convert to TensorSpec - tensor_specs = [make_tf_tensor_spec(spec) for spec in input_signature] - - # Get concrete function from the model - @tf.function - def model_fn(*args): - return self.model(*args) - - concrete_func = model_fn.get_concrete_function(*tensor_specs) - - # Convert using concrete function - converter = tf.lite.TFLiteConverter.from_concrete_functions( - [concrete_func], self.model - ) - converter.target_spec.supported_ops = [ - tf.lite.OpsSet.TFLITE_BUILTINS, - tf.lite.OpsSet.SELECT_TF_OPS, - ] - # Keras 3 only supports resource variables - converter.experimental_enable_resource_variables = True - - # Apply any additional converter settings from kwargs - self._apply_converter_kwargs(converter) - - tflite_model = converter.convert() - return tflite_model + except Exception as e: + # If direct conversion fails, raise the error with helpful message + raise RuntimeError( + f"Direct TFLite conversion failed. This may be due to model " + f"complexity or unsupported operations. Error: {e}" + ) from e def _apply_converter_kwargs(self, converter): """Apply additional converter settings from kwargs. From 46ead2fdde2e850fa0ae7b2e68cf8cc3da4ece89 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Tue, 18 Nov 2025 20:00:30 +0530 Subject: [PATCH 112/115] Refactor to use local keras layers and models imports Replaces tf.keras.layers and tf.keras.Model references with locally imported layers and models from keras.src. This improves consistency and may help with modularity or compatibility within the keras.src namespace. --- keras/src/export/litert.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index a90935e2ef93..891a008c22bf 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,3 +1,5 @@ +from keras.src import layers +from keras.src import models from keras.src.export.export_utils import get_input_signature from keras.src.export.export_utils import make_input_spec from keras.src.utils import io_utils @@ -171,7 +173,7 @@ def _create_dict_adapter(self, input_signature_dict): input_layers = [] for name in input_keys: spec = input_signature_dict[name] - input_layer = tf.keras.layers.Input( + input_layer = layers.Input( shape=spec.shape[1:], # Remove batch dimension dtype=spec.dtype, name=name, @@ -187,7 +189,7 @@ def _create_dict_adapter(self, input_signature_dict): outputs = self.model(inputs_dict) # Build as Functional model (list inputs -> dict -> model -> output) - adapted_model = tf.keras.Model(inputs=input_layers, outputs=outputs) + adapted_model = models.Model(inputs=input_layers, outputs=outputs) # Preserve the original model's variables adapted_model._variables = self.model.variables From b523552205cfaf19729c0876d73e0e94de1a5767 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 27 Nov 2025 11:17:20 +0530 Subject: [PATCH 113/115] Refactor input signature handling for TFLite export Improves input signature inference and adapter creation for models with nested input structures (dicts, lists, etc.) in LiteRTExporter. Moves TensorSpec creation logic to export_utils and updates TFLayer to use tree.map_structure for save spec generation. Removes legacy dict-specific input signature inference and centralizes input structure handling for TFLite conversion. --- keras/src/backend/tensorflow/layer.py | 36 ++----- keras/src/export/export_utils.py | 24 ++++- keras/src/export/litert.py | 144 ++++++++++++-------------- 3 files changed, 96 insertions(+), 108 deletions(-) diff --git a/keras/src/backend/tensorflow/layer.py b/keras/src/backend/tensorflow/layer.py index 030cb48ef7c7..d1022b96b699 100644 --- a/keras/src/backend/tensorflow/layer.py +++ b/keras/src/backend/tensorflow/layer.py @@ -13,7 +13,6 @@ def __init__(self, *args, **kwargs): self._saved_model_arg_spec = None self._tracked = [] - @tf.__internal__.tracking.no_automatic_dependency_tracking def _set_save_spec(self, inputs, args=None, kwargs=None): """Defines the save spec so that serialization can trace layer calls. @@ -45,6 +44,7 @@ def _set_save_spec(self, inputs, args=None, kwargs=None): kwargs_spec, ) + @tf.__internal__.tracking.no_automatic_dependency_tracking def _trackable_children(self, save_type="checkpoint", **kwargs): if save_type == "savedmodel": # SavedModel needs to ignore the execution functions. @@ -68,13 +68,8 @@ def _trackable_children(self, save_type="checkpoint", **kwargs): return children - @tf.__internal__.tracking.no_automatic_dependency_tracking def _convert_tracked_collections(self, children): - """Convert TrackedList/Dict/Set to plain Python structures. - - The decorator prevents TensorFlow from automatically wrapping - these conversions in _DictWrapper objects. - """ + """Convert TrackedList/Dict/Set to plain Python structures.""" for tracked_attr in self._tracked: tracked_item = getattr(self, tracked_attr) if isinstance(tracked_item, tracking.TrackedList): @@ -103,31 +98,18 @@ def _get_save_spec(self, dynamic_batch=True): try: return super()._get_save_spec(dynamic_batch) except AttributeError: + # Lazy import to avoid circular dependency + from keras.src.export.export_utils import make_tf_tensor_spec + # Fall back to building specs from `self.inputs` inputs = getattr(self, "inputs", None) if inputs is None: return None - def _make_spec(t): - # t is a tf.Tensor-like object - shape = list(t.shape) - if dynamic_batch and len(shape) > 0: - shape[0] = None - # Convert to tuple for TensorSpec - try: - name = getattr(t, "name", None) - return tf.TensorSpec( - shape=tuple(shape), dtype=t.dtype, name=name - ) - except (ImportError, ModuleNotFoundError): - return None - - # Handle dict/list/single tensor inputs - if isinstance(inputs, dict): - return {k: _make_spec(v) for k, v in inputs.items()} - if isinstance(inputs, (list, tuple)): - return [_make_spec(t) for t in inputs] - return _make_spec(inputs) + return tree.map_structure( + lambda x: make_tf_tensor_spec(x, dynamic_batch=dynamic_batch), + inputs, + ) @property def _default_save_signature(self): diff --git a/keras/src/export/export_utils.py b/keras/src/export/export_utils.py index d61868447b0c..0a898bf22420 100644 --- a/keras/src/export/export_utils.py +++ b/keras/src/export/export_utils.py @@ -102,13 +102,33 @@ def make_input_spec(x): return input_spec -def make_tf_tensor_spec(x): +def make_tf_tensor_spec(x, dynamic_batch=True): + """Create a TensorSpec from various input types. + + Args: + x: Input to convert (tf.TensorSpec, KerasTensor, or backend tensor). + dynamic_batch: If True, set the batch dimension to None. + + Returns: + A tf.TensorSpec instance. + """ if isinstance(x, tf.TensorSpec): tensor_spec = x + # Adjust batch dimension if needed + if dynamic_batch and len(tensor_spec.shape) > 0: + shape = list(tensor_spec.shape) + shape[0] = None + tensor_spec = tf.TensorSpec( + tuple(shape), dtype=tensor_spec.dtype, name=tensor_spec.name + ) else: input_spec = make_input_spec(x) + shape = input_spec.shape + # Adjust batch dimension if needed and shape is not None + if dynamic_batch and shape is not None and len(shape) > 0: + shape = tuple(None if i == 0 else s for i, s in enumerate(shape)) tensor_spec = tf.TensorSpec( - input_spec.shape, dtype=input_spec.dtype, name=input_spec.name + shape, dtype=input_spec.dtype, name=input_spec.name ) return tensor_spec diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 891a008c22bf..6c2bc1ce184b 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -1,7 +1,7 @@ from keras.src import layers from keras.src import models +from keras.src import tree from keras.src.export.export_utils import get_input_signature -from keras.src.export.export_utils import make_input_spec from keras.src.utils import io_utils from keras.src.utils.module_utils import tensorflow as tf @@ -58,34 +58,6 @@ def __init__( self.input_signature = input_signature self.kwargs = kwargs - def _infer_dict_input_signature(self): - """Infer input signature from a model with dict inputs. - - This reads the actual shapes and dtypes from model._inputs_struct. - - Returns: - dict or None: Dictionary mapping input names to InputSpec, or None - """ - # Check _inputs_struct first (preserves dict structure) - if hasattr(self.model, "_inputs_struct") and isinstance( - self.model._inputs_struct, dict - ): - return { - name: make_input_spec(inp) - for name, inp in self.model._inputs_struct.items() - } - - # Fall back to model.inputs if it's a dict - if hasattr(self.model, "inputs") and isinstance( - self.model.inputs, dict - ): - return { - name: make_input_spec(inp) - for name, inp in self.model.inputs.items() - } - - return None - def export(self, filepath): """Exports the Keras model to a TFLite file. @@ -97,33 +69,38 @@ def export(self, filepath): """ # 1. Resolve / infer input signature if self.input_signature is None: - # Try dict-specific inference first (for models with dict inputs) - dict_signature = self._infer_dict_input_signature() - if dict_signature is not None: - self.input_signature = dict_signature - else: - # Fall back to standard inference - self.input_signature = get_input_signature(self.model) - - # 3. Handle dictionary inputs by creating an adapter - # Check if we have dict inputs that need adaptation - has_dict_inputs = isinstance(self.input_signature, dict) - - if has_dict_inputs: - # Create adapter model that converts list to dict - adapted_model = self._create_dict_adapter(self.input_signature) + # Use the standard get_input_signature which handles all model types + # and preserves nested structures (dicts, lists, etc.) + self.input_signature = get_input_signature(self.model) + + # 2. Determine input structure and create adapter if needed + # There are 3 cases: + # Case 1: Single input (not nested) + # Case 2: Flat list of inputs (list where flattened == original) + # Case 3: Nested structure (dicts, nested lists, etc.) + + if not tree.is_nested(self.input_signature): + # Case 1: Single input - use as-is + model_to_convert = self.model + signature_for_conversion = self.input_signature + elif isinstance(self.input_signature, list) and len( + self.input_signature + ) == len(tree.flatten(self.input_signature)): + # Case 2: Flat list of inputs - use as-is + model_to_convert = self.model + signature_for_conversion = self.input_signature + else: + # Case 3: Nested structure (dict, nested lists, etc.) + # Create adapter model that converts flat list to nested structure + adapted_model = self._create_nested_inputs_adapter( + self.input_signature + ) - # Convert dict signature to list for TFLite conversion - # The adapter will handle the dict->list conversion - input_signature_list = list(self.input_signature.values()) + # Flatten signature for TFLite conversion + signature_for_conversion = tree.flatten(self.input_signature) - # Use adapted model and list signature for conversion + # Use adapted model and flat list signature for conversion model_to_convert = adapted_model - signature_for_conversion = input_signature_list - else: - # No dict inputs - use model as-is - model_to_convert = self.model - signature_for_conversion = self.input_signature # Store original model reference for later use original_model = self.model @@ -132,17 +109,17 @@ def export(self, filepath): self.model = model_to_convert try: - # 4. Convert the model to TFLite. + # Convert the model to TFLite. tflite_model = self._convert_to_tflite(signature_for_conversion) finally: # Restore original model self.model = original_model - # 4. Save the initial TFLite model to the specified file path. + # Save the TFLite model to the specified file path. if not filepath.endswith(".tflite"): raise ValueError( - "The LiteRT export requires the filepath to end with " - "'.tflite'. Got: {filepath}" + f"The LiteRT export requires the filepath to end with " + f"'.tflite'. Got: {filepath}" ) with open(filepath, "wb") as f: @@ -150,29 +127,37 @@ def export(self, filepath): return filepath - def _create_dict_adapter(self, input_signature_dict): - """Create an adapter model that converts list inputs to dict inputs. + def _create_nested_inputs_adapter(self, input_signature_struct): + """Create an adapter model that converts flat list inputs to nested + structure. - This adapter allows models expecting dictionary inputs to be exported - to TFLite format (which only supports positional/list inputs). + This adapter allows models expecting nested inputs (dicts, lists, etc.) + to be exported to TFLite format (which only supports positional/list + inputs). Args: - input_signature_dict: Dictionary mapping input names to InputSpec + input_signature_struct: Nested structure of InputSpecs (dict, list, + etc.) Returns: - A Functional model that accepts list inputs and converts to dict + A Functional model that accepts flat list inputs and converts to + nested """ - io_utils.print_msg( - f"Creating adapter for dictionary inputs: " - f"{list(input_signature_dict.keys())}" - ) - - input_keys = list(input_signature_dict.keys()) + # Get flat paths to preserve names and print input mapping + paths_and_specs = tree.flatten_with_path(input_signature_struct) + paths = [".".join(str(e) for e in p) for p, v in paths_and_specs] + io_utils.print_msg(f"Creating adapter for inputs: {paths}") - # Create Input layers for TFLite (list-based) + # Create Input layers for TFLite (flat list-based) input_layers = [] - for name in input_keys: - spec = input_signature_dict[name] + for path, spec in paths_and_specs: + # Extract the input name from the path + if path: + # path is a tuple of keys, use the last one as the name + name = str(path[-1]) + else: + name = "input" + input_layer = layers.Input( shape=spec.shape[1:], # Remove batch dimension dtype=spec.dtype, @@ -180,15 +165,16 @@ def _create_dict_adapter(self, input_signature_dict): ) input_layers.append(input_layer) - # Create dict from list inputs - inputs_dict = { - name: layer for name, layer in zip(input_keys, input_layers) - } + # Reconstruct the nested structure from flat list + inputs_structure = tree.pack_sequence_as( + input_signature_struct, input_layers + ) - # Call the original model with dict inputs - outputs = self.model(inputs_dict) + # Call the original model with nested inputs + outputs = self.model(inputs_structure) - # Build as Functional model (list inputs -> dict -> model -> output) + # Build as Functional model (flat list inputs -> nested -> model -> + # output) adapted_model = models.Model(inputs=input_layers, outputs=outputs) # Preserve the original model's variables From ada71deb68689f6e9a02ded116afc7b1f8affd23 Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 27 Nov 2025 11:20:50 +0530 Subject: [PATCH 114/115] Add ai-edge-litert to requirements.txt Included the ai-edge-litert package in requirements.txt to support new functionality or dependencies. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 926a3ec883d2..7167e61174e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,7 @@ tensorflow-cpu~=2.18.1;sys_platform != 'darwin' tensorflow~=2.18.1;sys_platform == 'darwin' tf2onnx +ai-edge-litert # Torch. --extra-index-url https://download.pytorch.org/whl/cpu From 00088c99a8031ff07405ebf23fb1b728a5de51de Mon Sep 17 00:00:00 2001 From: RAHUL KUMAR Date: Thu, 27 Nov 2025 12:50:26 +0530 Subject: [PATCH 115/115] Fix input signature handling in LiteRTExporter Improves analysis of input signatures by unwrapping single-element lists for Functional models and consistently using the correct structure for input handling. Also updates input layer naming to prefer spec.name when available, ensuring more accurate input identification. --- keras/src/export/litert.py | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/keras/src/export/litert.py b/keras/src/export/litert.py index 6c2bc1ce184b..27788172ff78 100644 --- a/keras/src/export/litert.py +++ b/keras/src/export/litert.py @@ -79,25 +79,32 @@ def export(self, filepath): # Case 2: Flat list of inputs (list where flattened == original) # Case 3: Nested structure (dicts, nested lists, etc.) - if not tree.is_nested(self.input_signature): + # Special handling for Functional models: get_input_signature wraps + # the structure in a list, so unwrap it for analysis + input_struct = self.input_signature + if ( + isinstance(self.input_signature, list) + and len(self.input_signature) == 1 + ): + input_struct = self.input_signature[0] + + if not tree.is_nested(input_struct): # Case 1: Single input - use as-is model_to_convert = self.model signature_for_conversion = self.input_signature - elif isinstance(self.input_signature, list) and len( - self.input_signature - ) == len(tree.flatten(self.input_signature)): + elif isinstance(input_struct, list) and len(input_struct) == len( + tree.flatten(input_struct) + ): # Case 2: Flat list of inputs - use as-is model_to_convert = self.model signature_for_conversion = self.input_signature else: # Case 3: Nested structure (dict, nested lists, etc.) # Create adapter model that converts flat list to nested structure - adapted_model = self._create_nested_inputs_adapter( - self.input_signature - ) + adapted_model = self._create_nested_inputs_adapter(input_struct) # Flatten signature for TFLite conversion - signature_for_conversion = tree.flatten(self.input_signature) + signature_for_conversion = tree.flatten(input_struct) # Use adapted model and flat list signature for conversion model_to_convert = adapted_model @@ -151,12 +158,12 @@ def _create_nested_inputs_adapter(self, input_signature_struct): # Create Input layers for TFLite (flat list-based) input_layers = [] for path, spec in paths_and_specs: - # Extract the input name from the path - if path: - # path is a tuple of keys, use the last one as the name - name = str(path[-1]) - else: - name = "input" + # Extract the input name from spec or path + name = ( + spec.name + if hasattr(spec, "name") and spec.name + else (str(path[-1]) if path else "input") + ) input_layer = layers.Input( shape=spec.shape[1:], # Remove batch dimension