Data Science question, asked by Agostino Dorano on January 25, 2021
I've wrapped the Object Detection API's Faster R-CNN meta-architecture in a custom Keras model with the code below:
```python
import tensorflow as tf
from object_detection.utils import config_util

num_classes = 1
model_conf = 'models/research/object_detection/configs/tf2/faster_rcnn_resnet101_v1_640x640_coco17_tpu-8.config'

configs = config_util.get_configs_from_pipeline_file(model_conf)
model_config = configs['model']
model_config.faster_rcnn.num_classes = num_classes
model_config.faster_rcnn.feature_extractor.batch_norm_trainable = False
model_config.faster_rcnn.number_of_stages = 2
model_config.faster_rcnn.second_stage_post_processing.batch_non_max_suppression.max_detections_per_class = 150
```
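The `detection_model` object used further down is not shown; presumably it is built from this modified config with the Object Detection API's `model_builder`, along these lines (a minimal sketch, my assumption rather than code from the original):

```python
# Assumption: detection_model (used below) is built from the modified
# config via the Object Detection API's model_builder.
from object_detection.builders import model_builder

detection_model = model_builder.build(
    model_config=model_config, is_training=True)
```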
```python
class MyDetectionModel(tf.keras.Model):  # adapter around the detection model

    def __init__(self, model):
        super(MyDetectionModel, self).__init__()
        self.detection_model = model
        #self.detection_model.build(x, y, z)
        #image, shapes = self.detection_model.preprocess(tf.concat([tf.zeros([1, 640, 640, 3]), tf.zeros([1, 640, 640, 3])], axis=0))
        #prediction_dict = self.detection_model.predict(image, shapes)
        #_ = self.detection_model.postprocess(prediction_dict, shapes)

    def select_variable_to_fine_tune(self):
        # TO-DO
        return None

    def print_training_variable(self):
        print(list(self.to_fine_tune))

    def train_step(self, data):
        image_tensors, gt = data[0], data[1]
        groundtruth_boxes_list, groundtruth_classes_list = gt[0], gt[1]
        # works around the batch-size problem
        shapes = tf.constant(len(image_tensors) * [[640, 640, 3]], dtype=tf.int32)

        # GROUNDTRUTH
        self.detection_model.provide_groundtruth(
            groundtruth_boxes_list=groundtruth_boxes_list,
            groundtruth_classes_list=groundtruth_classes_list)

        # debug
        #print(image_tensors[0][0])

        with tf.GradientTape() as tape:
            preprocessed_images = tf.concat(
                [self.detection_model.preprocess(image_tensor)[0]
                 for image_tensor in image_tensors], axis=0)
            #print(preprocessed_images)
            # unlike a standard Keras model, predict is called here and it requires the shapes
            prediction_dict = self.detection_model.predict(preprocessed_images, shapes)
            losses_dict = self.detection_model.loss(prediction_dict, shapes)
            total_loss = (losses_dict['Loss/RPNLoss/localization_loss']
                          + losses_dict['Loss/RPNLoss/objectness_loss']
                          + losses_dict['Loss/BoxClassifierLoss/localization_loss']
                          + losses_dict['Loss/BoxClassifierLoss/classification_loss'])

        gradients = tape.gradient(total_loss, self.detection_model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.detection_model.trainable_variables))

        return {
            "total_loss": total_loss,
            "Loss/RPNLoss/localization_loss": losses_dict['Loss/RPNLoss/localization_loss'],
            "Loss/RPNLoss/objectness_loss": losses_dict['Loss/RPNLoss/objectness_loss'],
            "Loss/BoxClassifierLoss/localization_loss": losses_dict['Loss/BoxClassifierLoss/localization_loss'],
            "Loss/BoxClassifierLoss/classification_loss": losses_dict['Loss/BoxClassifierLoss/classification_loss'],
        }

    def test_step(self, data):
        # TO-DO
        return None
```
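The `select_variable_to_fine_tune` TO-DO could, for instance, filter `trainable_variables` by name prefix. A minimal sketch, assuming one only wants to fine-tune the predictor heads; the prefixes below are hypothetical and depend on the actual checkpoint:

```python
def select_variables_to_fine_tune(detection_model, prefixes_to_train):
    """Return trainable variables whose names start with one of the given
    prefixes. The prefixes are checkpoint-specific; print
    [v.name for v in detection_model.trainable_variables] to find them."""
    return [v for v in detection_model.trainable_variables
            if any(v.name.startswith(p) for p in prefixes_to_train)]

# Hypothetical prefixes -- inspect the real variable names first:
to_fine_tune = select_variables_to_fine_tune(
    detection_model,
    prefixes_to_train=['FirstStageBoxPredictor', 'SecondStageBoxPredictor'])
```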
I then train the wrapped model with the following code:
```python
my_model = MyDetectionModel(detection_model)
optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
my_model.compile(optimizer)
my_model.fit(
    x=train_image_tensors,
    y=(gt_box_tensors, gt_classes_one_hot_tensors),
    epochs=150,
    batch_size=BATCH_SIZE,
    #verbose=1,
)
```
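For reference, the training inputs are not shown in the original; they presumably follow the few-shot fine-tuning tutorial's layout. A dummy example of the shapes I am assuming:

```python
import numpy as np

# Hypothetical dummy data illustrating the assumed input layout:
# one [1, H, W, 3] image tensor per example, one [num_boxes, 4] tensor of
# normalized [ymin, xmin, ymax, xmax] boxes, and one
# [num_boxes, num_classes] one-hot class tensor per example.
train_image_tensors = [tf.convert_to_tensor(
    np.random.rand(1, 640, 640, 3), dtype=tf.float32)]
gt_box_tensors = [tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)]
gt_classes_one_hot_tensors = [tf.one_hot([0], depth=num_classes)]
```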
Training goes well and the losses go down, but at the test stage the `detect` function (specifically the `predict` call, according to the traceback) gives the following error:
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-54-9b4b502bec7a> in <module>()
34 a = np.resize(test_images_np[i],(1,1000,1000,3))
35 input_tensor = tf.convert_to_tensor(a, dtype=tf.float32)
---> 36 detections = detect(input_tensor)
37 #print(detections['detection_boxes'][0].numpy(),detections['detection_classes'][0].numpy().astype(np.uint32)+label_id_offset, detections['detection_scores'][0].numpy() )
38 print(detections['detection_scores'][0].numpy() )
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
778 else:
779 compiler = "nonXla"
--> 780 result = self._call(*args, **kwds)
781
782 new_tracing_count = self._get_tracing_count()
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
821 # This is the first call of __call__, so we have to initialize.
822 initializers = []
--> 823 self._initialize(args, kwds, add_initializers_to=initializers)
824 finally:
825 # At this point we know that the initialization is complete (or less
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
695 self._concrete_stateful_fn = (
696 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 697 *args, **kwds))
698
699 def invalid_creator_scope(*unused_args, **unused_kwds):
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2853 args, kwargs = None, None
2854 with self._lock:
-> 2855 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2856 return graph_function
2857
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3211
3212 self._function_cache.missed.add(call_context_key)
-> 3213 graph_function = self._create_graph_function(args, kwargs)
3214 self._function_cache.primary[cache_key] = graph_function
3215 return graph_function, args, kwargs
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3073 arg_names=arg_names,
3074 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3075 capture_by_value=self._capture_by_value),
3076 self._function_attributes,
3077 function_spec=self.function_spec,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
984 _, original_func = tf_decorator.unwrap(python_func)
985
--> 986 func_outputs = python_func(*func_args, **func_kwargs)
987
988 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
598 # __wrapped__ allows AutoGraph to swap in a converted function. We give
599 # the function a weak reference to itself to avoid a reference cycle.
--> 600 return weak_wrapped_fn().__wrapped__(*args, **kwds)
601 weak_wrapped_fn = weakref.ref(wrapped_fn)
602
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
971 except Exception as e: # pylint:disable=broad-except
972 if hasattr(e, "ag_error_metadata"):
--> 973 raise e.ag_error_metadata.to_exception(e)
974 else:
975 raise
ValueError: in user code:
<ipython-input-50-08bee00ed330>:25 detect *
prediction_dict = my_model.detection_model.predict(preprocessed_image, shapes)
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py:818 predict *
prediction_dict.update(
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py:991 _predict_second_stage *
proposal_boxes_normalized, num_proposals = self._proposal_postprocess(
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py:721 _proposal_postprocess *
proposal_boxes_normalized, _, _, num_proposals, _, _ = self._postprocess_rpn(
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py:1694 _postprocess_rpn *
(groundtruth_boxlists, groundtruth_classes_with_background_list, _,
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py:1829 _format_groundtruth_data *
groundtruth_boxlists = [
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py:1024 _slice_helper
name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py:201 wrapper
return target(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/array_ops.py:1196 strided_slice
shrink_axis_mask=shrink_axis_mask)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_array_ops.py:10352 strided_slice
shrink_axis_mask=shrink_axis_mask, name=name)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:744 _apply_op_helper
attrs=attr_protos, op_def=op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py:593 _create_op_internal
compute_device)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:3485 _create_op_internal
op_def=op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:1975 __init__
control_input_ops, op_def)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:1815 _create_c_op
raise ValueError(str(e))
ValueError: slice index 1 of dimension 0 out of bounds. for '{{node strided_slice_11}} = StridedSlice[Index=DT_INT32, T=DT_INT32, begin_mask=0, ellipsis_mask=0, end_mask=0, new_axis_mask=0, shrink_axis_mask=3](Tile, strided_slice_11/stack, strided_slice_11/stack_1, strided_slice_11/stack_2)' with input shapes: [1,3], [2], [2], [2] and with computed input tensors: input[1] = <1 0>, input[2] = <2 1>, input[3] = <1 1>.
```
The code used for prediction is:
```python
import os
import numpy as np

test_image_dir = 'models/research/object_detection/test_images/ducky/test/'
test_images_np = []
for i in range(1, 50):
    image_path = os.path.join(test_image_dir, 'out' + str(i) + '.jpg')
    test_images_np.append(np.expand_dims(
        load_image_into_numpy_array(image_path), axis=0))

@tf.function
def detect(input_tensor):
    """Run detection on an input image.

    Args:
        input_tensor: A [1, height, width, 3] Tensor of type tf.float32.
            Note that height and width can be anything since the image will be
            immediately resized according to the needs of the model within this
            function.

    Returns:
        A dict containing 3 Tensors (`detection_boxes`, `detection_classes`,
        and `detection_scores`).
    """
    preprocessed_image, shapes = my_model.detection_model.preprocess(input_tensor)
    prediction_dict = my_model.detection_model.predict(preprocessed_image, shapes)
    return my_model.detection_model.postprocess(prediction_dict, shapes)

# Note that the first frame will trigger tracing of the tf.function, which will
# take some time, after which inference should be fast.
label_id_offset = 1
for i in range(len(test_images_np)):
    print(i)
    a = np.resize(test_images_np[i], (1, 1000, 1000, 3))
    input_tensor = tf.convert_to_tensor(a, dtype=tf.float32)
    detections = detect(input_tensor)
    #print(detections['detection_boxes'][0].numpy(), detections['detection_classes'][0].numpy().astype(np.uint32) + label_id_offset, detections['detection_scores'][0].numpy())
    print(detections['detection_scores'][0].numpy())
    plot_detections(
        test_images_np[i][0],
        detections['detection_boxes'][0].numpy(),
        detections['detection_classes'][0].numpy().astype(np.uint32)
        + label_id_offset,
        detections['detection_scores'][0].numpy(),
        category_index, figsize=(15, 20),
        image_name="gif_frame_" + ('%02d' % i) + ".jpg")
```
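One way to probe this in isolation (a minimal sketch, mirroring the commented-out warm-up calls in `__init__` and assuming a 640x640 model input) is to run the same three calls eagerly on a dummy batch, outside any `tf.function`:

```python
# Sanity check: run preprocess/predict/postprocess eagerly on a dummy
# image so the failure can be reproduced outside tf.function tracing.
dummy = tf.zeros([1, 640, 640, 3], dtype=tf.float32)
image, shapes = my_model.detection_model.preprocess(dummy)
prediction_dict = my_model.detection_model.predict(image, shapes)
detections = my_model.detection_model.postprocess(prediction_dict, shapes)
print(detections['detection_scores'][0])
```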
The strange thing is that the same sequence of calls that gives me an error at prediction time works properly inside the `train_step` method.
Removing the `@tf.function` annotation, the error becomes:
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-f3417ad28e97> in <module>()
33 a = np.resize(test_images_np[i],(1,1000,1000,3))
34 input_tensor = tf.convert_to_tensor(a, dtype=tf.float32)
---> 35 detections = detect(input_tensor)
36 #print(detections['detection_boxes'][0].numpy(),detections['detection_classes'][0].numpy().astype(np.uint32)+label_id_offset, detections['detection_scores'][0].numpy() )
37 print(detections['detection_scores'][0].numpy() )
17 frames
<ipython-input-23-f3417ad28e97> in detect(input_tensor)
22 """
23 preprocessed_image, shapes = my_model.detection_model.preprocess(input_tensor)
---> 24 prediction_dict = my_model.detection_model.predict(preprocessed_image, shapes)
25 return my_model.detection_model.postprocess(prediction_dict, shapes)
26
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in predict(self, preprocessed_inputs, true_image_shapes, **side_inputs)
822 prediction_dict['rpn_features_to_crop'],
823 prediction_dict['anchors'], prediction_dict['image_shape'],
--> 824 true_image_shapes, **side_inputs))
825
826 if self._number_of_stages == 3:
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape, true_image_shapes, **side_inputs)
991 proposal_boxes_normalized, num_proposals = self._proposal_postprocess(
992 rpn_box_encodings, rpn_objectness_predictions_with_background, anchors,
--> 993 image_shape, true_image_shapes)
994 prediction_dict = self._box_prediction(rpn_features_to_crop,
995 proposal_boxes_normalized,
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in _proposal_postprocess(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape, true_image_shapes)
722 self._postprocess_rpn(
723 rpn_box_encodings, rpn_objectness_predictions_with_background,
--> 724 anchors, image_shape_2d, true_image_shapes)
725 return proposal_boxes_normalized, num_proposals
726
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in _postprocess_rpn(self, rpn_box_encodings_batch, rpn_objectness_predictions_with_background_batch, anchors, image_shapes, true_image_shapes)
1694 (groundtruth_boxlists, groundtruth_classes_with_background_list, _,
1695 groundtruth_weights_list
-> 1696 ) = self._format_groundtruth_data(image_shapes)
1697 (proposal_boxes, proposal_scores,
1698 num_proposals) = self._sample_box_classifier_batch(
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in _format_groundtruth_data(self, image_shapes)
1831 box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1])
1832 for i, boxes in enumerate(
-> 1833 self.groundtruth_lists(fields.BoxListFields.boxes))
1834 ]
1835 groundtruth_classes_with_background_list = []
/usr/local/lib/python3.6/dist-packages/object_detection/meta_architectures/faster_rcnn_meta_arch.py in <listcomp>(.0)
1830 box_list_ops.to_absolute_coordinates(
1831 box_list.BoxList(boxes), image_shapes[i, 0], image_shapes[i, 1])
-> 1832 for i, boxes in enumerate(
1833 self.groundtruth_lists(fields.BoxListFields.boxes))
1834 ]
/usr/local/lib/python3.6/dist-packages/object_detection/core/box_list_ops.py in to_absolute_coordinates(boxlist, height, width, check_range, maximum_normalized_coordinate, scope)
910 # Ensure range of input boxes is correct.
911 if check_range:
--> 912 box_maximum = tf.reduce_max(boxlist.get())
913 max_assert = tf.Assert(
914 tf.greater_equal(maximum_normalized_coordinate, box_maximum),
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py in reduce_max_v1(input_tensor, axis, keepdims, name, reduction_indices, keep_dims)
2636 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2637 "keep_dims", keep_dims)
-> 2638 return reduce_max(input_tensor, axis, keepdims, name)
2639
2640
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py in reduce_max(input_tensor, axis, keepdims, name)
2684 """
2685 return reduce_max_with_dims(input_tensor, axis, keepdims, name,
-> 2686 _ReductionDims(input_tensor, axis))
2687
2688
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py in reduce_max_with_dims(input_tensor, axis, keepdims, name, dims)
2695 return _may_reduce_to_scalar(
2696 keepdims, axis,
-> 2697 gen_math_ops._max(input_tensor, dims, keepdims, name=name))
2698
2699
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_math_ops.py in _max(input, axis, keep_dims, name)
5710 try:
5711 return _max_eager_fallback(
-> 5712 input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
5713 except _core._SymbolicException:
5714 pass # Add nodes to the TensorFlow graph.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_math_ops.py in _max_eager_fallback(input, axis, keep_dims, name, ctx)
5742 _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
5743 _result = _execute.execute(b"Max", 1, inputs=_inputs_flat, attrs=_attrs,
-> 5744 ctx=ctx, name=name)
5745 if _execute.must_record_gradient():
5746 _execute.record_gradient(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
73 "Inputs to eager execution function cannot be Keras symbolic "
74 "tensors, but found {}".format(keras_symbolic_tensors))
---> 75 raise e
76 # pylint: enable=protected-access
77 return tensors
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
TypeError: An op outside of the function building code is being passed
a "Graph" tensor. It is possible to have Graph tensors
leak out of the function building context by including a
tf.init_scope in your function building code.
For example, the following function will fail:
@tf.function
def has_init_scope():
my_constant = tf.constant(1.)
with tf.init_scope():
added = my_constant * 2
The graph tensor has name: IteratorGetNext:5
```