cpoptic opened this issue 5 years ago
Try using custom_callbacks. Counting accuracy takes a lot of time, so don't use it every epoch.
mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)
model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=100, layers='heads', custom_callbacks=[mean_average_precision_callback])
Also add this somewhere in model.py, for example at the end of the file:
############################################################
# Custom Callbacks
############################################################
from keras.callbacks import Callback
from mrcnn.utils import Dataset

class MeanAveragePrecisionCallback(Callback):
    def __init__(self, train_model: MaskRCNN, inference_model: MaskRCNN, dataset: Dataset,
                 calculate_map_at_every_X_epoch=5, dataset_limit=None,
                 verbose=1):
        super().__init__()
        self.train_model = train_model
        self.inference_model = inference_model
        self.dataset = dataset
        self.calculate_map_at_every_X_epoch = calculate_map_at_every_X_epoch
        self.dataset_limit = len(self.dataset.image_ids)
        if dataset_limit is not None:
            self.dataset_limit = dataset_limit
        self.dataset_image_ids = self.dataset.image_ids.copy()

        if inference_model.config.BATCH_SIZE != 1:
            raise ValueError("This callback only works with the batch size of 1")

        self._verbose_print = print if verbose > 0 else lambda *a, **k: None

    def on_epoch_end(self, epoch, logs=None):
        # epoch is 0-based: skip the first epochs, then fire on every X-th (1-based) epoch
        if epoch > 2 and (epoch + 1) % self.calculate_map_at_every_X_epoch == 0:
            self._verbose_print("Calculating mAP...")
            self._load_weights_for_model()
            mAPs = self._calculate_mean_average_precision()
            mAP = np.mean(mAPs)
            if logs is not None:
                logs["val_mean_average_precision"] = mAP
            self._verbose_print("mAP at epoch {0} is: {1}".format(epoch + 1, mAP))
        super().on_epoch_end(epoch, logs)

    def _load_weights_for_model(self):
        last_weights_path = self.train_model.find_last()
        self._verbose_print("Loaded weights for the inference model (last checkpoint of the train model): {0}".format(
            last_weights_path))
        self.inference_model.load_weights(last_weights_path,
                                          by_name=True)

    def _calculate_mean_average_precision(self):
        mAPs = []

        # Use a random subset of the data when a limit is defined
        np.random.shuffle(self.dataset_image_ids)

        for image_id in self.dataset_image_ids[:self.dataset_limit]:
            image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(self.dataset, self.inference_model.config,
                                                                             image_id, use_mini_mask=False)
            molded_images = np.expand_dims(mold_image(image, self.inference_model.config), 0)
            results = self.inference_model.detect(molded_images, verbose=0)
            r = results[0]
            # Compute mAP - VOC uses IoU 0.5
            AP, _, _, _ = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                           r["class_ids"], r["scores"], r['masks'])
            mAPs.append(AP)

        return np.array(mAPs)
Hello guys, thanks for sharing the code to get accuracy metrics. I tried to implement it but I get errors :( I pasted the class "MeanAveragePrecisionCallback" at the end of the model.py code.
Then, in train.py I defined the variable like you did @VtlNmnk:
mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)
But when I launch my training, I have this error:
Traceback (most recent call last):
  File "Taraudage.py", line 372, in <module>
    train(model)
  File "Taraudage.py", line 196, in train
    mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(train_model=model, inference_model=model_inference,
NameError: name 'model_inference' is not defined
What variable do you expect for model_inference?
Thanks for your time. Regards, Antoine
Then, in train.py I defined the variable like you did @VtlNmnk:
mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)
I did not tell you to change anything in train.py :) This line needs to be added to the script from which you call train.py. Personally, I use a Google Colab notebook from these guys, but with many of my own changes.
Here is an example of how this works for me
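A minimal sketch of that wiring (config, inference_config, MODEL_DIR, and the datasets are placeholders for your own setup; the essential point, echoed throughout this thread, is a second MaskRCNN instance in inference mode whose config resolves to BATCH_SIZE == 1):

import mrcnn.model as modellib

# Training model (its config may use a batch size > 1)
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)

# Separate inference model for the mAP callback; its config must
# resolve to BATCH_SIZE == 1 (GPU_COUNT = 1, IMAGES_PER_GPU = 1)
model_inference = modellib.MaskRCNN(mode="inference", config=inference_config,
                                    model_dir=MODEL_DIR)

mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(
    model, model_inference, dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)

model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE,
            epochs=100, layers='heads',
            custom_callbacks=[mean_average_precision_callback])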
Hi,
I am sorry, but when I try to implement your code, the dataset: Dataset annotation in the __init__ signature of the callback comes up as "Dataset not defined".
Thank you for any suggestions :)
Hi @ben975,
You can use below class to calculate MAP, precision, recall for each image
from mrcnn.utils import compute_ap
class EvalImage():
    def __init__(self, dataset, model, cfg):
        self.dataset = dataset
        self.model = model
        self.cfg = cfg

    def evaluate_model(self, limit=50):
        APs = list()
        precisions_dict = {}
        recall_dict = {}
        for index, image_id in enumerate(self.dataset.image_ids):
            if index > limit:
                break
            # load image, bounding boxes and masks for the image id
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
                self.dataset, self.cfg, image_id, use_mini_mask=False)
            # convert pixel values (e.g. center)
            # scaled_image = modellib.mold_image(image, self.cfg)
            # convert image into one sample
            sample = np.expand_dims(image, 0)
            # make prediction
            yhat = self.model.detect(sample, verbose=1)
            # extract results for first sample
            r = yhat[0]
            # calculate statistics, including AP
            AP, precisions, recalls, _ = compute_ap(gt_bbox, gt_class_id, gt_mask,
                                                    r["rois"], r["class_ids"], r["scores"], r['masks'])
            precisions_dict[image_id] = np.mean(precisions)
            recall_dict[image_id] = np.mean(recalls)
            # store
            APs.append(AP)
        # calculate the mean AP across all images
        mAP = np.mean(APs)
        return mAP, precisions_dict, recall_dict
Usage :
prepare data set:
dataset_val = Dataset()
dataset_val.load_data_set("dataset name","val_data")
dataset_val.prepare()
Create object of config and load model
config = DataConfig()
evaluator = EvalImage(dataset_val, model, config)
evaluator.evaluate_model()
you will get the results
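As a small usage note, evaluate_model() returns three values, so a call like the hypothetical snippet below makes the results visible:

mAP, precisions_by_image, recalls_by_image = evaluator.evaluate_model()
print("mAP over evaluated images: {:.3f}".format(mAP))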
Wow, thanks for the really fast reply! I am currently training, but it will be done soon and I will have a go! I have searched for ages for this question to be answered. Will report back shortly! :)
Hi @mhtarora39, I'm sorry, but when I run it I seem to get:
File "..."
File "..."
NameError: name 'compute_ap' is not defined
Thanks again for your help; I am new to Mask R-CNNs.
Sorry, I didn't mention it: please import compute_ap with "from mrcnn.utils import compute_ap". I am also updating the code above with the import. Let me know if you encounter any other issue and I will update the code accordingly.
Seems to be running well, thank you so much!
Here is an example of how this works for me
Hi @VtlNmnk, I have made the changes you mentioned and was able to run the code without any errors, but when I start training, the mAP is not getting printed. Training command: python custom.py train --dataset=customImages --weights=coco
Please find the output:
Hi @hardikmanek! Can you show the code for how you initialize the callback? Without additional information, I can't help you.
Hi @VtlNmnk Please find the attached screenshot.
I am passing the training command as a command-line argument
This is the initial code of the function copied in the last part of model.py
By the way, the complete execution command for the program is: python custom.py train --dataset=customImages --weights=coco
I got the mAP value after the 9th epoch, which is 0.3; not sure what's wrong.
Thank you so much.
Can anyone in this thread explain how to get the loss output logging that we see in hardikmanek's screenshot? I'm talking about the various losses logged to stdout - I'm only seeing the total loss and none of the rest (like rpn_class_loss, rpn_box_loss, etc.).
Hi @VtlNmnk, I used your code but get this error:
Traceback (most recent call last):
  File "D:/study/Mask_RCNN-2.0/train_test.py", line 16, in <module>
    import model as modellib
  File "D:\study\Mask_RCNN-2.0\model.py", line 2769, in <module>
    import train_test
  File "D:\study\Mask_RCNN-2.0\train_test.py", line 225, in <module>
    mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model,
AttributeError: module 'model' has no attribute 'MeanAveragePrecisionCallback'
model.py:

from keras.callbacks import Callback
import train_test
Dataset = train_test.DrugDataset()

class MeanAveragePrecisionCallback(Callback):
    def __init__(self, train_model: MaskRCNN, inference_model=MaskRCNN, dataset=Dataset,
                 calculate_map_at_every_X_epoch=1, dataset_limit=None,
                 verbose=1):
        super().__init__()
        self.train_model = train_model
        self.inference_model = inference_model
        self.dataset = dataset
        self.calculate_map_at_every_X_epoch = calculate_map_at_every_X_epoch
        self.dataset_limit = len(self.dataset.image_ids)
        if dataset_limit is not None:
            self.dataset_limit = dataset_limit
        self.dataset_image_ids = self.dataset.image_ids.copy()

        if inference_model.config.BATCH_SIZE != 1:
            raise ValueError("This callback only works with the batch size of 1")

        self._verbose_print = print if verbose > 0 else lambda *a, **k: None

    def on_epoch_end(self, epoch, logs=None):
        if epoch > 2 and (epoch + 1) % self.calculate_map_at_every_X_epoch == 0:
            self._verbose_print("Calculating mAP...")
            self._load_weights_for_model()
            mAPs = self._calculate_mean_average_precision()
            mAP = np.mean(mAPs)
            if logs is not None:
                logs["val_mean_average_precision"] = mAP
            self._verbose_print("mAP at epoch {0} is: {1}".format(epoch + 1, mAP))
        super().on_epoch_end(epoch, logs)

    def _load_weights_for_model(self):
        last_weights_path = self.train_model.find_last()
        self._verbose_print("Loaded weights for the inference model (last checkpoint of the train model): {0}".format(
            last_weights_path))
        self.inference_model.load_weights(last_weights_path,
                                          by_name=True)

    def _calculate_mean_average_precision(self):
        mAPs = []
        # Use a random subset of the data when a limit is defined
        np.random.shuffle(self.dataset_image_ids)
        for image_id in self.dataset_image_ids[:self.dataset_limit]:
            image, image_meta, gt_class_id, gt_bbox, gt_mask = load_image_gt(self.dataset, self.inference_model.config,
                                                                             image_id, use_mini_mask=False)
            molded_images = np.expand_dims(mold_image(image, self.inference_model.config), 0)
            results = self.inference_model.detect(molded_images, verbose=0)
            r = results[0]
            # Compute mAP - VOC uses IoU 0.5
            AP, _, _, _ = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                           r["class_ids"], r["scores"], r['masks'])
            mAPs.append(AP)
        return np.array(mAPs)
my train.py:
model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR) model_inference = modellib.MaskRCNN(mode="inference", config=config, model_dir=MODEL_DIR) mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset=dataset_val, calculate_map_at_every_X_epoch=1, verbose=1)
model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=1, layers='heads', custom_callbacks=[mean_average_precision_callback])
thanks a lot!!
how to get the loss output logging that we see in hardikmanek's screenshot?
set verbose = 1
from keras.callbacks import Callback
import train_test
Dataset = train_test.DrugDataset()

class MeanAveragePrecisionCallback(Callback):
    def __init__(self, train_model: MaskRCNN, inference_model=MaskRCNN, dataset=Dataset,
                 calculate_map_at_every_X_epoch=1, dataset_limit=None, verbose=1):

You posted your model.py code without code markup, so the indentation is not visible :) I think either an invalid path to the model.py file is specified, or the indentation in the model.py file itself is wrong.
I got the mAP value after the 9th epoch, which is 0.3; not sure what's wrong.
Sorry for the late reply. Well, the mAP was calculated, just not after the third epoch. Perhaps there were no saved models to compute it from earlier? Check how often your models are saved.
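For reference, the checkpointing that find_last() relies on is set up inside MaskRCNN.train() in model.py; a sketch of the relevant lines (also quoted later in this thread), which save weights at the end of every epoch:

# Inside MaskRCNN.train() in model.py: a checkpoint is written after every
# epoch, so find_last() can locate one when the mAP callback fires.
callbacks = [
    keras.callbacks.TensorBoard(log_dir=self.log_dir, histogram_freq=0,
                                write_graph=True, write_images=False),
    keras.callbacks.ModelCheckpoint(self.checkpoint_path, verbose=0,
                                    save_weights_only=True),
]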
I customized the "https://github.com/matterport/Mask_RCNN.git" repository to train on my own dataset for object detection, ignoring the mask segmentation part. Now I am evaluating my results; I can calculate the mAP, but I cannot calculate the F1 score. I have this function, compute_ap, from "https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/utils.py", which returns the mAP, precisions, recalls, and overlaps for each image. The point is that I cannot apply the F1 score formula directly, because the "precisions" and "recalls" variables are lists.
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
               pred_boxes, pred_class_ids, pred_scores, pred_masks,
               iou_threshold=0.5):
    # Get matches and overlaps
    gt_match, pred_match, overlaps = compute_matches(
        gt_boxes, gt_class_ids, gt_masks,
        pred_boxes, pred_class_ids, pred_scores, pred_masks,
        iou_threshold)

    # Compute precision and recall at each prediction box step
    precisions = np.cumsum(pred_match > -1) / (np.arange(len(pred_match)) + 1)
    recalls = np.cumsum(pred_match > -1).astype(np.float32) / len(gt_match)

    # Pad with start and end values to simplify the math
    precisions = np.concatenate([[0], precisions, [0]])
    recalls = np.concatenate([[0], recalls, [1]])

    # Ensure precision values decrease but don't increase. This way, the
    # precision value at each recall threshold is the maximum it can be
    # for all following recall thresholds, as specified by the VOC paper.
    for i in range(len(precisions) - 2, -1, -1):
        precisions[i] = np.maximum(precisions[i], precisions[i + 1])

    # Compute mean AP over recall range
    indices = np.where(recalls[:-1] != recalls[1:])[0] + 1
    mAP = np.sum((recalls[indices] - recalls[indices - 1]) *
                 precisions[indices])

    return mAP, precisions, recalls, overlaps
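One way to get a single F1 number from those arrays (my suggestion, not something defined in mrcnn): take the last real point of the cumulative precision/recall curves, i.e. the values once every prediction has been counted, and apply the usual formula. A sketch:

import numpy as np

def compute_f1(precisions, recalls, eps=1e-8):
    """F1 from the padded precision/recall arrays returned by compute_ap.

    compute_ap pads the cumulative curves with sentinel values
    ([0, ..., 0] for precisions and [0, ..., 1] for recalls), so
    index -2 is the last real point: precision/recall when all
    predictions are counted.
    """
    p = precisions[-2]
    r = recalls[-2]
    return 2 * p * r / (p + r + eps)

# Usage with compute_ap's outputs:
# AP, precisions, recalls, overlaps = compute_ap(...)
# f1 = compute_f1(precisions, recalls)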
Can anyone in this thread explain how to get the loss output logging that we see in hardikmanek's screenshot? I'm talking about the various losses logged to stdout - I'm only seeing the total loss and none of the rest (like rpn_class_loss, rpn_box_loss, etc.).
I have tried all the solutions and I'm facing the same issue... any solution?
Hello, I have tried this and I'm getting a mAP value of 0 after every 3 epochs... any suggestion why the mAP value is zero?
@rupa1118 I couldn't get the loss outputs. At this point I'm suspecting it might have something to do with the tensorflow/keras version you're using - maybe some older version prints these values? However, I'm not going to change the version just for printing the loss. You could also try writing a custom callback to print these numbers, I guess (I haven't tried that - instead I put some tf.Print lines in the code to see the losses for debugging (which is ugly, but was fast)).
@rupa1118 Does the balloon example work for you? I would try to first add the mAP to a known working example, and add the changes one at a time. Perhaps you have something wrong with the masks, as your val_loss also looks strange.
Hello, I'm trying to reproduce your code, @VtlNmnk. When I start training I get the error:
Using TensorFlow backend.
Traceback (most recent call last):
  File "/content/Mask_RCNN/samples/metal_blanco/metal_blanco.py", line 45, in <module>
    from mrcnn import model as modellib, utils
  ...
  File "/usr/local/lib/python3.6/dist-packages/mask_rcnn-2.1-py3.6.egg/mrcnn/model.py", line 2876, in <module>
NameError: name 'Callback' is not defined
This is my model.py file
############################################################
# Custom Callbacks
############################################################
from keras.callbacks import Callback

class MeanAveragePrecisionCallback(Callback):
    def __init__(self, train_model: MaskRCNN, inference_model: MaskRCNN, dataset: Dataset,
                 calculate_map_at_every_X_epoch=3, dataset_limit=None,
                 verbose=1):
        super().__init__()
        self.train_model = train_model
        self.inference_model = inference_model
        self.dataset = dataset
        # ... (rest of the class as posted above)
############################################################
# This is my file that starts training:
############################################################
def train(model):
    """Train the model."""
    # Training dataset.
    dataset_train = metal_blancoDataset()
    dataset_train.load_metal_blanco(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = metal_blancoDataset()
    dataset_val.load_metal_blanco(args.dataset, "val")
    dataset_val.prepare()

    model_inference = modellib.MaskRCNN(mode="inference", config=config,
                                        model_dir=args.logs)

    mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model,
        model_inference, dataset_val, calculate_map_at_every_X_epoch=3, verbose=1)

    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads',
                custom_callbacks=[mean_average_precision_callback])
############################################################
Any help will be appreciated
Hi @yoya93, try checking the train function in model.py: if it still has custom_callbacks = None hard-coded, update it so the function actually accepts the callback as a parameter.
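For reference, recent versions of matterport's model.py already accept this parameter; a sketch of the relevant signature and wiring (verify against your local copy, since older forks lack it):

# In recent matterport/Mask_RCNN model.py (check your local copy):
def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
          augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
    ...
    # Add custom callbacks to the list
    if custom_callbacks:
        callbacks += custom_callbacks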
Hello @VtlNmnk, thank you very much for your response. I am currently facing another problem. Training throws me this error:

Traceback (most recent call last):
  File "/content/Mask_RCNN/samples/metal_blanco/metal_blanco.py", line 371, in <module>
    train(model)
  File "/content/Mask_RCNN/samples/metal_blanco/metal_blanco.py", line 195, in train
    inference_model=model_inference, dataset=dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)
  File "/usr/local/lib/python3.6/dist-packages/mask_rcnn-2.1-py3.6.egg/mrcnn/model.py", line 2893, in __init__
ValueError: This callback only works with the batch size of 1

For this reason I configured my config.py like this:
class Config(object):
    """Base configuration class. For custom configurations, create a
    sub-class that inherits from this one and override properties
    that need to be changed.
    """
    # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.
    # Useful if your code needs to do things differently depending on which
    # experiment is running.
    NAME = None  # Override in sub-classes

    # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
    GPU_COUNT = 1

    # Number of images to train with on each GPU. A 12GB GPU can typically
    # handle 2 images of 1024x1024px.
    # Adjust based on your GPU memory and image sizes. Use the highest
    # number that your GPU can handle for best performance.
    IMAGES_PER_GPU = 1

    # ...

    def __init__(self):
        """Set values of computed attributes."""
        # Effective batch size
        self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT

This should make the batch size 1, but when I print it on the screen it still has the value 2.
And if I hard-code the batch size to 1 like this:

    def __init__(self):
        """Set values of computed attributes."""
        # Effective batch size
        self.BATCH_SIZE = 1

it prints the error:

ValueError: slice index 1 of dimension 0 out of bounds. for 'ROI/strided_slice_12' (op: 'StridedSlice') with input shapes: [1,261888,4], [1], [1], [1] and with computed input tensors: input[1] = <1>, input[2] = <2>, input[3] = <1>.

If I ignore the line that checks the BATCH_SIZE:

    if inference_model.config.BATCH_SIZE != 1:

the system starts training, but throws another error when calculating the mAP.
Thank you very much in advance. Greetings.
Try using custom_callbacks. Counting accuracy takes a lot of time, so don't use it every epoch.
mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model, model_inference, dataset_val, calculate_map_at_every_X_epoch=5, verbose=1)
model.train(dataset_train, dataset_val, learning_rate=config.LEARNING_RATE, epochs=100, layers='heads', custom_callbacks=[mean_average_precision_callback])
Thank you for sharing this code. I have a small question: I am wondering why there is only dataset_val in the mean_average_precision_callback?
@yoya93 and other guys, some advice for any project: first achieve a known working state. Then add your changes one at a time, and check after each one whether it still works. When reproducing the Mask R-CNN project from Matterport, you first need to run their project as-is and get the same result. Then, once you have a working project, copy it, change the input data to your own, change the number of classes, etc., and after each change, check that nothing is broken. Regarding your specific question: the source code of the Config class does not need to be changed. It is enough to access it correctly from your own code. Here is an example of my call to the class. Naturally, you first need to import it: from mrcnn.config import Config
Then you can see the properties of your inference configuration: config = _InfConfig(); config.display()
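For illustration, a minimal sketch of what such an inference config might look like (TrainConfig, "my_project", and the class count are hypothetical placeholders; only the _InfConfig name and the display() call come from the post above):

from mrcnn.config import Config

class TrainConfig(Config):       # hypothetical training config
    NAME = "my_project"          # placeholder name
    NUM_CLASSES = 1 + 1          # background + your classes (adjust)
    IMAGES_PER_GPU = 2           # training batch size

class _InfConfig(TrainConfig):   # separate config for the mAP callback
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1           # effective BATCH_SIZE becomes 1

config = _InfConfig()
config.display()                 # shows resolved properties, incl. BATCH_SIZE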
Hey @Juuustin. I divide the complete dataset into 3 sets: train, test, and val(60%-30%-10%). The first two are used when training the network, and the third is used to understand the accuracy on the data that the network does not see during training.
Hi @VtlNmnk, do we need to provide annotations even for unseen data?
Hey @Juuustin. I divide the complete dataset into 3 sets: train, test, and val(60%-30%-10%). The first two are used when training the network, and the third is used to understand the accuracy on the data that the network does not see during training.
Hey @VtlNmnk, is it possible to get the accuracy during training? It would be better if we could get the accuracy while training :)
Hey @Juuustin. I divide the complete dataset into 3 sets: train, test, and val(60%-30%-10%). The first two are used when training the network, and the third is used to understand the accuracy on the data that the network does not see during training.
Hello @VtlNmnk, can you explain how the validation works during the training process?
And do we need to change the mean values of the image in the configuration file?
Hello and thanks for your previous answers. @VtlNmnk @rupa1118
When executed it behaves like this:
And I have fluctuations just like @rupa1118
Did you get a better val_loss? How did you improve it? Greetings, and thank you very much for your great help.
How did you improve it?
It’s hard to say without additional information. How many images are in the dataset? And how many classes? How did you split the data? Do you have augmentation? If so, which one?
Can anyone in this thread explain how to get the loss output logging that we see in hardikmanek's screenshot? I'm talking about the various losses logged to stdout - I'm only seeing the total loss and none of the rest (like rpn_class_loss, rpn_box_loss, etc.).
I have tried all the solutions and I'm facing the same issue... any solution?
I found I had the same issue running on Colab. I think it's due to the Keras version they use (2.3.1 at the time of writing this). It seems the newer versions have a model.add_metric() method for adding a tensor to the list of metrics:
model = KM.Model(inputs, outputs, name='mask_rcnn')
# add metrics
model.add_metric(mask_loss, name="mask_loss")
model.add_metric(bbox_loss, name="bbox_loss")
model.add_metric(class_loss, name="class_loss")
model.add_metric(rpn_bbox_loss, name="rpn_bbox_loss")
model.add_metric(rpn_class_loss, name="rpn_class_loss")
Note that this goes immediately after model = KM.Model() in the build() method. After adding these lines, the loss values appear while training:
12/100 [==>...........................] - ETA: 3:08 - loss: 1.3889 - mask_loss: 0.0680 - bbox_loss: 0.0478 - class_loss: 0.1614 - rpn_bbox_loss: 0.3475 - rpn_class_loss: 0.3476
Also add this somewhere in model.py, for example at the end of the file:
(the MeanAveragePrecisionCallback class code, quoted as posted above)
Hello, thank you very much for your code. Your code is useful to me. How can I save the mAP value from training?
Can anyone in this thread explain how to get the loss output logging that we see in hardikmanek's screenshot? I'm talking about the various losses logged to stdout - I'm only seeing the total loss and none of the rest (like rpn_class_loss, rpn_box_loss, etc.).
In model.py, replace self.keras_model.fit_generator(...) with h = self.keras_model.fit_generator(...) and return h (the Keras History object) from the train method. Then in your script, after h = model.train(...), refer to h.history['loss'], h.history['mrcnn_class_loss'], etc. for plotting loss diagrams.
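A hypothetical sketch of that usage, assuming model.py's train() has been patched as described to return the History object (matplotlib assumed available):

import matplotlib.pyplot as plt

h = model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30, layers='heads')

# h.history maps each logged metric name to a list of per-epoch values
plt.plot(h.history['loss'], label='loss')
plt.plot(h.history['val_loss'], label='val_loss')
plt.plot(h.history['mrcnn_class_loss'], label='mrcnn_class_loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()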
By default, Matterport adds a TensorBoard callback during training. In your log directory you will get event files along with your weights files. You can point TensorBoard at that directory and you will get all the losses.
TensorBoard command: tensorboard --logdir "path/to/logs" --port 8888 (or whichever port you have open)
Hi @mhtarora39,
I am trying to use your code to calculate mAP, precision, and recall, but I get the following error:
ValueError Traceback (most recent call last)
<ipython-input-23-a2f082d763b6> in <module>
64 config = InferenceConfig()
65 evel = EvalImage(dataset_vall,modelinf,config)
---> 66 evel.evaluate_model()
67
<ipython-input-23-a2f082d763b6> in evaluate_model(self, len)
51 r = yhat[0]
52 # calculate statistics, including AP
---> 53 AP, precisions, recalls, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"], r["class_ids"], r["scores"], r['masks'])
54 precisions_dict[image_id] = np.mean(precisions)
55 recall_dict[image_id] = np.mean(recalls)
D:/Desktop/aktwelve_mask_rcnn\mrcnn\utils.py in compute_ap(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold)
728 gt_boxes, gt_class_ids, gt_masks,
729 pred_boxes, pred_class_ids, pred_scores, pred_masks,
--> 730 iou_threshold)
731
732 # Compute precision and recall at each prediction box step
D:/Desktop/aktwelve_mask_rcnn\mrcnn\utils.py in compute_matches(gt_boxes, gt_class_ids, gt_masks, pred_boxes, pred_class_ids, pred_scores, pred_masks, iou_threshold, score_threshold)
680
681 # Compute IoU overlaps [pred_masks, gt_masks]
--> 682 overlaps = compute_overlaps_masks(pred_masks, gt_masks)
683
684 # Loop through predictions and find matching ground truth boxes
D:/Desktop/aktwelve_mask_rcnn\mrcnn\utils.py in compute_overlaps_masks(masks1, masks2)
113
114 # intersections and union
--> 115 intersections = np.dot(masks1.T, masks2)
116 union = area1[:, None] + area2[None, :] - intersections
117 overlaps = intersections / union
<__array_function__ internals> in dot(*args, **kwargs)
ValueError: shapes (5,1048576) and (3136,6) not aligned: 1048576 (dim 1) != 3136 (dim 0)
This is my implementation:
from mrcnn.utils import compute_ap

class EvalImage():
    def __init__(self, dataset, model, cfg):
        self.dataset = dataset
        self.model = model
        self.cfg = cfg

    def evaluate_model(self, len=50):
        APs = list()
        precisions_dict = {}
        recall_dict = {}
        for index, image_id in enumerate(self.dataset.image_ids):
            if(index > len):
                break;
            # load image, bounding boxes and masks for the image id
            # NOTE: unlike the original version above, this call omits use_mini_mask=False
            image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(self.dataset, self.cfg, image_id)
            # convert pixel values (e.g. center)
            # scaled_image = modellib.mold_image(image, self.cfg)
            # convert image into one sample
            sample = np.expand_dims(image, 0)
            # make prediction
            yhat = self.model.detect(sample, verbose=1)
            # extract results for first sample
            r = yhat[0]
            # calculate statistics, including AP
            AP, precisions, recalls, _ = compute_ap(gt_bbox, gt_class_id, gt_mask, r["rois"],
                                                    r["class_ids"], r["scores"], r['masks'])
            precisions_dict[image_id] = np.mean(precisions)
            recall_dict[image_id] = np.mean(recalls)
            # store
            APs.append(AP)
        # calculate the mean AP across all images
        mAP = np.mean(APs)
        return mAP, precisions_dict, recall_dict
dataset_vall = CocoLikeDataset()
dataset_vall.load_data('D:/Desktop/aktwelve_mask_rcnn/datasets/Acacia dataset', 'val')
dataset_vall.prepare()
config = InferenceConfig()
modelinf = modellib.MaskRCNN(mode="inference",
config=config,
model_dir=MODEL_DIR)
modelinf.load_weights(os.path.join(ROOT_DIR, "mask_rcnn_acacia-stp73-epch150_0075.h5"), by_name=True)
evel = EvalImage(dataset_vall,modelinf,config)
evel.evaluate_model()
Any suggestions would be appreciated
Can anyone give me a suggestion on how to solve this problem after the --------? I'm trying to run a Mask R-CNN code. PLEASE.

Configurations:
BACKBONE                       resnet101
BACKBONE_STRIDES               [4, 8, 16, 32, 64]
BATCH_SIZE                     1
BBOX_STD_DEV                   [0.1 0.1 0.2 0.2]
COMPUTE_BACKBONE_SHAPE         None
DETECTION_MAX_INSTANCES        100
DETECTION_MIN_CONFIDENCE       0.95
DETECTION_NMS_THRESHOLD        0.3
FPN_CLASSIF_FC_LAYERS_SIZE     1024
GPU_COUNT                      1
GRADIENT_CLIP_NORM             5.0
IMAGES_PER_GPU                 1
IMAGE_MAX_DIM                  1024
IMAGE_META_SIZE                14
IMAGE_MIN_DIM                  704
IMAGE_MIN_SCALE                0
IMAGE_RESIZE_MODE              square
IMAGE_SHAPE                    [1024 1024 3]
LEARNING_MOMENTUM              0.9
LEARNING_RATE                  0.001
LOSS_WEIGHTS                   {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}
MASK_POOL_SIZE                 14
MASK_SHAPE                     [28, 28]
MAX_GT_INSTANCES               30
MEAN_PIXEL                     [123.7 116.8 103.9]
MINI_MASK_SHAPE                (56, 56)
NAME                           shapes
NUM_CLASSES                    2
POOL_SIZE                      7
POST_NMS_ROIS_INFERENCE        1000
POST_NMS_ROIS_TRAINING         2000
ROI_POSITIVE_RATIO             0.33
RPN_ANCHOR_RATIOS              [0.5, 1, 2]
RPN_ANCHOR_SCALES              (48, 96, 192, 384, 768)
RPN_ANCHOR_STRIDE              1
RPN_BBOX_STD_DEV               [0.1 0.1 0.2 0.2]
RPN_NMS_THRESHOLD              0.7
RPN_TRAIN_ANCHORS_PER_IMAGE    256
STEPS_PER_EPOCH                3500
TOP_DOWN_PYRAMID_SIZE          256
TRAIN_BN                       False
TRAIN_ROIS_PER_IMAGE           300
USE_MINI_MASK                  True
USE_RPN_ROIS                   True
VALIDATION_STEPS               300
WEIGHT_DECAY                   0.0001
ValueError Traceback (most recent call last)
<ipython-input-1-50b8c26e8d29> in <module>
214 # Create model in training mode
215 model = modellib.MaskRCNN(mode="training", config=config,
--> 216 model_dir=MODEL_DIR)
217
218 # Which weights to start with?
~\Fish-characteristic-measurement\Complete code\mrcnn\model.py in __init__(self, mode, config, model_dir)
1830 self.model_dir = model_dir
1831 self.set_log_dir()
-> 1832 self.keras_model = self.build(mode=mode, config=config)
1833
1834 def build(self, mode, config):
~\Fish-characteristic-measurement\Complete code\mrcnn\model.py in build(self, mode, config)
1927 anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
1928 # A hack to get around Keras's bad support for constants
-> 1929 anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
1930 else:
1931 anchors = input_anchors
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
920 not base_layer_utils.is_in_eager_or_tf_function()):
921 with auto_control_deps.AutomaticControlDependencies() as acd:
--> 922 outputs = call_fn(cast_inputs, *args, **kwargs)
923 # Wrap Tensors in `outputs` in `tf.identity` to avoid
924 # circular dependencies.
~\Anaconda3\lib\site-packages\tensorflow\python\keras\layers\core.py in call(self, inputs, mask, training)
887 variable_scope.variable_creator_scope(_variable_creator):
888 result = self.function(inputs, **kwargs)
--> 889 self._check_variables(created_variables, tape.watched_variables())
890 return result
891
~\Anaconda3\lib\site-packages\tensorflow\python\keras\layers\core.py in _check_variables(self, created_variables, accessed_variables)
914 Variables.'''
915 ).format(name=self.name, variable_str=variable_str)
--> 916 raise ValueError(error_str)
917
918 untracked_used_vars = [
ValueError:
The following Variables were created within a Lambda layer (anchors)
but are not tracked by said layer:
<tf.Variable 'anchors/Variable:0' shape=(1, 261888, 4) dtype=float32>
The layer cannot safely ensure proper Variable reuse across multiple
calls, and consquently this behavior is disallowed for safety. Lambda
layers are not well suited to stateful computation; instead, writing a
subclassed Layer is the recommend way to define layers with
Variables.
Hey, I have the exact same issue. Did you find a solution to this error?
Hi @VtlNmnk, how did you get the save_each_n_epoch argument? I couldn't find it in model.py.
I managed to run the balloon training with the custom callback MeanAveragePrecisionCallback; however, the mAP calculation was never printed out during the training. Did I initialize the callback correctly?
class CustomConfig(BalloonConfig):
    NAME = "ballon_custom"
    IMAGES_PER_GPU = 1
    GPU_COUNT = 1
    IMAGE_RESIZE_MODE = "square"
    DETECTION_MIN_CONFIDENCE = 0.0
    NUM_CLASSES = 1 + 1
    STEPS_PER_EPOCH = 100
    USE_RPN_ROIS = False

def train(model):
    """Train the model."""
    # Training dataset.
    dataset_train = BalloonDataset()
    dataset_train.load_balloon(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = BalloonDataset()
    dataset_val.load_balloon(args.dataset, "val")
    dataset_val.prepare()

    model_inference = modellib.MaskRCNN(mode="inference", config=CustomConfig(),
                                        model_dir=args.logs)

    mean_average_precision_callback = modellib.MeanAveragePrecisionCallback(model,
        model_inference, dataset_val, calculate_map_at_every_X_epoch=10, verbose=1)

    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads',  # heads or all
                custom_callbacks=[mean_average_precision_callback])
Also add this somewhere in model.py, for example at the end of the file:
(the MeanAveragePrecisionCallback class code, quoted as posted above)
Many thanks for this! I was able to get this working straightaway; the only change I made was to calculate the mAP at the end of every epoch for my purposes.
Just a word of caution. I don't know if it's just me, but when viewing the plot on TensorBoard (which considers the epoch numbers to be 0-based, as indicated by the steps on the X axis), the plot for val_mean_average_precision is shifted to the right by 1. That is, assuming the mAP is calculated at the end of every epoch, the mAP from the 0th epoch is shown at step 1 of the plot, the mAP from the 1st epoch is at step 2, and so on. This causes the mAP from the final epoch to be absent from the plot (because the step would be out of the bounds of the X axis).
It looks like this is due to the sequence in which the callbacks are called. In MaskRCNN.train() in model.py, the custom callbacks are added to the end of the callback list while the TensorBoard callback is the first one. Therefore, when the MeanAveragePrecisionCallback gets called for the n'th epoch, TensorBoard has already finished preparing the summary for that epoch. So it doesn't see the mAP value from the n'th epoch until it is preparing the summary for the (n+1)'th epoch, at which point that value is erroneously considered to be part of the (n+1)'th epoch.
I changed the sequence of the callbacks to make the TensorBoard callback the last one. That corrected the problem, and I now see the mAP values at the correct step in the plot.
# The callback to save the model checkpoint
callbacks = [keras.callbacks.ModelCheckpoint(self.checkpoint_path, verbose=0, save_weights_only=True)]
# Add custom callbacks to the list
if custom_callbacks:
callbacks += custom_callbacks
# The tensorboard callback is last so that any metric logged by the custom callbacks would be picked up as part of the same epoch
callbacks.append(keras.callbacks.TensorBoard(log_dir=self.log_dir, histogram_freq=0, write_graph=True, write_images=False))
Hello @VtlNmnk, I still have this same error: "This callback only works with the batch size of 1". My BATCH_SIZE is currently '2'. How can I fix it?
Below is what I did.
I added code to coco.py
I added code to model.py
Hey! You need to use two configurations, both created on the basis of the base one, which can be imported like this:
from mrcnn.config import Config
A separate configuration must be created for the inference mode. Right now you are using the same configuration for both training and inference modes.
Thanks for the reply!
After changing class CocoConfig(Config), BATCH_SIZE changed to "1". Thanks for the good explanation; I had overlooked the code here.
But an error has occurred. It works correctly when IMAGES_PER_GPU is returned to "2".
It works correctly when IMAGES_PER_GPU is returned to "2".
I wrote to you about this earlier: use a separate instance of the configuration with BATCH_SIZE = 1 for inference, and another separate instance, with a different name, for training. That one, of course, will have BATCH_SIZE = 2 or as much as your GPU can handle. Look at https://github.com/matterport/Mask_RCNN/blob/master/samples/balloon/balloon.py
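For reference, a minimal sketch of the two-config pattern used in balloon.py (BalloonConfig and InferenceConfig are the names from that sample):

# Training config: batch size = GPU_COUNT * IMAGES_PER_GPU (here 2)
class BalloonConfig(Config):
    NAME = "balloon"
    IMAGES_PER_GPU = 2

# Inference config: run detection (and the mAP callback) one image at a time
class InferenceConfig(BalloonConfig):
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1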
I was looking in the wrong place! Thanks. The configuration is already separated for 'train' and 'inference'.
I'm confused. For 'train', how much GPU_COUNT should I use? I think I should use IMAGES_PER_GPU = 1, because the callback only works with BATCH_SIZE = 1. But right now I'm using IMAGES_PER_GPU = 2. It is configured from line 441 → lines 71~87. Should I change line 81 to 'IMAGES_PER_GPU = 1'?
For 'inference', how much IMAGES_PER_GPU should I use? Right now I'm using IMAGES_PER_GPU = 1. It is configured from lines 443~448.
Thank you very much for your help.
I have the same issue; how did you solve it, please?
Under model.py we see the Keras model compile, and then the metrics for the Mask R-CNN losses are added. But when training the model, neither train_accuracy nor val_accuracy are reported. How can we add these metrics and report them with each epoch, alongside the mrcnn metrics?