```xml
<!-- FLOBOT 3D Object Detector -->
<node pkg="object3d_detector" type="object3d_detector" name="object3d_detector" output="screen" respawn="false">
  <param name="model_file_name" type="string" value="$(find object3d_detector)/libsvm/pedestrian.model"/>
  <param name="range_file_name" type="string" value="$(find object3d_detector)/libsvm/pedestrian.range"/>
</node>
```
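For context, here is a minimal sketch (not the FLOBOT source; it only assumes libsvm's standard C API and the usual `svm-scale` range-file layout) of how these two parameters would typically be consumed at node startup:

```cpp
// Hypothetical startup sketch: read the two launch parameters, load the libsvm
// model, and parse the svm-scale range file used for feature normalisation.
#include <ros/ros.h>
#include "svm.h"   // libsvm header; actual include path depends on how libsvm is vendored
#include <cstdio>
#include <string>
#include <vector>

struct FeatureRange { double min, max; };

int main(int argc, char **argv) {
  ros::init(argc, argv, "object3d_detector");
  ros::NodeHandle private_nh("~");

  std::string model_file, range_file;
  private_nh.param<std::string>("model_file_name", model_file, "");
  private_nh.param<std::string>("range_file_name", range_file, "");

  // pedestrian.model: SVM trained offline, loaded with libsvm's standard call.
  svm_model *model = svm_load_model(model_file.c_str());
  if (!model) {
    ROS_ERROR("Failed to load SVM model from %s", model_file.c_str());
    return 1;
  }

  // pedestrian.range: assumed to be in svm-scale format, i.e.
  //   x
  //   <lower> <upper>
  //   <feature index> <min> <max>   (one line per feature)
  std::vector<FeatureRange> ranges;
  double lower = -1.0, upper = 1.0;
  if (FILE *fp = std::fopen(range_file.c_str(), "r")) {
    char section;
    if (std::fscanf(fp, " %c %lf %lf", &section, &lower, &upper) == 3) {
      int idx; double mn, mx;
      while (std::fscanf(fp, "%d %lf %lf", &idx, &mn, &mx) == 3)
        ranges.push_back({mn, mx});
    }
    std::fclose(fp);
  }

  ROS_INFO("SVM classes: %d, scaled features: %zu",
           svm_get_nr_class(model), ranges.size());
  ros::spin();
  svm_free_and_destroy_model(&model);
  return 0;
}
```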
Object3dDetector::trajectoryCallback()
The tracking trajectories fed back by the tracker are combined with the detections to generate training samples:

```cpp
//TODO -- True Classified --> Positive Sample
if (trajectory->header.frame_id == "human_trajectory" && positive_ < max_positives_) {
  svm_problem_.y[svm_problem_.l++] = 1; // 1, the positive label
  ++positive_ >= max_positives_ ? stop = true : stop = false;
  //learned_cloud += *learnable_clusters_[j];
}

//TODO -- False Classified --> Negative Sample
//TODO Why is "train_round_ > 0" needed?
if (train_round_ > 0 && trajectory->header.frame_id == "static_trajectory" &&
    negative_ < max_negatives_) {
  svm_problem_.y[svm_problem_.l++] = -1; // -1, the negative label
  ++negative_ >= max_negatives_ ? stop = true : stop = false;
  //learned_cloud += *learnable_clusters_[j];
}
```
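Once `stop` becomes true (both sample quotas are full), the accumulated `svm_problem_` is presumably handed back to libsvm for retraining. A hedged sketch of that step; the `svm_parameter` values below are generic libsvm defaults, not necessarily the ones `object3d_detector` uses:

```cpp
// Illustrative retraining step over the collected P/N samples in svm_problem_.
// Only svm_check_parameter()/svm_train() are libsvm API facts; everything else
// (function name, parameter choices) is an assumption for this sketch.
#include "svm.h"
#include <cstdio>

svm_model *retrainHumanClassifier(svm_problem &problem, int feature_dim) {
  svm_parameter param;
  param.svm_type     = C_SVC;
  param.kernel_type  = RBF;
  param.degree       = 3;
  param.gamma        = 1.0 / feature_dim;  // common default: 1 / number_of_features
  param.coef0        = 0;
  param.nu           = 0.5;
  param.cache_size   = 100;                // MB
  param.C            = 1.0;
  param.eps          = 1e-3;
  param.p            = 0.1;
  param.shrinking    = 1;
  param.probability  = 1;                  // keep probability estimates for detection
  param.nr_weight    = 0;
  param.weight_label = NULL;
  param.weight       = NULL;

  // Reject inconsistent problem/parameter combinations before training.
  if (const char *err = svm_check_parameter(&problem, &param)) {
    std::fprintf(stderr, "svm_check_parameter: %s\n", err);
    return NULL;
  }
  return svm_train(&problem, &param);  // the returned model replaces the old classifier
}
```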
Object3dDetector::extractCluster()
The segments are classified directly with the human-like volumetric model:

```cpp
//TODO How to classify positive/negative samples
//===> human-like volumetric model ==> filter out over- and under-segmented clusters TODO negative!!!
float width = max[0] - min[0];
float depth = max[1] - min[1];
float height = max[2] - min[2];
if (width >= vfilter_min_x_ && width <= vfilter_max_x_
    && depth >= vfilter_min_y_ && depth <= vfilter_max_y_
    && height >= vfilter_min_z_ && height <= vfilter_max_z_
    && min[2] <= cluster_min_z_) {
  Feature f;
  extractFeature(cluster, f);
  features_.push_back(f);
  cluster->header.seq = learnable_cluster_id_++;
  learnable_clusters_.push_back(cluster);
} else {
  //TODO condition test only, to be removed
  //TODO N-expert based on human-like volumetric model
  if (positive_ == max_positives_ && negative_ < max_negatives_) {
    Feature f;
    extractFeature(cluster, f);
    saveFeature(f, svm_problem_.x[svm_problem_.l]);
    svm_problem_.y[svm_problem_.l++] = -1; // -1, the negative label
    ++negative_;
  }
}
```
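For reference, `saveFeature(f, svm_problem_.x[svm_problem_.l])` has to pack the feature vector into libsvm's sparse `svm_node` layout. A minimal sketch of that convention (the real `Feature` struct in `object3d_detector` is different; only the 1-based indices and the `index = -1` terminator are libsvm requirements):

```cpp
// Assumed packing of a flat feature vector into libsvm's svm_node array.
// The destination array x must have room for values.size() + 1 entries.
#include "svm.h"
#include <cstddef>
#include <vector>

void saveFeatureSketch(const std::vector<double> &values, svm_node *x) {
  int n = 0;
  for (std::size_t i = 0; i < values.size(); ++i) {
    x[n].index = static_cast<int>(i) + 1;  // libsvm feature indices start at 1
    x[n].value = values[i];
    ++n;
  }
  x[n].index = -1;  // terminator that libsvm expects at the end of each sample
}
```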
online_learning workflow

- While `train_round_ < max_trains_`, the detector publishes its resulting `pose` to the people tracker. The tracker maintains the human trajectories and, once its conditions are met (a trajectory confirmed as `human_trajectory` or `static_trajectory`), publishes them back to the detector. For every `learnable_clusters_[i]` that was classified as "human" by the Human Classifier, the trajectory is used to generate P/N samples that correct the SVM Human Classifier; when `max_positives_`/`max_negatives_` are reached, the samples collected in this training round are used to retrain the SVM Human Classifier.
- After the last training round, the `pose` is no longer published to the people tracker, so the tracker effectively stops working, and the detector no longer collects P/N samples or retrains the SVM Human Classifier. All that remains of the detector's job is region-based segmentation, filtering with the human-like volumetric model, and detecting humans with the SVM Human Classifier.
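To make that gating explicit, here is a condensed, self-contained sketch of the control flow described above; class, member, and topic names are my own, not taken from the FLOBOT source:

```cpp
// Sketch of the train_round_ gate: poses are only fed to the people tracker
// while online learning is still running; afterwards the detector only classifies.
#include <ros/ros.h>
#include <geometry_msgs/PoseArray.h>

class OnlineLearningGate {
public:
  OnlineLearningGate(ros::NodeHandle &nh, int max_trains)
      : max_trains_(max_trains), train_round_(0) {
    // Topic name is illustrative only.
    pose_pub_ = nh.advertise<geometry_msgs::PoseArray>("object3d_detector/poses", 1);
  }

  // Called once per frame after segmentation, volumetric filtering and SVM classification.
  void publishDetections(const geometry_msgs::PoseArray &poses) {
    if (train_round_ < max_trains_) {
      // Learning phase: the people tracker consumes these poses, builds trajectories,
      // and publishes human/static trajectories back for P/N labelling.
      pose_pub_.publish(poses);
    }
    // After max_trains_ rounds: no poses for the tracker, no new samples,
    // no retraining -- the detector only keeps classifying clusters.
  }

  void onRetrainFinished() { ++train_round_; }  // bump after each SVM retrain

private:
  ros::Publisher pose_pub_;
  int max_trains_;
  int train_round_;
};
```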