Closed mateusandradepeixoto closed 2 years ago
I tried first creating only one node; later I created another node just for the subscriber.
RCCHECK(rclc_executor_init(&executor, &support.context, 1, &allocator));
and then you are adding two entities. Please use
RCCHECK(rclc_executor_init(&executor, &support.context, 2, &allocator));
std_msgs__msg__Int64MultiArray msg;
for both publishing and subscribing. Please use one message for the publisher and provide a separate one to the executor so it can handle incoming data memory. Also, one topic is named `int64_array_publisher` and the other `/int64_array_publisher`. Remove the leading `/` so the names match.
I made the corrections but it still doesn't work; only the topic "int64_array_publisher" receives data. Can you check the code, please? In particular the bold lines.
#include <stdio.h>
#include <unistd.h>
#include <rcl/rcl.h>
#include <rcl/error_handling.h>
#include <std_msgs/msg/int64_multi_array.h>
#include <std_msgs/msg/int64.h>
#include <rclc/rclc.h>
#include <rclc/executor.h>
#define BUFFER_SIZE 10
#define STRING_SIZE 30
#ifdef ESP_PLATFORM
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#endif
#define RCCHECK(fn) { rcl_ret_t temp_rc = fn; if((temp_rc != RCL_RET_OK)){printf("Failed status on line %d: %d. Aborting.\n",__LINE__,(int)temp_rc);vTaskDelete(NULL);}}
#define RCSOFTCHECK(fn) { rcl_ret_t temp_rc = fn; if((temp_rc != RCL_RET_OK)){printf("Failed status on line %d: %d. Continuing.\n",__LINE__,(int)temp_rc);}}
// micro-ROS entities shared between appMain and the executor callbacks.
rcl_publisher_t publisher;
rcl_subscription_t subscriber;
// Outgoing message: filled in appMain, republished/updated by timer_callback.
std_msgs__msg__Int64MultiArray msg;
// Incoming message storage handed to the executor; it must own memory
// separate from `msg`, otherwise incoming data overwrites the outgoing array.
std_msgs__msg__Int64MultiArray receive;
// Executor callback for "int64_array_subscriber": prints the first element
// of the incoming array, then republishes the whole message on `publisher`.
// `msgin` points to the `receive` storage registered with the executor.
void subscription_callback(const void * msgin)
{
    const std_msgs__msg__Int64MultiArray * message = (const std_msgs__msg__Int64MultiArray *)msgin;
    // Guard against an empty array before reading element 0.
    if (message->data.size > 0) {
        // PRId64 keeps the format portable: int64_t is not `long long` on every platform.
        printf("Recebido: %" PRId64 "\n", message->data.data[0]);
    }
    RCSOFTCHECK(rcl_publish(&publisher, (const void*)message, NULL));
}
// Periodic (1 s) callback: publishes the current `msg` and then bumps the
// first four elements by fixed step sizes so each publication differs.
void timer_callback(rcl_timer_t * timer, int64_t last_call_time)
{
    RCLC_UNUSED(last_call_time);
    if (timer == NULL) {
        return;
    }
    RCSOFTCHECK(rcl_publish(&publisher, &msg, NULL));
    // Per-element increments applied after each publish.
    static const int64_t step[4] = {2, 4, 7, 12};
    for (size_t idx = 0; idx < 4; idx++) {
        msg.data.data[idx] += step[idx];
    }
}
// micro-ROS FreeRTOS task entry point.
//
// Creates one node with:
//   - a publisher on  "int64_array_publisher"  (Int64MultiArray)
//   - a subscriber on "int64_array_subscriber" (Int64MultiArray)
//   - a 1 s timer that publishes `msg`
// then spins a 2-handle executor forever. `arg` is unused (FreeRTOS task
// signature).
void appMain(void * arg)
{
    (void) arg;
    rcl_allocator_t allocator = rcl_get_default_allocator();
    rclc_support_t support;

    // create init_options
    RCCHECK(rclc_support_init(&support, 0, NULL, &allocator));

    // create node
    rcl_node_t node;
    RCCHECK(rclc_node_init_default(&node, "int64_array_publisher", "", &support));

    // create publisher
    RCCHECK(rclc_publisher_init_default(
        &publisher,
        &node,
        ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, Int64MultiArray),
        "int64_array_publisher"));

    // create subscriber
    RCCHECK(rclc_subscription_init_default(
        &subscriber,
        &node,
        ROSIDL_GET_MSG_TYPE_SUPPORT(std_msgs, msg, Int64MultiArray),
        "int64_array_subscriber"));

    // create timer
    rcl_timer_t timer;
    const unsigned int timer_timeout = 1000;
    RCCHECK(rclc_timer_init_default(
        &timer,
        &support,
        RCL_MS_TO_NS(timer_timeout),
        timer_callback));

    // create executor with 2 handles: one subscription + one timer
    rclc_executor_t executor;
    RCCHECK(rclc_executor_init(&executor, &support.context, 2, &allocator));
    RCCHECK(rclc_executor_add_subscription(&executor, &subscriber, &receive, &subscription_callback, ON_NEW_DATA));
    RCCHECK(rclc_executor_add_timer(&executor, &timer));

    // Assign memory to msg.
    // `static` because these buffers are referenced through the global `msg`
    // inside executor callbacks; keeping them off the (small) task stack also
    // guarantees they outlive any early exit path.
    static int64_t buffer[BUFFER_SIZE] = {};
    msg.data.data = buffer;
    msg.data.size = 0;
    msg.data.capacity = BUFFER_SIZE;
    static std_msgs__msg__MultiArrayDimension dim[BUFFER_SIZE] = {};
    msg.layout.dim.data = dim;
    msg.layout.dim.size = 0;
    msg.layout.dim.capacity = BUFFER_SIZE;
    static char labels[BUFFER_SIZE][STRING_SIZE] = {};
    for (size_t i = 0; i < BUFFER_SIZE; i++)
    {
        msg.layout.dim.data[i].label.data = labels[i];
        msg.layout.dim.data[i].label.size = 0;
        msg.layout.dim.data[i].label.capacity = STRING_SIZE;
    }

    // Fill the message with dummy data
    for (size_t i = 0; i < BUFFER_SIZE; i++)
    {
        msg.data.data[i] = i;
        msg.data.size++;
    }
    msg.layout.data_offset = 42;
    for (size_t i = 0; i < BUFFER_SIZE; i++)
    {
        // %zu matches size_t; %u with a size_t argument is undefined behavior.
        snprintf(msg.layout.dim.data[i].label.data, STRING_SIZE, "label_%zu", i);
        msg.layout.dim.data[i].label.size = strlen(msg.layout.dim.data[i].label.data);
        msg.layout.dim.data[i].size = 42;
        msg.layout.dim.data[i].stride = 42;
    }

    // Assign memory to receive.
    // IMPORTANT: `receive` must NOT share buffers with `msg`. The executor
    // writes incoming data into this storage while `msg` is still being
    // published, so aliasing them corrupts the outgoing message.
    static int64_t recv_buffer[BUFFER_SIZE] = {};
    receive.data.data = recv_buffer;
    receive.data.size = 0;
    receive.data.capacity = BUFFER_SIZE;
    static std_msgs__msg__MultiArrayDimension recv_dim[BUFFER_SIZE] = {};
    receive.layout.dim.data = recv_dim;
    receive.layout.dim.size = 0;
    receive.layout.dim.capacity = BUFFER_SIZE;
    static char recv_labels[BUFFER_SIZE][STRING_SIZE] = {};
    for (size_t i = 0; i < BUFFER_SIZE; i++)
    {
        receive.layout.dim.data[i].label.data = recv_labels[i];
        receive.layout.dim.data[i].label.size = 0;
        receive.layout.dim.data[i].label.capacity = STRING_SIZE;
    }

    // Spin forever; the sleep yields the CPU between executor passes.
    while(1){
        rclc_executor_spin_some(&executor, RCL_MS_TO_NS(100));
        usleep(100000);
    }

    // free resources (unreachable while the loop above runs forever)
    RCCHECK(rcl_publisher_fini(&publisher, &node));
    RCCHECK(rcl_subscription_fini(&subscriber, &node));
    RCCHECK(rcl_node_fini(&node));
    vTaskDelete(NULL);
}
You are using the same memory space for both messages when:
msg.data.data = buffer;
...
receive.data.data = buffer;
and so on. That is not correct, please read the tutorial about handling memory in micro-ROS.
Regarding your screenshots, what are you expecting? You are receiving the topic published in the micro-ROS side? What is the expected behavior?
I fixed it. Well, I'm testing whether I can publish the same information on two different topics. I assumed the `int64_array_subscriber` topic would show:
Recebido: 40
Recebido: 42
Something like that.
I don't understand, in the micro-ROS side, you are publishing in int64_array_publisher
and subscribing in int64_array_subscriber
. On ROS 2 side you are subscribing both in int64_array_publisher
and int64_array_subscriber
. There is no data being published in int64_array_subscriber
. Which are your expected data flows?
Hi, again! I'm trying to develop a program that publishes data on one topic and that also contains a subscriber on another, differently named topic to receive that same data. I don't know if this idea makes sense; both topic names appear when I run the command "ros2 topic list", but neither receives data when I use the command "ros2 topic echo". I appreciate any help or tips.
app-colcon.meta: