linaro-swg / linux

Linux kernel source tree
Other
41 stars 79 forks source link

About: `thread_rpc_alloc_global_payload` always returns NULL #113

Open soloicesky opened 1 year ago

soloicesky commented 1 year ago

OPTEE OS: v3.12 linux tee driver: Tag - optee v3.12 Hi, I was trying to enable dynamic shared memory in OP-TEE OS and use the API thread_rpc_alloc_global_payload to allocate shared memory for DMA usage, but I failed. After tracking the code, I found that the pages and page_num in the shared memory object were null, which led to the failure. See the details below:

        pages = tee_shm_get_pages(shm, &page_num);
        if (!pages || !page_num) {
            arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
            goto bad;
        }

After reviewing tee_shm_alloc and pool_op_alloc, I found that the 'pages' and 'num_pages' fields had not been set — see below: tee_shm.c

/*
 * tee_shm_alloc() - allocate kernel-originated shared memory for a TEE context.
 * @ctx:   TEE context the allocation belongs to
 * @size:  requested size in bytes (the pool manager may round it up)
 * @flags: must include TEE_SHM_MAPPED; TEE_SHM_DMA_BUF additionally
 *         assigns an idr id and exports the allocation as a dma-buf
 *
 * Return: pointer to the new tee_shm on success, ERR_PTR() on failure.
 *
 * NOTE(review): this function itself never populates shm->pages. For a
 * TEE_SHM_DMA_BUF allocation, the pool's alloc op (pool_op_alloc in
 * optee/shm_pool.c) calls optee_shm_register(), which is where the page
 * list is expected to be attached to the shm. If that step does not set
 * shm->pages, a later tee_shm_get_pages() returns NULL — the symptom
 * described in this issue. Confirm against optee_shm_register().
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
    struct tee_device *teedev = ctx->teedev;
    struct tee_shm_pool_mgr *poolm = NULL;
    struct tee_shm *shm;
    void *ret;
    int rc;

    /* Only mapped (kernel-addressable) allocations are supported here. */
    if (!(flags & TEE_SHM_MAPPED)) {
        dev_err(teedev->dev.parent,
            "only mapped allocations supported\n");
        return ERR_PTR(-EINVAL);
    }

    /* Reject any flag outside the two supported ones. */
    if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
        dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
        return ERR_PTR(-EINVAL);
    }

    /* Take a reference on the device for the lifetime of the shm. */
    if (!tee_device_get(teedev))
        return ERR_PTR(-EINVAL);

    if (!teedev->pool) {
        /* teedev has been detached from driver */
        ret = ERR_PTR(-EINVAL);
        goto err_dev_put;
    }

    shm = kzalloc(sizeof(*shm), GFP_KERNEL);
    if (!shm) {
        ret = ERR_PTR(-ENOMEM);
        goto err_dev_put;
    }

    /* TEE_SHM_POOL marks the memory as pool-backed for tee_shm_release(). */
    shm->flags = flags | TEE_SHM_POOL;
    shm->ctx = ctx;
    /* dma-buf allocations come from the dma_buf pool, others are private. */
    if (flags & TEE_SHM_DMA_BUF)
        poolm = teedev->pool->dma_buf_mgr;
    else
        poolm = teedev->pool->private_mgr;

    /* Delegates the actual backing-memory allocation to the pool manager. */
    rc = poolm->ops->alloc(poolm, shm, size);
    if (rc) {
        ret = ERR_PTR(rc);
        goto err_kfree;
    }

    if (flags & TEE_SHM_DMA_BUF) {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        /* An id is only needed for dma-buf shms (user-space lookup). */
        mutex_lock(&teedev->mutex);
        shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
        mutex_unlock(&teedev->mutex);
        if (shm->id < 0) {
            ret = ERR_PTR(shm->id);
            goto err_pool_free;
        }

        exp_info.ops = &tee_shm_dma_buf_ops;
        exp_info.size = shm->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = shm;

        shm->dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(shm->dmabuf)) {
            ret = ERR_CAST(shm->dmabuf);
            goto err_rem;
        }
    }

    /* Pin the context so it outlives this shm. */
    teedev_ctx_get(ctx);

    return shm;
/* Unwind in strict reverse order of acquisition. */
err_rem:
    if (flags & TEE_SHM_DMA_BUF) {
        mutex_lock(&teedev->mutex);
        idr_remove(&teedev->idr, shm->id);
        mutex_unlock(&teedev->mutex);
    }
err_pool_free:
    poolm->ops->free(poolm, shm);
err_kfree:
    kfree(shm);
err_dev_put:
    tee_device_put(teedev);
    return ret;
}
optee/shm_pool.c
/*
 * pool_op_alloc() - pool-manager alloc op backing a tee_shm with real pages.
 * @poolm: owning pool manager (unused here beyond the ops contract)
 * @shm:   shm object to fill in (kaddr, paddr, size)
 * @size:  requested size; rounded up to a power-of-two number of pages
 *
 * For TEE_SHM_DMA_BUF allocations the pages are also registered with the
 * secure world through optee_shm_register(), which is what is expected to
 * attach the page list to @shm so tee_shm_get_pages() works later.
 *
 * Return: 0 on success, negative errno on failure. On failure no memory
 * is left allocated.
 */
static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
             struct tee_shm *shm, size_t size)
{
    unsigned int order = get_order(size);
    struct page *page;
    int rc = 0;

    page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
    if (!page)
        return -ENOMEM;

    shm->kaddr = page_address(page);
    shm->paddr = page_to_phys(page);
    /* Actual allocation may be larger than @size due to rounding. */
    shm->size = PAGE_SIZE << order;

    if (shm->flags & TEE_SHM_DMA_BUF) {
        unsigned int nr_pages = 1 << order, i;
        struct page **pages;

        /* sizeof(*pages): element size (struct page *), not the array
         * pointer's own size — the original used sizeof(pages), which
         * only worked because both happen to be pointer-sized. */
        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
            rc = -ENOMEM;
            goto err;
        }

        for (i = 0; i < nr_pages; i++)
            pages[i] = page + i;

        shm->flags |= TEE_SHM_REGISTER;
        rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
                    (unsigned long)shm->kaddr);
        kfree(pages);
        if (rc)
            goto err;
    }

    return 0;
err:
    /* Fix: the original leaked the alloc_pages() allocation on any
     * failure after this point (kcalloc or optee_shm_register). */
    __free_pages(page, order);
    return rc;
}
optee/rpc.c
/*
 * handle_rpc_func_cmd_shm_alloc() - serve an OP-TEE RPC request to allocate
 * shared memory on behalf of the secure world.
 * @ctx:      TEE context used for the allocation
 * @arg:      OP-TEE message; params[0] carries the request (type in
 *            value.a, size in value.b) and is overwritten with the result
 * @call_ctx: per-call context that owns the noncontig page list until the
 *            call returns to secure world
 *
 * On success arg->ret is TEEC_SUCCESS and params[0] describes the buffer;
 * on failure arg->ret carries a TEEC_* error and any allocated shm is
 * freed. No return value — all results are reported through @arg.
 */
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
                      struct optee_msg_arg *arg,
                      struct optee_call_ctx *call_ctx)
{
    phys_addr_t pa;
    struct tee_shm *shm;
    size_t sz;
    size_t n;

    arg->ret_origin = TEEC_ORIGIN_COMMS;

    /* The first parameter must be a VALUE_INPUT carrying the request. */
    if (!arg->num_params ||
        arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
        arg->ret = TEEC_ERROR_BAD_PARAMETERS;
        return;
    }

    /* All remaining parameters must be unused (TYPE_NONE). */
    for (n = 1; n < arg->num_params; n++) {
        if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
            arg->ret = TEEC_ERROR_BAD_PARAMETERS;
            return;
        }
    }

    sz = arg->params[0].u.value.b;
    switch (arg->params[0].u.value.a) {
    case OPTEE_MSG_RPC_SHM_TYPE_APPL:
        /* Application memory: forwarded to the user-space supplicant. */
        shm = cmd_alloc_suppl(ctx, sz);
        break;
    case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
        shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
        break;
    case OPTEE_MSG_RPC_SHM_TYPE_GLOBAL:
        /* Global memory: dma-buf backed so it can be shared/registered. */
        shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
        break;
    default:
        arg->ret = TEEC_ERROR_BAD_PARAMETERS;
        return;
    }

    if (IS_ERR(shm)) {
        arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
        return;
    }

    if (tee_shm_get_pa(shm, 0, &pa)) {
        arg->ret = TEEC_ERROR_BAD_PARAMETERS;
        goto bad;
    }

    sz = tee_shm_get_size(shm);

    /*
     * Registered shm (dynamic shared memory) is described to secure world
     * as a noncontiguous page list; unregistered shm as one physical
     * range. NOTE(review): this branch requires tee_shm_get_pages() to
     * return the page array set up at registration time — if shm->pages
     * was never populated (the bug reported in this issue) the OOM path
     * below is taken even though memory was allocated.
     */
    if (tee_shm_is_registered(shm)) {
        struct page **pages;
        u64 *pages_list;
        size_t page_num;

        pages = tee_shm_get_pages(shm, &page_num);
        if (!pages || !page_num) {
            arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
            goto bad;
        }

        pages_list = optee_allocate_pages_list(page_num);
        if (!pages_list) {
            arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
            goto bad;
        }

        /* call_ctx owns pages_list; freed when the call completes. */
        call_ctx->pages_list = pages_list;
        call_ctx->num_entries = page_num;

        arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                      OPTEE_MSG_ATTR_NONCONTIG;
        /*
         * In the least bits of u.tmem.buf_ptr we store buffer offset
         * from 4k page, as described in OP-TEE ABI.
         */
        arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
            (tee_shm_get_page_offset(shm) &
             (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
        arg->params[0].u.tmem.size = tee_shm_get_size(shm);
        /* shm_ref lets secure world hand the buffer back for freeing. */
        arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

        optee_fill_pages_list(pages_list, pages, page_num,
                      tee_shm_get_page_offset(shm));
    } else {
        /* Contiguous case: a single physical address + size suffices. */
        arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
        arg->params[0].u.tmem.buf_ptr = pa;
        arg->params[0].u.tmem.size = sz;
        arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
    }

    arg->ret = TEEC_SUCCESS;
    return;
bad:
    tee_shm_free(shm);
}

Q1: When is shm->pages assigned? (It is freed in tee_shm_release -> release_registered_pages.) Q2: Is the memory allocated by the thread_rpc_alloc_global_payload API suitable for DMA usage?

hearyouagain commented 1 year ago

I have the same problem: when I allocate DMA memory, it always reports that it can't create the mobj. I traced the source of the problem, which is that shm->pages == NULL; I don't know why this happens even after optee_shm_register is called. @jenswi-linaro

soloicesky commented 1 year ago

您好,你的邮件已收到,我会尽快回复你。                               best regards                                      zaixing_liu