Jittor / jittor

Jittor is a high-performance deep learning framework based on JIT compiling and meta-operators.
https://cg.cs.tsinghua.edu.cn/jittor/
Apache License 2.0

Encountered an error while running a simple script. #151

Open Agent-INF opened 4 years ago

Agent-INF commented 4 years ago

Hi! Thanks for the great project. However, I encountered an error while running a simple script. The code is:

import jittor as jt

def main():
  a = jt.float32([1, 2, 3])
  print(a)

if __name__ == "__main__":
    main()

The following is the log:

[i 1026 07:52:26.029810 92 __init__.py:246] Found g++(5.4.0) at /usr/bin/g++.
[i 1026 07:52:26.957751 40 compiler.py:839] Jittor(1.2.0.6) src: /usr/local/lib/python3.7/dist-packages/jittor
[i 1026 07:52:26.957939 40 compiler.py:840] cache_path: /root/.cache/jittor/default/g++
[i 1026 07:52:27.127582 40 __init__.py:246] Found /usr/local/cuda/bin/nvcc(10.0.130) at /usr/local/cuda/bin/nvcc.
[i 1026 07:52:27.140334 40 __init__.py:246] Found addr2line(2.26.1) at /usr/bin/addr2line.
[i 1026 07:52:27.738108 40 compiler.py:889] pybind_include: -I/usr/include/python3.7m -I/usr/local/lib/python3.7/dist-packages/pybind11/include
[i 1026 07:52:28.032747 40 compiler.py:891] extension_suffix: .cpython-37m-x86_64-linux-gnu.so
[i 1026 07:52:30.440535 40 __init__.py:165] Total mem: 251.81GB, using 16 procs for compiling.
[i 1026 07:52:33.431563 40 jit_compiler.cc:20] Load cc_path: /usr/bin/g++
[i 1026 07:52:33.637188 40 init.cc:51] Found cuda archs: [61,]
[i 1026 07:52:34.019319 40 compile_extern.py:355] mpicc not found, distribution disabled.
[i 1026 07:52:35.090452 40 compile_extern.py:15] found /usr/local/cuda/include/cublas.h
[i 1026 07:52:35.090592 40 compile_extern.py:15] found /usr/local/cuda/lib64/libcublas.so
[i 1026 07:52:35.597623 40 compile_extern.py:15] found /usr/include/cudnn.h
[i 1026 07:52:35.597807 40 compile_extern.py:15] found /usr/lib/x86_64-linux-gnu/libcudnn.so
[i 1026 07:52:35.666860 40 compiler.py:646] handle pyjt_include/usr/local/lib/python3.7/dist-packages/jittor/extern/cuda/cudnn/inc/cudnn_warper.h
[i 1026 07:52:37.074078 40 compile_extern.py:15] found /usr/local/cuda/include/curand.h
[i 1026 07:52:37.074200 40 compile_extern.py:15] found /usr/local/cuda/lib64/libcurand.so
[e 1026 07:52:37.267001 40 parallel_compiler.cc:229] [Error] source file location: /root/.cache/jittor/default/g++/jit/_opkey0:unary_Tx:int32__Ty:float32__OP:cast__JIT:1__JIT_cpu:1__index_t:int32___JIT:1__JIT_...hash:7f7700e1d539876b_op.cc
[e 1026 07:52:37.267014 40 parallel_compiler.cc:232] Compile fused operator(0/1) failed: [Op(0x2f7f7ed0:0:1:1:i1:o1:s0,unary.cast->0x30733d80),] 

Reason: [f 1026 07:52:37.266796 40 op_compiler.cc:838] Check failed: cm.size()>=2  Something wrong... Could you please report this issue?
#define JIT 1
#define JIT_cpu 1

#include <cmath>
#include "var.h"
#include "ops/unary_op.h"
#include "ops/unary_op_defs.h"
#include "ops/op_register.h"
#define Tx int32
#define Ty float32
#define OP cast
#define index_t int32

namespace jittor {

static auto make_binary = get_op_info("binary")
    .get_constructor<VarPtr, Var*, Var*, NanoString>();
static auto make_unary = get_op_info("unary")
    .get_constructor<VarPtr, Var*, NanoString>();
static auto make_ternary = get_op_info("ternary")
    .get_constructor<VarPtr, Var*, Var*, Var*>();
static auto make_number = get_op_info("number")
    .get_constructor<VarPtr, float, Var*>();

static unordered_set<string> unary_ops = {
    "bool",
    "int8",
    "int16",
    "int32",
    "int64",
    "uint8",
    "uint16",
    "uint32",
    "uint64",
    "float32",
    "float64",

    "abs",

    "negative",
    "logical_not",
    "bitwise_not",
    "log",
    "exp",
    "sqrt",
    "round",
    "floor",
    "ceil",
    "sin",

    "asin",
    "sinh",

    "asinh",
    "tan",

    "atan",
    "tanh",

    "atanh",
    "cos",

    "acos",
    "cosh",

    "acosh",
    "sigmoid",
};

UnaryOp::UnaryOp(Var* x, NanoString op) : x(x) {
    flags.set(NodeFlags::_cpu);
    flags.set(NodeFlags::_cuda);
    set_type(OpType::element);
    ns = op;
    ASSERT(ns.is_unary() | ns.is_dtype());
    NanoString dtype;
    if (ns.is_dtype()) {
        if (ns == x->dtype()) {
            forward(x);
            return;
        }
        dtype = ns;
        ns = ns_cast;
    } else if (ns.is_bool())
        dtype = ns_bool;
    else if (ns.is_float())
        dtype = dtype_infer(x->ns, x->ns, 2);
    else if (ns.is_int())
        dtype = dtype_infer(x->ns, x->ns, 1);
    else {
        dtype = x->ns;
    }
    y = create_output(nullptr, dtype);
}

VarPtr UnaryOp::grad(Var* out, Var* dout, Var* v, int v_index) {
    if (!x->is_float()) return nullptr;
    if (ns == ns_cast) return make_unary(dout, x->dtype());
    if (ns == ns_negative) return make_unary(dout, ns);
    if (ns == ns_abs) {
        auto neg = make_unary(dout, ns_negative);
        auto zeros = make_number(0, x);
        auto cond = make_binary(x, zeros, ns_greater_equal);
        return make_ternary(cond, dout, neg);
    }
    if (ns == ns_log)
        return make_binary(dout, x, ns_divide);
    if (ns == ns_exp)
        return make_binary(dout, y, ns_multiply);
    if (ns == ns_sqrt){
        auto two = make_number(2, x);
        auto twoy = make_binary(two, y, ns_multiply);
        return make_binary(dout, twoy, ns_divide);
    }

    if (ns == ns_sin)
        return make_binary(dout, make_unary(x, ns_cos), ns_multiply);

    if (ns == ns_cos)
        return make_binary(dout, make_unary(make_unary(x, ns_sin), ns_negative), ns_multiply);

    if (ns == ns_tan) {
        auto one = make_number(1, x);
        auto cosx = make_unary(x, ns_cos);
        auto cos2x = make_binary(cosx, cosx, ns_multiply);
        return make_binary(dout, cos2x, ns_divide);
    }

    if (ns == ns_asin) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(one, x2, ns_subtract);
        x2 = make_unary(x2, ns_sqrt);
        return make_binary(dout, x2, ns_divide);
    }

    if (ns == ns_acos) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(one, x2, ns_subtract);
        x2 = make_unary(x2, ns_sqrt);
        return make_unary(make_binary(dout, x2, ns_divide), ns_negative);
    }

    if (ns == ns_atan) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(one, x2, ns_add);
        return make_binary(dout, x2, ns_divide);
    }

    if (ns == ns_sinh)
        return make_binary(dout, make_unary(x, ns_cosh), ns_multiply);

    if (ns == ns_cosh)
        return make_binary(dout, make_unary(x, ns_sinh), ns_multiply);

    if (ns == ns_tanh) {
        auto cosx = make_unary(x, ns_cosh);
        auto cos2x = make_binary(cosx, cosx, ns_multiply);
        return make_binary(dout, cos2x, ns_divide);
    }

    if (ns == ns_asinh) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(x2, one, ns_add);
        x2 = make_unary(x2, ns_sqrt);
        return make_binary(dout, x2, ns_divide);
    }

    if (ns == ns_acosh) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(x2, one, ns_subtract);
        x2 = make_unary(x2, ns_sqrt);
        return make_binary(dout, x2, ns_divide);
    }

    if (ns == ns_atanh) {
        auto one = make_number(1, x);
        auto x2 = make_binary(x, x, ns_multiply);
        x2 = make_binary(one, x2, ns_subtract);
        return make_binary(dout, x2, ns_divide);
    }

    if (ns == ns_sigmoid) {
        auto r = make_binary(out, out, ns_multiply);
        r = make_binary(out, r, ns_subtract);
        return make_binary(dout, r, ns_multiply);
    }
    return nullptr;
}

void UnaryOp::infer_shape() {
    y->set_shape(x->shape);
}

void UnaryOp::jit_prepare() {
    add_jit_define("Tx", x->dtype());
    add_jit_define("Ty", y->dtype());
    add_jit_define("OP", ns.to_cstring());
}

} 

Traceback (most recent call last):
  File "main.py", line 8, in <module>
    main()
  File "main.py", line 5, in main
    print(a)
  File "/usr/local/lib/python3.7/dist-packages/jittor/__init__.py", line 849, in vtos
    return f"jt.Var({v.data}, dtype={v.dtype})"
RuntimeError: Wrong inputs arguments, Please refer to examples(help(jt.data)).

Types of your inputs are:
 self   = Var,

The function declarations are:
 inline DataView data()

Failed reason:[f 1026 07:52:37.267032 40 parallel_compiler.cc:282] Error happend during compilation, see error above.

Jittor was installed via pip, following the instructions in the README.

cjld commented 3 years ago

Thanks for reporting this issue. We cannot reproduce this error; could you please reinstall the newest version of Jittor and try again?
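For reference, a rough sketch of the reinstall on a pip-based setup; the cache-clearing step is only a precaution based on the cache_path shown in your log, not something we have confirmed is required:

# Upgrade to the latest release from PyPI
python3 -m pip install --upgrade jittor
# Optionally clear the old compile cache so stale generated kernels are not reused
rm -rf ~/.cache/jittor
# Then re-run your snippet, or Jittor's example test
python3 -m jittor.test.test_example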

Python 3.8.5 (default, Jul 28 2020, 12:59:40)
[GCC 9.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import jittor as jt
[i 1030 11:24:04.321740 28 __init__.py:246] Found g++(9.3.0) at /usr/bin/g++.
[i 1030 11:24:04.342438 00 compiler.py:839] Jittor(1.2.0.7) src: /home/cjld/new_jittor/jittor/python/jittor
[i 1030 11:24:04.342561 00 compiler.py:840] cache_path: /home/cjld/.cache/jittor/master/g++
[i 1030 11:24:04.391553 00 __init__.py:246] Found gdb(9.1) at /usr/bin/gdb.
[i 1030 11:24:04.397609 00 __init__.py:246] Found addr2line(2.34) at /usr/bin/addr2line.
[i 1030 11:24:04.433453 00 compiler.py:889] pybind_include: -I/usr/include/python3.8 -I/usr/local/lib/python3.8/dist-packages/pybind11/include
[i 1030 11:24:04.447997 00 compiler.py:891] extension_suffix: .cpython-38-x86_64-linux-gnu.so
[i 1030 11:24:04.561022 00 __init__.py:165] Total mem: 15.07GB, using 5 procs for compiling.
[i 1030 11:24:06.432639 00 jit_compiler.cc:20] Load cc_path: /usr/bin/g++
[i 1030 11:24:06.553587 00 compile_extern.py:355] mpicc not found, distribution disabled.
>>>
>>> def main():
...   a = jt.float32([1, 2, 3])
...   print(a)
...
>>> if __name__ == "__main__":
...     main()
...
jt.Var([1. 2. 3.], dtype=float32)
>>>

Feel free to reply if you have any other problems.