Tiramisu-Compiler / tiramisu

A polyhedral compiler for expressing fast and portable data parallel algorithms
http://tiramisu-compiler.org

Any luck building the autoscheduler tutorial with the latest Halide #375

Open tekstop opened 1 year ago

tekstop commented 1 year ago

After a few iterations, I was able to build Tiramisu against the latest Halide and get the tutorials working. Now, when I try to run the autoscheduler tutorial, the generator prints the output below and then crashes with a KeyError followed by a segmentation fault:

% ../generator
{ conv[n, fout, y, x, fin, k_y, k_x] -> src[n', fin', y_pad, x_pad] : -y + y_pad = 0 }
{ conv[n, fout, y, x, fin, k_y, k_x] -> src[n', fin', y_pad, x_pad] : -y - k_y + y_pad = 0 }
{ conv[n, fout, y, x, fin, k_y, k_x] -> src[n', fin', y_pad, x_pad] : -x + x_pad = 0 }
{ conv[n, fout, y, x, fin, k_y, k_x] -> src[n', fin', y_pad, x_pad] : -x - k_x + x_pad = 0 }

Generated Halide IR:
assert(reinterpret<uint64>((halide_buffer_t *)buf_weights.buffer) != (uint64)0, halide_error_buffer_argument_is_null("buf_weights"))
assert(reinterpret<uint64>((halide_buffer_t *)buf_src.buffer) != (uint64)0, halide_error_buffer_argument_is_null("buf_src"))
assert(reinterpret<uint64>((halide_buffer_t *)buf_output.buffer) != (uint64)0, halide_error_buffer_argument_is_null("buf_output"))
assert(reinterpret<uint64>((halide_buffer_t *)buf_bias.buffer) != (uint64)0, halide_error_buffer_argument_is_null("buf_bias"))
let buf_bias = (void *)_halide_buffer_get_host((halide_buffer_t *)buf_bias.buffer)
let buf_output = (void *)_halide_buffer_get_host((halide_buffer_t *)buf_output.buffer)
let buf_src = (void *)_halide_buffer_get_host((halide_buffer_t *)buf_src.buffer)
let buf_weights = (void *)_halide_buffer_get_host((halide_buffer_t *)buf_weights.buffer)
produce  {
 allocate _weights_b2[int32 * 1024 * 1024 * 2 * 8]
 allocate _src_b1[int32 * 1026 * 1026 * 3 * 8]
 allocate _conv_init_b3[int32 * 1024 * 1024 * 2 * 8]
 allocate _conv_b4[int32 * 3 * 3 * 3 * 1024 * 1024 * 2 * 8]
 allocate _bias_b0[int32 * 2]
 for (c1, 0, 8) {
  for (c3, 0, 2) {
   for (c5, 0, 1024) {
    for (c7, 0, 1024) {
     buf_output[(c1*2097152) + ((c3*1048576) + ((c5*1024) + c7))] = buf_bias[c3]
     for (c9, 0, 3) {
      for (c11, 0, 3) {
       for (c13, 0, 3) {
        buf_output[(c1*2097152) + ((c3*1048576) + ((c5*1024) + c7))] = buf_output[(c1*2097152) + ((c3*1048576) + ((c5*1024) + c7))] + (buf_src[(c1*3158028) + ((c9*1052676) + (((c11 + c5)*1026) + (c13 + c7)))]*buf_weights[(c3*27) + ((c9*9) + ((c11*3) + c13))])
       }
      }
     }
    }
   }
  }
 }
}
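
For what it's worth, the generated IR itself looks sane: the flattened strides match a row-major [8, 2, 1024, 1024] output, a padded [8, 3, 1026, 1026] src, and [2, 3, 3, 3] weights (quick check below; layouts inferred from the loop nest, so treat them as my reading of the IR). The crash happens afterwards, in the Python model code:

# Sanity check of the flattened strides in the IR above,
# assuming the row-major layouts implied by the loop nest.
assert 1024 * 1024 == 1048576        # buf_output fout stride
assert 2 * 1024 * 1024 == 2097152    # buf_output batch stride (FOut = 2)
assert 1026 * 1026 == 1052676        # buf_src fin stride (1024 + 2 padding)
assert 3 * 1026 * 1026 == 3158028    # buf_src batch stride (FIn = 3)
assert 3 * 3 * 3 == 27               # buf_weights fout stride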
Traceback (most recent call last):
  File "/local/tiramisu/tiramisu/tutorials/tutorial_autoscheduler/model/main.py", line 39, in <module>
    tree_tensor = get_representation(prog_json, sched_json)
  File "/local/tiramisu/tiramisu/tutorials/tutorial_autoscheduler/model/json_to_tensor.py", line 54, in get_representation
    iterators_repr.append(+(iterator_name in comp_dict['real_dimensions']))
KeyError: 'real_dimensions'
Segmentation fault
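
The failing line is json_to_tensor.py line 58, which unconditionally indexes comp_dict['real_dimensions']. As a local experiment (just a guess that the key is absent from the JSON this Tiramisu/Halide combination emits, not a proper fix), the lookup could be made defensive:

# Hypothetical local patch in get_representation(): fall back to an
# empty list when a computation's JSON has no 'real_dimensions' entry.
real_dims = comp_dict.get('real_dimensions', [])
iterators_repr.append(+(iterator_name in real_dims))

That should at least get past the KeyError, though I don't know whether an all-zeros entry in the representation is meaningful for the model.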

Note: without the autoscheduler boolean set to true, I am able to generate, compile, and execute with the wrapper.
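
So the C++ side seems fine; the question is whether 'real_dimensions' is really absent from the JSON the generator emits. Something like this before the get_representation call in main.py should show the keys each computation actually carries (a sketch; I'm assuming a top-level 'computations' mapping, going by the comp_dict naming in json_to_tensor.py):

# Hypothetical inspection: list the keys present for each computation
# in the program JSON, to see whether 'real_dimensions' ever appears.
for name, comp_dict in prog_json.get('computations', {}).items():
    print(name, sorted(comp_dict.keys()))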

Python 3.9.2, Torch 1.13.0+cpu

Any pointers would be helpful.