hughperkins / clnn

OpenCL backend for Torch nn neural networks library
BSD 2-Clause "Simplified" License

SpatialConvolutionMM gives incorrect output with nOutputPlane == 7 #37

Closed · hughperkins closed this 8 years ago

hughperkins commented 8 years ago

To reproduce:

  local batchSize = 1
  local inFeatures = 1
  local outFeatures = 7
  local sentenceLength = 3
  local kernelSize = 3
  local net = nn.SpatialConvolutionMM(inFeatures, outFeatures, 1, kernelSize)
  net:cl()
  local weights = net.weight
  weights:uniform(-1, 1)
  net.bias:zero()  -- to simplify test
  local input = torch.ClTensor(batchSize, inFeatures, sentenceLength, 1):uniform()
  local output = net:forward(input)
  print('weights:size()', weights:size())
  weights = weights:view(torch.LongStorage({outFeatures, inFeatures, kernelSize}))
  print('weights:size()', weights:size())
  print('output:size()', output:size())
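  -- with no padding and stride 1, this equals sentenceLength - (kernelSize - 1) for odd kernelSize: 3 - 2 = 1 here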
  local outLength = sentenceLength - math.floor(kernelSize / 2) * 2
  local ourOut = torch.FloatTensor(batchSize, outFeatures, outLength, 1):zero()

  for b=1,batchSize do
    -- each output feature is independent of the other outputs
    for outFeature=1,outFeatures do
      -- each output point along the outS dimension is independent of the other outputs
      for outS=1,outLength do
        local sum = 0
        -- convolve is sum over kernel size, and over the input features
        for k=1,kernelSize do
          local inS = outS + (k - 1)
          for inFeature=1,inFeatures do
            local weight = weights[outFeature][inFeature][k]
            sum = sum + weight * input[b][inFeature][inS][1]
          end
        end
        ourOut[b][outFeature][outS][1] = sum
      end
    end
  end
  print('output[1]')
  print(output[1])
  print('ourOut[1]')
  print(ourOut[1])
  print('output[1] - ourOut[1]')
  print(output[1]:float() - ourOut[1])
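  -- mytester is the torch.Tester instance provided by the surrounding test harness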
  mytester:assertlt((output:float() - ourOut):abs():max(), 0.0001)

(embedded as a test case)

For other nOutputPlane values it seems to work OK...

hughperkins commented 8 years ago

Added failing unit test for this in 53f3a3f

hughperkins commented 8 years ago

Seems like it might be a clBLAS issue? The standalone program below calls clblasSgemm directly, with the same parameters as the first gemm shown in the comments (transA = 't', m = 1, n = 7, k = 1), and reproduces the wrong values:

#include <iostream>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <clew.h>    // for clewInit(), called from main()
#include <clBLAS.h>
using namespace std;

//                        m  n  k  alpha    lda     ldb beta       ldc
//THCLBlas_Gemm('t', 'n', 1, 7 ,1, 1, ones, 1, bias, 1, 0, output_n, 1);
//THCLBlas_Gemm('n', 'n', 1, 7 ,3, 1, columns, 1, weight, 3, 1, output_n, 1);
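// (the first call, the bias gemm with transA == 't' and m == k == 1, is the
// one reproduced by test1() below)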

//    SUBROUTINE DGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA,
//                       B, LDB, BETA, C, LDC )
//          CHARACTER*1       TRANSA, TRANSB
//          INTEGER           M, N, K, LDA, LDB, LDC
//          DOUBLE PRECISION  ALPHA, BETA
//          DOUBLE PRECISION  A( LDA, * ), B( LDB, * ), C( LDC, * )
void clgemm(char transAchar, char transBchar, int M, int N, int K, float alpha, float *A, int lda,
            float *B, int ldb, float beta, float *C, int ldc, float *result) {
  cout << "clgemm 1" << endl;
  clblasTranspose transA = transAchar == 'n' ? clblasNoTrans : clblasTrans;
  clblasTranspose transB = transBchar == 'n' ? clblasNoTrans : clblasTrans;

  // no sub-matrix offsets: operate on the full matrices
  size_t off = 0;
  size_t offA = 0;
  size_t offB = 0;
  size_t offC = 0;

  static const clblasOrder order = clblasColumnMajor;
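  // clBLAS follows the Fortran BLAS convention: matrices are column-major,
  // so the 1 x 7 result C has one element per column and ldc == 1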

  cl_int err;
  cl_platform_id platform = 0;
  cl_device_id device = 0;
  cl_context_properties props[3] = { CL_CONTEXT_PLATFORM, 0, 0 };
  cl_context ctx = 0;
  cl_command_queue queue = 0;
  cl_mem bufA, bufB, bufC;
  cl_event event = NULL;
  int ret = 0;

cout << "clgemm 2" << endl;
  /* Setup OpenCL environment. */
  err = clGetPlatformIDs(1, &platform, NULL);
  if (err != CL_SUCCESS) {
      printf( "clGetPlatformIDs() failed with %d\n", err );
      return;
  }
  cout << "got platforms" << endl;

  err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
  if (err != CL_SUCCESS) {
      printf( "clGetDeviceIDs() failed with %d\n", err );
      return;
  }

  props[1] = (cl_context_properties)platform;
  ctx = clCreateContext(props, 1, &device, NULL, NULL, &err);
  if (err != CL_SUCCESS) {
      printf( "clCreateContext() failed with %d\n", err );
      return;
  }

  queue = clCreateCommandQueue(ctx, device, 0, &err);
  if (err != CL_SUCCESS) {
      printf( "clCreateCommandQueue() failed with %d\n", err );
      clReleaseContext(ctx);
      return;
  }

  /* Setup clblas. */
  err = clblasSetup();
  if (err != CL_SUCCESS) {
      printf("clblasSetup() failed with %d\n", err);
      clReleaseCommandQueue(queue);
      clReleaseContext(ctx);
      return;
  }

  /* Prepare OpenCL memory objects and place matrices inside them. */
  bufA = clCreateBuffer(ctx, CL_MEM_READ_ONLY, M * K * sizeof(*A),
                        NULL, &err);
  bufB = clCreateBuffer(ctx, CL_MEM_READ_ONLY, K * N * sizeof(*B),
                        NULL, &err);
  bufC = clCreateBuffer(ctx, CL_MEM_READ_WRITE, M * N * sizeof(*C),
                        NULL, &err);

  err = clEnqueueWriteBuffer(queue, bufA, CL_TRUE, 0,
      M * K * sizeof(*A), A, 0, NULL, NULL);
  err = clEnqueueWriteBuffer(queue, bufB, CL_TRUE, 0,
      K * N * sizeof(*B), B, 0, NULL, NULL);
  err = clEnqueueWriteBuffer(queue, bufC, CL_TRUE, 0,
      M * N * sizeof(*C), C, 0, NULL, NULL);

  cout << "about to call blas" << endl;
  /* Call clblas extended function. Perform gemm for the lower right sub-matrices */
  err = clblasSgemm(order, transA, transB, M - off, N - off, K - off,
                       alpha, bufA, offA, lda,
                       bufB, offB, ldb, beta,
                       bufC, offC, ldc,
                       1, &queue, 0, NULL, &event);
  cout << "called blas" << endl;
  if (err != CL_SUCCESS) {
      printf("clblasSgemm() failed with %d\n", err);
      ret = 1;
  }
  else {
      /* Wait for calculations to be finished. */
      err = clWaitForEvents(1, &event);

      /* Fetch results of calculations from GPU memory. */
      err = clEnqueueReadBuffer(queue, bufC, CL_TRUE, 0,
                                M * N * sizeof(*result),
                                result, 0, NULL, NULL);
      cout << "got result" << endl;
      /* At this point the result of the SGEMM is in the 'result' array. */
      puts("");
  }

  /* Release OpenCL memory objects. */
  clReleaseMemObject(bufC);
  clReleaseMemObject(bufB);
  clReleaseMemObject(bufA);

  /* Finalize work with clblas. */
  clblasTeardown();

  /* Release OpenCL working objects. */
  clReleaseCommandQueue(queue);
  clReleaseContext(ctx);
}

void test1() {
  char transa = 't';
  char transb = 'n';
  int m = 1;
  int n = 7;
  int k = 1;
  float alpha = 1;
  int lda = 1;
  int ldb = 1;
//  int ldb = k;
  float beta = 0;
  int ldc = 1;

  // [1 x 1] [1 x 7] => [1 x 7]

  int Arows = m;
  int Acols = k;
  int Brows = k;
  int Bcols = n;
  int Crows = m;
  int Ccols = n;

  float *A = new float[m * k];
  float *B = new float[k * n];
  float *C = new float[m * n];
  A[0] = 0.5;
  for(int i = 0; i < 7; i++) {
    B[i] = 1.0f / (float)(1.0f + i) - 0.5f;
  }

  for(int i = 0; i < 7; i++) {
    C[i] = 0.0f;
  }
  float *Cours = new float[m * n];
  for(int i = 0; i < 7; i++) {
    Cours[i] = A[0] * B[i];
    cout << "Cours[" << i << "]=" << Cours[i] << endl;
  }

  float *clout = new float[m * n];
  clgemm(transa, transb, m, n, k, alpha, A, lda,
     B, ldb, beta, C, ldc, clout);
  for(int i = 0; i < 7; i++) {
    cout << "clout[" << i << "]=" << clout[i] << endl;
  }
}

int main(int argc, char *argv[]) {
  clewInit();
  test1();
  return 0;
}

output:

Cours[0]=0.25
Cours[1]=0
Cours[2]=-0.0833333
Cours[3]=-0.125
Cours[4]=-0.15
Cours[5]=-0.166667
Cours[6]=-0.178571
clgemm 1
clgemm 2
got platforms
about to call blas
called blas
got result

clout[0]=0.25
clout[1]=0
clout[2]=-0.0833333
clout[3]=-0.125
clout[4]=0
clout[5]=-0.166667
clout[6]=-0.15

clout[4] and clout[6] are wrong: comparing against Cours above, clout[4] should be -0.15 and clout[6] should be -0.178571.
hughperkins commented 8 years ago

Seems I can work around it by just changing transA from 't' to 'n', since k is 1 anyway, so the result is the same (or would be, if there wasn't an issue when using transA == 't').
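Sanity check on why the swap is safe: with m == 1 and k == 1, A is a 1x1 matrix, so op(A) is identical whether or not it is transposed. A minimal CPU-only sketch (gemm_ref is a hypothetical reference implementation written just for this note, not part of clnn or clBLAS):

#include <cassert>
#include <iostream>

// reference column-major gemm; only handles op(B) == B, as in the calls above
void gemm_ref(bool transA, int M, int N, int K, float alpha,
              const float *A, int lda, const float *B, int ldb,
              float beta, float *C, int ldc) {
  for (int j = 0; j < N; j++) {
    for (int i = 0; i < M; i++) {
      float sum = 0;
      for (int p = 0; p < K; p++) {
        // column-major: op(A)(i,p) is A[p + i*lda] if transposed, else A[i + p*lda]
        float a = transA ? A[p + i * lda] : A[i + p * lda];
        sum += a * B[p + j * ldb];
      }
      C[i + j * ldc] = alpha * sum + beta * C[i + j * ldc];
    }
  }
}

int main() {
  // same shapes as test1(): m = 1, n = 7, k = 1
  float A[1] = {0.5f};
  float B[7], Ct[7] = {0}, Cn[7] = {0};
  for (int i = 0; i < 7; i++) B[i] = 1.0f / (1.0f + i) - 0.5f;
  gemm_ref(true, 1, 7, 1, 1.0f, A, 1, B, 1, 0.0f, Ct, 1);   // transA == 't'
  gemm_ref(false, 1, 7, 1, 1.0f, A, 1, B, 1, 0.0f, Cn, 1);  // transA == 'n'
  for (int i = 0; i < 7; i++) assert(Ct[i] == Cn[i]);  // 1x1 A is its own transpose
  std::cout << "transA 't' and 'n' agree for m == k == 1" << std::endl;
  return 0;
}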

Addressed in a548012