Matrix-vector multiplications using OpenCL

13,167

Solution 1

To increase GPU efficiency in this case, more work-items are needed (one per output value is not enough) and the compute/memory access ratio should be higher (i.e. reuse values multiple times when possible).

I have written a few pages on this problem some time ago, if you are interested: GPU matrix-vector product.

Solution 2

Have you tried using local memory for vectorB? Each element is read by all work items, so it makes sense to read it from local. I hard-coded the local memory size to 8192 below, but you can play around with the number yourself. (8192 floats being the max for opencl 1.1/1.2)

Also, try using a work group size a multiple of 16 (64 or 128 should work well) for GPU if you can.

/* One work-item per output element: row `tx` of matrixA dotted with vectorB.
 * vectorB is staged into local memory once per work-group, since every
 * work-item reads all of it; 8192 floats (32 KiB) is the minimum local
 * memory an OpenCL 1.1/1.2 device must provide. */
__kernel void matrixVectorMul(__global float* resultVector,
    __global float* matrixA,
    __global float* vectorB, 
    int width_A)
{
    int tx = get_global_id(0);
    __local float vectB[4096*2];

    /* Cooperative group copy of vectorB into local memory. */
    event_t copy_event = async_work_group_copy(vectB, vectorB, 4096*2, 0);
    /* BUG FIX: wait_group_events takes a pointer to the event list,
     * not the event by value. */
    wait_group_events(1, &copy_event);

    float value = 0;
    /* width_A is a signed int; keep the loop index signed to avoid a
     * signed/unsigned comparison. */
    for (int k = 0; k < width_A; ++k) {
        value += matrixA[tx * width_A + k] * vectB[k];
    }

    resultVector[tx] = value;
}
Share:
13,167
int
Author by

int

Updated on July 21, 2022

Comments

  • int
    int almost 2 years

    I have 2 different programs.

    The first performs matrix-matrix multiplication using OpenCL. On my GPU it runs much faster than on the host CPU (0.2 seconds vs 18 seconds, for example).

    The second performs matrix-vector multiplication using OpenCL, and it runs slightly slower on the GPU than on the host CPU.

    What are the reasons?

    Here is kernel

    /* Naive matrix-vector product: one work-item per output row.
     * Each work-item computes the dot product of its row of matrixA
     * (row-major, width_A columns) with vectorB, reading vectorB
     * directly from global memory. */
    __kernel void matrixVectorMul(__global float* resultVector,
        __global float* matrixA,
        __global float* vectorB, 
        int width_A)
    {
        const int row = get_global_id(0);

        float dot = 0;
        for (unsigned int col = 0; col < width_A; ++col)
            dot += matrixA[row * width_A + col] * vectorB[col];

        resultVector[row] = dot;
    }
    

    And host code

    #include <stdlib.h>
    #define __CL_ENABLE_EXCEPTIONS
    #include "cl.hpp"
    #include <fstream>
    #include <iostream>
    #include <time.h>
    #include <cmath>
    
    /* Work-group size used for the NDRange launch. */
    #define LOCAL_SIZE 512
    /* Problem size: matrix_A is HEIGHT_A x WIDTH_A, vector_B has WIDTH_A elements. */
    #define WIDTH_A (4096*2)
    #define HEIGHT_A (4096*2)
    
    float *matrix_A;           /* HEIGHT_A * WIDTH_A input matrix, row-major */
    float *vector_B;           /* WIDTH_A input vector */
    float *result_vector;      /* device-computed result, WIDTH_A entries */
    float *result_vector_host; /* host-computed reference, WIDTH_A entries */
    
    /* Fill data[0..size) with pseudo-random floats in [0, 1]. */
    void randomInit(float *data, int size) {
        unsigned int i = 0;
        while (i < (unsigned int)size) {
            data[i] = rand() / (float)RAND_MAX;
            ++i;
        }
    }
    
    /* Allocate the global input/output buffers and fill the inputs with
     * random data. Ownership of the arrays is released in main(). */
    void GenerateTestData() {
        srand((unsigned int)time(NULL));
    
        const unsigned int size_A = WIDTH_A * HEIGHT_A;
    
        matrix_A = new float[size_A];
        vector_B = new float[WIDTH_A];
    
        randomInit(matrix_A, size_A);
        randomInit(vector_B, WIDTH_A);
    
        /* Outputs: one slot per matrix row for device and host results. */
        result_vector = new float[WIDTH_A];
        result_vector_host = new float[WIDTH_A];
    }
    
    /* Run the matrix-vector product on `device`: build a context/queue,
     * upload the inputs, compile the kernel from its .cl file, launch one
     * work-item per output row, and read back result_vector. Setup time
     * and launch+readback time are printed to stdout separately. */
    void PerformCalculationOnDevice(cl::Device device) {
        clock_t start_t, end_t;
        start_t = clock();
        std::vector<cl::Device> contextDevices;
        contextDevices.push_back(device);
        cl::Context context(contextDevices);
    
        cl::CommandQueue queue(context, device);
    
        std::fill_n(result_vector, WIDTH_A, 0);
    
        // CL_MEM_COPY_HOST_PTR copies the host arrays into the buffers at creation time.
        cl::Buffer cl_matrix_A = cl::Buffer(context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * HEIGHT_A * sizeof(float), matrix_A);
        cl::Buffer cl_vector_B = cl::Buffer(context, CL_MEM_READ_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), vector_B);
        cl::Buffer cl_result_vector = cl::Buffer(context, CL_MEM_WRITE_ONLY|CL_MEM_COPY_HOST_PTR, WIDTH_A * sizeof(float), result_vector);
        end_t = clock();
        std::cout << "Context, queue, buffers " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;
    
        // Load and build the kernel source at runtime.
        std::ifstream sourceFile("MatrixVectorMultiplicationKernel.cl");
        std::string sourceCode(std::istreambuf_iterator<char>(sourceFile),(std::istreambuf_iterator<char>()));
    
        cl::Program::Sources source(1, std::make_pair(sourceCode.c_str(), sourceCode.length()+1));
        cl::Program program = cl::Program(context, source);
        program.build(contextDevices);
        cl::Kernel kernel(program, "matrixVectorMul");
    
        int iArg = 0;
        kernel.setArg(iArg++, cl_result_vector);
        kernel.setArg(iArg++, cl_matrix_A);
        kernel.setArg(iArg++, cl_vector_B);
        kernel.setArg(iArg++, WIDTH_A);
    
        // Global size = one work-item per row; work-group size = LOCAL_SIZE.
        start_t = clock();
        queue.enqueueNDRangeKernel(kernel, cl::NullRange, cl::NDRange(HEIGHT_A), cl::NDRange(LOCAL_SIZE));
        queue.finish();
    
        // Blocking read (CL_TRUE), so the timing below includes the transfer.
        queue.enqueueReadBuffer(cl_result_vector, CL_TRUE, 0, WIDTH_A * sizeof(float), result_vector);
        end_t = clock();
        std::cout << "enqueueNDRangeKernel and enqueueReadBuffer " << (float)(end_t - start_t) / CLOCKS_PER_SEC << std::endl;
    }
    
    /* CPU reference: result_vector_host = matrix_A * vector_B,
     * computed row by row with a plain dot product. */
    void PerformCalculationOnHost() {
        for (int row = 0; row < HEIGHT_A; row++) {
            float acc = 0;
            for (int col = 0; col < WIDTH_A; col++)
                acc += matrix_A[row * WIDTH_A + col] * vector_B[col];
            result_vector_host[row] = acc;
        }
    }
    
    /* Entry point: generate test data, run the kernel on every OpenCL device
     * of every platform, run the CPU reference, then compare the two results
     * and report timings, error count and mean deviation. Both result vectors
     * are also dumped to text files for inspection. */
    int main(int argc, char** argv) {
        GenerateTestData();
    
        // Enumerate all platforms/devices; the device timing covers all runs.
        std::vector<cl::Platform> platforms;
        cl::Platform::get(&platforms);
        std::vector<cl::Device> devices;
        clock_t start_t = clock();
        for (unsigned int iPlatform=0; iPlatform<platforms.size(); iPlatform++) {
            platforms[iPlatform].getDevices(CL_DEVICE_TYPE_ALL, &devices);
            for (unsigned int iDevice=0; iDevice<devices.size(); iDevice++) {
                try {
                    PerformCalculationOnDevice(devices[iDevice]);
                } catch (cl::Error error) {
                    std::cout << error.what() << "(" << error.err() << ")" << std::endl;   
                }
            }
        }
        clock_t end_t = clock();
        std::cout << "Device: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;
        // CPU reference run, timed separately.
        start_t = clock();
        PerformCalculationOnHost();
        end_t = clock();
        std::cout << "Host: " << (float)(end_t - start_t) / CLOCKS_PER_SEC << " seconds" << std::endl;
        // Compare device vs host: count entries that differ by more than 1e-3
        // and accumulate the mean absolute deviation.
        int errors = 0;
        float mean_deviation = 0;
        FILE *f, *f_host;
        f = fopen("device_result", "w");
        f_host = fopen("host_result", "w");
        for(int i = 0; i < WIDTH_A; i++) {
                if(fabs(result_vector[i] - result_vector_host[i]) > 1E-3) {
                    errors++;
                }
                fprintf(f, "%.2f\n", result_vector[i]);
                fprintf(f_host, "%.2f\n", result_vector_host[i]);
                mean_deviation += fabs(result_vector[i] - result_vector_host[i]);
        }
        fclose(f); fclose(f_host);
        mean_deviation /= WIDTH_A;
        std::cout << "Errors = " << errors << std::endl;
        std::cout << "Mean deviation = " << mean_deviation << std::endl;
    
        delete[](matrix_A);
        delete[](vector_B);
        delete[](result_vector);
        delete[](result_vector_host);
        return 0;
    }
    

    And when i run it, i get following results

    Context, queue, buffers 0.45
    enqueueNDRangeKernel and enqueueReadBuffer 1.31
    Device: 1.79 seconds
    Host: 1.42 seconds
    Errors = 0
    Mean deviation = 8.78572e-05
    
  • nat chouf
    nat chouf about 11 years
    One idea to speed this up, would be to compute simultaneously the product of the same matrix with multiple vectors.