Installation test

conda install numba
conda install cudatoolkit
python cuda_start_test.py
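Before running the test script, a quick sanity check (a minimal sketch; cuda.detect() is Numba's built-in device probe) confirms that Numba can see the GPU:

from numba import cuda

# Prints a summary of the CUDA devices Numba found and whether they are supported.
cuda.detect()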
  • Test code
import numpy as np

from numba import cuda, float32

# Controls threads per block and shared memory usage.
# The computation will be done on blocks of TPBxTPB elements.
TPB = 16

@cuda.jit
def fast_matmul(A, B, C):
    # Define an array in the shared memory
    # The size and type of the arrays must be known at compile time
    sA = cuda.shared.array(shape=(TPB, TPB), dtype=float32)
    sB = cuda.shared.array(shape=(TPB, TPB), dtype=float32)

    x, y = cuda.grid(2)

    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bpg = cuda.gridDim.x    # blocks per grid

    if x >= C.shape[0] or y >= C.shape[1]:
        # Quit if (x, y) is outside of valid C boundary.
        # Because syncthreads() is called inside the loop below, this early
        # return is only safe when the matrix dimensions are exact multiples
        # of TPB, as they are in the test code at the bottom.
        return

    # Each thread computes one element in the result matrix.
    # The dot product is chunked into dot products of TPB-long vectors.
    tmp = 0.
    for i in range(bpg):
        # Preload data into shared memory
        sA[tx, ty] = A[x, ty + i * TPB]
        sB[tx, ty] = B[tx + i * TPB, y]

        # Wait until all threads finish preloading
        cuda.syncthreads()

        # Computes partial product on the shared memory
        for j in range(TPB):
            tmp += sA[tx, j] * sB[j, ty]

        # Wait until all threads finish computing
        cuda.syncthreads()

    C[x, y] = tmp

@cuda.jit
def matmul(A, B, C):
    """Perform square matrix multiplication of C = A * B
    """
    i, j = cuda.grid(2)
    if i < C.shape[0] and j < C.shape[1]:
        tmp = 0.
        for k in range(A.shape[1]):
            tmp += A[i, k] * B[k, j]
        C[i, j] = tmp


x = np.random.rand(10000, 10000).astype(np.float32)
y = np.random.rand(10000, 10000).astype(np.float32)
z = np.zeros((10000, 10000), dtype=np.float32)

# Numba kernels need an explicit launch configuration: kernel[blocks, threads](...).
# 10000 is an exact multiple of TPB (16), so integer division gives full coverage.
threadsperblock = (TPB, TPB)
blockspergrid = (z.shape[0] // TPB, z.shape[1] // TPB)

print("start")
for i in range(100):
    print(i)
    fast_matmul[blockspergrid, threadsperblock](x, y, z)
print(z)
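As a correctness check, a minimal sketch (the small test arrays here are illustrative, not part of the original script) compares the kernel output against NumPy on a size that is an exact multiple of TPB:

a = np.random.rand(64, 64).astype(np.float32)
b = np.random.rand(64, 64).astype(np.float32)
c = np.zeros((64, 64), dtype=np.float32)

fast_matmul[(64 // TPB, 64 // TPB), (TPB, TPB)](a, b, c)

# float32 accumulation drifts slightly from NumPy's result,
# so compare with a loose tolerance.
print(np.allclose(c, a @ b, atol=1e-3))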

Basic CUDA concepts

  • threadIdx: index of a thread within its block

  • blockIdx: index of a block within the grid

  • blockDim: number of threads per block

  • gridDim: number of blocks in the grid

  • grid: a thread's absolute position in the grid; for a 1D grid, cuda.grid(1) equals cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x

  • gridsize: the total number of threads in the grid, cuda.blockDim.x * cuda.gridDim.x, also available as cuda.gridsize(1); a sketch using both follows this list
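A minimal sketch (the kernel and array names are illustrative) showing the manual index computation next to the cuda.grid()/cuda.gridsize() shorthands, with a grid-stride loop so the kernel also works when the array is larger than the grid:

import numpy as np
from numba import cuda

@cuda.jit
def scale(arr, factor):
    # Manual form of the absolute thread index for a 1D grid:
    start = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x   # == cuda.grid(1)
    stride = cuda.blockDim.x * cuda.gridDim.x                      # == cuda.gridsize(1)
    for i in range(start, arr.shape[0], stride):
        arr[i] *= factor

data = np.arange(10, dtype=np.float32)
scale[2, 4](data, 2.0)   # 2 blocks of 4 threads: 8 threads cover 10 elements
print(data)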

[Figure cuda_dim.jpg: the CUDA thread hierarchy for grid(8, 4, 1), block(8, 2, 1)]

__global__ void vector_add(float* vec1, float* vec2, float* vecres, int length) {
    // linear block index * block size + row-major position within the block
    // (the in-block offset is threadIdx.y * blockDim.x + threadIdx.x:
    //  row index times the row width, plus the column)
    int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
    if (tid < length) {
        vecres[tid] = vec1[tid] + vec2[tid];
    }
}
dim3 griddim(1, 2);
dim3 blockdim(3, 4);
foo<<<griddim, blockdim>>>(aryA, aryB);
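For comparison, a minimal Numba sketch (mirroring the CUDA C kernel above; names are illustrative) launched with the same griddim(1, 2) / blockdim(3, 4) configuration:

import numpy as np
from numba import cuda

@cuda.jit
def vector_add(vec1, vec2, vecres, length):
    # Same flattening as the CUDA C version: linear block index times
    # block size, plus the row-major position inside the block.
    block_id = cuda.blockIdx.y * cuda.gridDim.x + cuda.blockIdx.x
    tid = (block_id * (cuda.blockDim.x * cuda.blockDim.y)
           + cuda.threadIdx.y * cuda.blockDim.x + cuda.threadIdx.x)
    if tid < length:
        vecres[tid] = vec1[tid] + vec2[tid]

n = 20
a = np.arange(n, dtype=np.float32)
b = np.arange(n, dtype=np.float32)
res = np.zeros(n, dtype=np.float32)
vector_add[(1, 2), (3, 4)](a, b, res, n)   # 2 blocks x 12 threads = 24 >= 20
print(res)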
@cuda.jit('int32(int32, int32)', device=True)
def bar(a, b):
    ...
# A device function can only be called from other CUDA functions;
# inline=True asks the compiler to inline it at the call site.
@cuda.jit('int32(int32, int32)', device=True, inline=True)
def bar_forced_inline(a, b):
    ...

@cuda.jit('void(int32[:], int32[:], int32[:])')
def use_bar(aryA, aryB, aryOut):
    i = cuda.grid(1) # global position of the thread for a 1D grid.
    aryOut[i] = bar(aryA[i], aryB[i])

@cuda.jit('void(int32[:], int32[:])')
def foo(aryA, aryB):
  ...

# autojit: no signature is needed; argument types are captured at call time and
# each compiled signature is cached for reuse. (cuda.autojit has since been
# removed from Numba; a bare @cuda.jit without a signature behaves this way.)
@cuda.autojit
def foo(aryA, aryB):
  ...

griddim = 1, 2
blockdim = 3, 4
foo[griddim, blockdim](aryA, aryB)
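Putting the pieces together, a minimal runnable sketch (input values are illustrative) of a device function called from a kernel:

import numpy as np
from numba import cuda

@cuda.jit('int32(int32, int32)', device=True)
def bar(a, b):
    return a + b

@cuda.jit('void(int32[:], int32[:], int32[:])')
def use_bar(aryA, aryB, aryOut):
    i = cuda.grid(1)
    if i < aryOut.shape[0]:   # guard against the extra threads in the last block
        aryOut[i] = bar(aryA[i], aryB[i])

n = 12
aryA = np.arange(n, dtype=np.int32)
aryB = np.arange(n, dtype=np.int32)
aryOut = np.zeros(n, dtype=np.int32)
use_bar[(n + 31) // 32, 32](aryA, aryB, aryOut)
print(aryOut)   # [ 0  2  4 ... 22]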

References

  • Writing CUDA-Python

  • Writing CUDA Kernels

  • NVIDIA CUDA Driver API

  • The relationship between grid, block, and thread, and how to compute a thread's index