1. Install the NVIDIA CUDA driver.
2. Install the CUDA Toolkit.
3. Download the CUDA Samples into the NVIDIA Corporation folder.
4. Add the following system environment variables:
- CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6
- CUDA_SDK_PATH=C:\ProgramData\NVIDIA Corporation\CUDA Samples\v11.6
- CUDA_BIN_PATH=%CUDA_PATH%\bin
- CUDA_LIB_PATH=%CUDA_PATH%\lib\x64
- CUDA_SDK_BIN_PATH=%CUDA_SDK_PATH%\bin\win64
- CUDA_SDK_LIB_PATH=%CUDA_SDK_PATH%\common\lib\x64
5. Append the following entries to the system Path variable (a quick verification sketch follows this list):
- %CUDA_BIN_PATH%
- %CUDA_LIB_PATH%
- %CUDA_SDK_BIN_PATH%
- %CUDA_SDK_LIB_PATH%
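After editing the variables, open a new terminal or restart Visual Studio so the changes are picked up. As a quick check, a minimal C++ sketch like the one below (using the variable names defined above) can print what a freshly started process actually sees:

```cpp
// Minimal sketch: print the CUDA-related environment variables defined above,
// to confirm they are visible to a newly started process.
#include <cstdlib>
#include <iostream>

int main()
{
    const char* names[] = { "CUDA_PATH", "CUDA_SDK_PATH", "CUDA_BIN_PATH",
                            "CUDA_LIB_PATH", "CUDA_SDK_BIN_PATH", "CUDA_SDK_LIB_PATH" };
    for (const char* name : names)
    {
        const char* value = std::getenv(name);          // nullptr if the variable is not set
        std::cout << name << " = " << (value ? value : "<not set>") << std::endl;
    }
    return 0;
}
```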
6. Copy all files from the CUDA folder C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.6\extras\visual_studio_integration\MSBuildExtensions into D:\Program Files (x86)\Microsoft Visual Studio\2019\Professional\MSBuild\Microsoft\VC\v160\BuildCustomizations.
7. Create a new project in Visual Studio, right-click the project, choose Build Dependencies > Build Customizations, and tick CUDA in the Build Customizations dialog.
8. Right-click the newly added source file, choose Properties > General > Item Type, and set the item type to CUDA C/C++.
9. Open the project properties: Project > Properties > Configuration Properties > VC++ Directories > Include Directories, and add the include directory $(CUDA_PATH)\include.
10. Still under VC++ Directories, open Library Directories and add the library directory $(CUDA_PATH)\lib\x64.
11. Under Configuration Properties > Linker > Input > Additional Dependencies, add the following libraries (see the link-check sketch after this list):
- cublas.lib
- cuda.lib
- cudadevrt.lib
- cudart.lib
- cudart_static.lib
- OpenCL.lib
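As a sanity check for the linker settings, a small program that touches both the CUDA runtime and cuBLAS should build without unresolved externals once cudart.lib and cublas.lib are listed. This is only a sketch; the exact set of libraries a project needs depends on which APIs it uses (cuda.lib for the driver API, OpenCL.lib for OpenCL, and so on):

```cpp
// Minimal sketch: exercises cudart.lib (runtime API) and cublas.lib (cuBLAS).
// If either library is missing from Additional Dependencies, the link step fails.
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>

int main()
{
    int runtimeVersion = 0, driverVersion = 0;
    cudaRuntimeGetVersion(&runtimeVersion);   // symbol resolved from cudart
    cudaDriverGetVersion(&driverVersion);
    printf("CUDA runtime %d, driver %d\n", runtimeVersion, driverVersion);

    cublasHandle_t handle;                    // symbol resolved from cublas
    if (cublasCreate(&handle) == CUBLAS_STATUS_SUCCESS)
    {
        printf("cuBLAS handle created successfully\n");
        cublasDestroy(handle);
    }
    return 0;
}
```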
12. Test code:
- #include "cuda_runtime.h"
- #include "device_launch_parameters.h"
- #include<iostream>
- #include <stdio.h>
- using namespace std;
- constexpr size_t MAXSIZE = 20;
-
- __global__ void addKernel(int* const c, const int* const b, const int* const a)
- {
- int i = threadIdx.x;
- c[i] = a[i] + b[i];
- }
-
- int main()
- {
- constexpr size_t length = 6;
- int host_a[length] = { 1,2,3,4,5,6 };
- int host_b[length] = { 10,20,30,40,50,60 };
- int host_c[length];
- //为三个向量在GPU上分配显存
- int* dev_a, *dev_b, *dev_c;
- cudaMalloc((void**)&dev_c, length * sizeof(int));
- cudaMalloc((void**)&dev_a, length * sizeof(int));
- cudaMalloc((void**)&dev_b, length * sizeof(int));
- //将主机端的数据拷贝到设备端
- cudaMemcpy(dev_a, host_a, length * sizeof(int), cudaMemcpyHostToDevice);
- cudaMemcpy(dev_b, host_b, length * sizeof(int), cudaMemcpyHostToDevice);
- cudaMemcpy(dev_c, host_c, length * sizeof(int), cudaMemcpyHostToDevice);
- //在GPU上运行核函数,每个线程进行一个元素的计算
- addKernel << <1, length >> > (dev_c, dev_b, dev_a);
- //将设备端的运算结果拷贝回主机端
- cudaMemcpy(host_c, dev_c, length * sizeof(int), cudaMemcpyDeviceToHost);
- //释放显存
- cudaFree(dev_a);
- cudaFree(dev_b);
- cudaFree(dev_c);
- for (int i = 0; i < length; ++i)
- cout << host_c[i] << " ";
- cout << endl;
- getchar();
- system("pause");
- return 0;
- }
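If everything is configured correctly, the program prints 11 22 33 44 55 66. As a further check (assuming it is built as another .cu file in the same project), a short device query confirms that the driver can actually see the GPU:

```cpp
// Minimal sketch: enumerate CUDA devices to verify the driver and toolkit setup.
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess)
    {
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Found %d CUDA device(s)\n", count);
    for (int i = 0; i < count; ++i)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device %d: %s, compute capability %d.%d, %.1f GB global memory\n",
               i, prop.name, prop.major, prop.minor,
               prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
    }
    return 0;
}
```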