Merge pull request #6380 from scivision/cuda_qc

refactor/doc: cuda tests
Jussi Pakkanen, 5 years ago (committed via GitHub)
commit 8dd4e63eb3
18 changed files (additions + deletions shown in parentheses):

1. docs/markdown/Contributing.md (13)
2. run_project_tests.py (3)
3. test cases/cuda/1 simple/prog.cu (2)
4. test cases/cuda/10 cuda dependency/c/prog.c (2)
5. test cases/cuda/11 cuda dependency (nvcc)/modules/prog.cu (4)
6. test cases/cuda/11 cuda dependency (nvcc)/version_reqs/prog.cu (4)
7. test cases/cuda/12 cuda dependency (mixed)/kernel.cu (2)
8. test cases/cuda/12 cuda dependency (mixed)/prog.cpp (2)
9. test cases/cuda/2 split/lib.cu (2)
10. test cases/cuda/2 split/main.cpp (4)
11. test cases/cuda/2 split/static/main_static.cpp (4)
12. test cases/cuda/3 cudamodule/prog.cu (2)
13. test cases/cuda/4 shared/main.cu (2)
14. test cases/cuda/5 threads/main.cu (2)
15. test cases/cuda/6 std/main.cu (4)
16. test cases/cuda/7 static vs runtime/main.cu (4)
17. test cases/cuda/8 release/main.cu (4)
18. test cases/cuda/9 optimize for space/main.cu (4)

docs/markdown/Contributing.md

@@ -127,6 +127,17 @@ project tests. To run all tests, execute `./run_tests.py`. Unit tests
 can be run with `./run_unittests.py` and project tests with
 `./run_project_tests.py`.
+
+Subsets of the project tests can be selected with the
+`./run_project_tests.py --only` option. This can save a great deal of
+time when only a certain part of Meson is being tested.
+For example, a useful and easy contribution to Meson is making
+sure the full set of compilers is supported. One could test
+various Fortran compilers by setting `FC=ifort`, `FC=flang` or similar
+with `./run_project_tests.py --only fortran`.
+Some families of tests require a particular backend to run.
+For example, all the CUDA project tests run and pass on Windows via
+`./run_project_tests.py --only cuda --backend ninja`.
 
 Each project test is a standalone project that can be compiled on its
 own. They are all in `test cases` subdirectory. The simplest way to
 run a single project test is to do something like `./meson.py test\
@@ -225,8 +236,6 @@ those are simple.
 
 - indent 4 spaces, no tabs ever
 - brace always on the same line as if/for/else/function definition
-
-
 ## External dependencies
 
 The goal of Meson is to be as easily usable as possible. The user
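
To make the test-running workflow documented in Contributing.md above concrete, here is a minimal Python sketch that drives the same commands. The `--only` and `--backend` flags and the `FC` environment variable come from the documentation above; the `run_subset` helper and its defaults are hypothetical, and the sketch assumes it is run from a Meson source checkout where `run_project_tests.py` exists.

```python
import os
import subprocess
import typing

def run_subset(only: str, backend: str = '', env: typing.Optional[dict] = None) -> int:
    """Hypothetical helper: run one family of project tests via run_project_tests.py."""
    cmd = ['python', 'run_project_tests.py', '--only', only]
    if backend:
        cmd += ['--backend', backend]
    merged_env = dict(os.environ, **(env or {}))
    return subprocess.run(cmd, env=merged_env).returncode

# Exercise the Fortran project tests against a particular compiler.
run_subset('fortran', env={'FC': 'flang'})

# The CUDA project tests on Windows are documented above to need the Ninja backend.
run_subset('cuda', backend='ninja')
```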

run_project_tests.py

@@ -581,7 +581,7 @@ def skippable(suite, test):
     # Other framework tests are allowed to be skipped on other platforms
     return True
 
-def skip_csharp(backend):
+def skip_csharp(backend) -> bool:
     if backend is not Backend.ninja:
         return True
     if not shutil.which('resgen'):
@@ -671,6 +671,7 @@ def detect_tests_to_run(only: typing.List[str]) -> typing.List[typing.Tuple[str,
         ('objective c++', 'objcpp', backend not in (Backend.ninja, Backend.xcode) or not have_objcpp_compiler()),
         ('fortran', 'fortran', skip_fortran or backend != Backend.ninja),
         ('swift', 'swift', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('swiftc')),
+        # CUDA tests on Windows: use Ninja backend: python run_project_tests.py --only cuda --backend ninja
         ('cuda', 'cuda', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('nvcc')),
         ('python3', 'python3', backend is not Backend.ninja),
         ('python', 'python', backend is not Backend.ninja),
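
Each tuple above pairs a display name, a directory under `test cases`, and a precomputed skip condition. The following is only an illustrative sketch of how such a list could be filtered against `--only`; it uses plain strings where the real script uses its `Backend` enum, and the `select` helper is hypothetical, not the actual `detect_tests_to_run` logic.

```python
import shutil
import typing

# Illustrative stand-in; the real script derives the backend from its options.
backend = 'ninja'

all_tests = [
    # (name, directory under "test cases", skip condition)
    ('cuda', 'cuda', backend not in ('ninja', 'xcode') or not shutil.which('nvcc')),
    ('python3', 'python3', backend != 'ninja'),
]

def select(tests, only: typing.List[str]) -> typing.List[typing.Tuple[str, str]]:
    """Keep entries named in --only (if given) whose skip condition is false."""
    return [(name, subdir) for name, subdir, skip in tests
            if not skip and (not only or name in only)]

print(select(all_tests, only=['cuda']))
```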

test cases/cuda/1 simple/prog.cu

@@ -1,6 +1,6 @@
 #include <iostream>
 
-int main(int argc, char **argv) {
+int main(void) {
     int cuda_devices = 0;
     std::cout << "CUDA version: " << CUDART_VERSION << "\n";
     cudaGetDeviceCount(&cuda_devices);

test cases/cuda/10 cuda dependency/c/prog.c

@@ -1,7 +1,7 @@
 #include <cuda_runtime.h>
 #include <stdio.h>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;

test cases/cuda/11 cuda dependency (nvcc)/modules/prog.cu

@@ -2,13 +2,13 @@
 #include <cublas_v2.h>
 #include <iostream>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     int n = cuda_devices();
     if (n == 0) {
         std::cout << "No CUDA hardware found. Exiting.\n";

test cases/cuda/11 cuda dependency (nvcc)/version_reqs/prog.cu

@@ -1,13 +1,13 @@
 #include <cuda_runtime.h>
 #include <iostream>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     std::cout << "Compiled against CUDA version: " << CUDART_VERSION << "\n";
     int runtime_version = 0;
     cudaError_t r = cudaRuntimeGetVersion(&runtime_version);

test cases/cuda/12 cuda dependency (mixed)/kernel.cu

@@ -3,6 +3,6 @@
 __global__ void kernel (void){
 }
 
-void do_cuda_stuff() {
+void do_cuda_stuff(void) {
     kernel<<<1,1>>>();
 }

test cases/cuda/12 cuda dependency (mixed)/prog.cpp

@@ -2,7 +2,7 @@
 #include <cublas_v2.h>
 #include <iostream>
 
-void do_cuda_stuff();
+void do_cuda_stuff(void);
 
 int cuda_devices(void) {
     int result = 0;

test cases/cuda/2 split/lib.cu

@@ -4,7 +4,7 @@
 
 __global__ void kernel (void){
 }
 
-int do_cuda_stuff() {
+int do_cuda_stuff(void) {
     kernel<<<1,1>>>();
     printf("Hello, World!\n");

test cases/cuda/2 split/main.cpp

@@ -1,7 +1,7 @@
 #include<iostream>
 
-int do_cuda_stuff();
+int do_cuda_stuff(void);
 
-int main(int argc, char **argv) {
+int main(void) {
     return do_cuda_stuff();
 }

test cases/cuda/2 split/static/main_static.cpp

@@ -1,7 +1,7 @@
 #include<iostream>
 
-int do_cuda_stuff();
+int do_cuda_stuff(void);
 
-int main(int argc, char **argv) {
+int main(void) {
     return do_cuda_stuff();
 }

test cases/cuda/3 cudamodule/prog.cu

@@ -1,6 +1,6 @@
 #include <iostream>
 
-int main(int argc, char **argv) {
+int main(void) {
     int cuda_devices = 0;
     std::cout << "CUDA version: " << CUDART_VERSION << "\n";
     cudaGetDeviceCount(&cuda_devices);

test cases/cuda/4 shared/main.cu

@@ -3,7 +3,7 @@
 #include "shared/kernels.h"
 
-int main(int argc, char **argv) {
+int main(void) {
     int cuda_devices = 0;
     cudaGetDeviceCount(&cuda_devices);
 
     if(cuda_devices == 0) {

test cases/cuda/5 threads/main.cu

@@ -3,7 +3,7 @@
 #include "shared/kernels.h"
 
-int main(int argc, char **argv) {
+int main(void) {
     int cuda_devices = 0;
     cudaGetDeviceCount(&cuda_devices);
 
     if(cuda_devices == 0) {

test cases/cuda/6 std/main.cu

@@ -1,14 +1,14 @@
 #include <cuda_runtime.h>
 #include <iostream>
 
-auto cuda_devices() {
+auto cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     int n = cuda_devices();
 
     if (n == 0) {
         std::cout << "No Cuda hardware found. Exiting.\n";

test cases/cuda/7 static vs runtime/main.cu

@@ -1,14 +1,14 @@
 #include <cuda_runtime.h>
 #include <iostream>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     int n = cuda_devices();
 
     if (n == 0) {
         std::cout << "No Cuda hardware found. Exiting.\n";

test cases/cuda/8 release/main.cu

@@ -1,14 +1,14 @@
 #include <cuda_runtime.h>
 #include <iostream>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     int n = cuda_devices();
 
     if (n == 0) {
         std::cout << "No Cuda hardware found. Exiting.\n";

test cases/cuda/9 optimize for space/main.cu

@@ -1,14 +1,14 @@
 #include <cuda_runtime.h>
 #include <iostream>
 
-int cuda_devices() {
+int cuda_devices(void) {
     int result = 0;
     cudaGetDeviceCount(&result);
     return result;
 }
 
-int main() {
+int main(void) {
     int n = cuda_devices();
 
     if (n == 0) {
         std::cout << "No Cuda hardware found. Exiting.\n";
