Add cuda-cpp lang id, grammar and problem matchers (#119444)
* Adding a language ID for CUDA
* Adding aliases for CUDA C++
* Add problem matcher for cuda-cpp based on nvcc output
* Adding a grammar for cuda-cpp
* Adding a language configuration file for cuda-cpp
* Adding a colorizer test for cuda-cpp
* Update update-grammars.js to pick up cuda-cpp.tmLanguage.json
* Add cgmanifest entry for cuda grammar

Co-authored-by: Alex Ross <alros@microsoft.com>
parent 3287cedd34
commit 95420b39f2
update-grammars.js:

@@ -10,6 +10,8 @@ updateGrammar.update('jeff-hykin/cpp-textmate-grammar', 'syntaxes/c.tmLanguage.j
 updateGrammar.update('jeff-hykin/cpp-textmate-grammar', 'syntaxes/cpp.tmLanguage.json', './syntaxes/cpp.tmLanguage.json', undefined, 'master', 'source/languages/cpp/');
 updateGrammar.update('jeff-hykin/cpp-textmate-grammar', 'syntaxes/cpp.embedded.macro.tmLanguage.json', './syntaxes/cpp.embedded.macro.tmLanguage.json', undefined, 'master', 'source/languages/cpp/');
 
+updateGrammar.update('NVIDIA/cuda-cpp-grammar', 'syntaxes/cuda-cpp.tmLanguage.json', './syntaxes/cuda-cpp.tmLanguage.json', undefined, 'master');
+
 // `source.c.platform` which is still included by other grammars
 updateGrammar.update('textmate/c.tmbundle', 'Syntaxes/Platform.tmLanguage', './syntaxes/platform.tmLanguage.json');
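For reference, the file this pulls in is a TextMate grammar in JSON form. A minimal sketch of its top-level shape, assuming it follows the conventions of the sibling C/C++ grammars; the "#cuda-keywords" rule name is made up for illustration, since the grammar's actual contents are not shown in this commit:

    {
        "name": "CUDA C++",
        "scopeName": "source.cuda-cpp",
        "patterns": [
            { "include": "#cuda-keywords" }
        ]
    }

The scopeName here matches the one registered for the grammar in package.json below.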
cgmanifest.json:

@@ -39,6 +39,19 @@
       ],
       "license": "TextMate Bundle License",
       "version": "0.0.0"
     },
+    {
+      "component": {
+        "type": "git",
+        "git": {
+          "name": "NVIDIA/cuda-cpp-grammar",
+          "repositoryUrl": "https://github.com/NVIDIA/cuda-cpp-grammar",
+          "commitHash": "81e88eaec5170aa8585736c63627c73e3589998c"
+        }
+      },
+      "license": "MIT",
+      "version": "0.0.0",
+      "description": "The file syntaxes/cuda-cpp.tmLanguage.json was derived from https://github.com/jeff-hykin/cpp-textmate-grammar, which was derived from https://github.com/atom/language-c, which was originally converted from the C TextMate bundle https://github.com/textmate/c.tmbundle."
+    }
   ],
   "version": 1
package.json:

@@ -50,6 +50,17 @@
         "cpp"
       ],
       "configuration": "./language-configuration.json"
     },
+    {
+      "id": "cuda-cpp",
+      "extensions": [
+        ".cu",
+        ".cuh"
+      ],
+      "aliases": [
+        "CUDA C++"
+      ],
+      "configuration": "./language-configuration.json"
+    }
   ],
   "grammars": [
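With this contribution in place, files ending in .cu or .cuh open in the new cuda-cpp language mode automatically. Other file patterns can be mapped to it through VS Code's standard files.associations setting; a minimal sketch, where the *.cuinc pattern is a made-up example rather than part of this change:

    // settings.json (illustrative)
    {
        "files.associations": {
            "*.cuinc": "cuda-cpp"
        }
    }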
@@ -71,8 +82,35 @@
     {
       "scopeName": "source.c.platform",
       "path": "./syntaxes/platform.tmLanguage.json"
     },
+    {
+      "language": "cuda-cpp",
+      "scopeName": "source.cuda-cpp",
+      "path": "./syntaxes/cuda-cpp.tmLanguage.json"
+    }
   ],
+  "problemPatterns": [
+    {
+      "name": "nvcc-location",
+      "regexp": "^(.*)\\((\\d+)\\):\\s+(warning|error):\\s+(.*)",
+      "kind": "location",
+      "file": 1,
+      "location": 2,
+      "severity": 3,
+      "message": 4
+    }
+  ],
+  "problemMatchers": [
+    {
+      "name": "nvcc",
+      "owner": "cuda-cpp",
+      "fileLocation": [
+        "relative",
+        "${workspaceFolder}"
+      ],
+      "pattern": "$nvcc-location"
+    }
+  ],
   "snippets": [
     {
       "language": "c",
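The contributed matcher is referenced from build tasks by name. A minimal sketch of a tasks.json that compiles the current file with nvcc and routes its diagnostics through the new matcher; the task label and command line are illustrative, not part of this change:

    // .vscode/tasks.json (illustrative)
    {
        "version": "2.0.0",
        "tasks": [
            {
                "label": "build with nvcc",
                "type": "shell",
                "command": "nvcc -o ${fileBasenameNoExtension} ${file}",
                // "$nvcc" resolves to the problemMatcher contributed above; its
                // nvcc-location pattern captures diagnostics of the form:
                //   kernel.cu(42): error: identifier "foo" is undefined
                "problemMatcher": "$nvcc"
            }
        ]
    }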
File diff suppressed because one or more lines are too long (presumably the new syntaxes/cuda-cpp.tmLanguage.json grammar file).
Colorizer test fixture (new .cu file):

@@ -0,0 +1,149 @@
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>

#include <cuda_runtime.h>

#if defined(assert)
#undef assert
#endif

#define assert(c) \
    do { \
        if(!(c)) { \
            fprintf(stderr, "Assertion \"%s\" failed. (%s:%d)\n", \
                #c, __FILE__, __LINE__); \
            exit(1); \
        } \
    } while(0)

#define assertSucceeded(c) \
    do { \
        unsigned __tmp = c; \
        if(__tmp != cudaSuccess) { \
            fprintf(stderr, "Operation \"%s\" failed with error code %x. (%s:%d)\n", \
                #c, (__tmp), __FILE__, __LINE__); \
            exit(__tmp); \
        } \
    } while(0)

#define ARRAY_LENGTH(x) (sizeof(x) / sizeof(x[0]))

constexpr int dataLength = 1 << 24;
constexpr int threadsPerBlock = 128;

typedef unsigned char byte;

struct TestType
{
    union {
        struct
        {
            unsigned lowHalf;
            unsigned highHalf;
        } halfAndHalf;

        unsigned long long whole;
    } takeYourPick;

    int arr[5];

    struct {
        char a;
        char b;
    } structArr[5];

    float theFloats[2];
    double theDouble;
};

__global__ void cudaComputeHash(TestType* input, unsigned *results)
{
    int idx = blockIdx.x * threadsPerBlock + threadIdx.x;
    TestType* myInput = input + idx;

    unsigned myResult = 0;

    myResult += myInput->takeYourPick.halfAndHalf.lowHalf - idx;
    myResult += myInput->takeYourPick.halfAndHalf.highHalf - idx;

    for(size_t i = 0; i < ARRAY_LENGTH(myInput->arr); i++)
    {
        myResult += myInput->arr[i] - idx;
    }

    for(size_t i = 0; i < sizeof(myInput->structArr); i++)
    {
        myResult += reinterpret_cast<byte *>(myInput->structArr)[i] - '0';
    }

    __syncthreads();

    results[idx] = myResult;
}

int main()
{
    int cudaDeviceCount;
    assertSucceeded(cudaGetDeviceCount(&cudaDeviceCount));
    assert(cudaDeviceCount > 0);

    assertSucceeded(cudaSetDevice(0));

    TestType* input;
    unsigned* results;

    assertSucceeded(cudaMallocManaged(&input, sizeof(TestType) * dataLength));
    assert(!!input);

    for (size_t i = 0; i < dataLength; i++)
    {
        input[i].takeYourPick.halfAndHalf.lowHalf = i + 1;
        input[i].takeYourPick.halfAndHalf.highHalf = i + 3;

        for(size_t j = 0; j < ARRAY_LENGTH(input[i].arr); j++)
        {
            input[i].arr[j] = i + j + 2;
        }

        for(size_t j = 0; j < sizeof(input[i].structArr); j++)
        {
            reinterpret_cast<byte *>(input[i].structArr)[j] = '0' + static_cast<char>((i + j) % 10);
        }

        input[i].theFloats[0] = i + 1;
        input[i].theFloats[1] = input[i].theFloats[0] / 2;

        input[i].theDouble = input[i].theFloats[1] + 1;
    }

    assertSucceeded(cudaMallocManaged(reinterpret_cast<void **>(&results), sizeof(unsigned) * dataLength));
    assert(!!results);

    constexpr int blocks = dataLength / threadsPerBlock;
    cudaComputeHash<<<blocks, threadsPerBlock>>>(input, results);

    assertSucceeded(cudaDeviceSynchronize());

    const unsigned expectedResult =
        1 +
        3 +
        ARRAY_LENGTH(input[0].arr) * (ARRAY_LENGTH(input[0].arr) - 1) / 2 +
        ARRAY_LENGTH(input[0].arr) * 2 +
        sizeof(input[0].structArr) * (sizeof(input[0].structArr) - 1) / 2;

    for (unsigned i = 0; i < dataLength; i++)
    {
        if (results[i] != expectedResult){
            fprintf(stderr, "results[%u] (%u) != %u\n", i, results[i], expectedResult);
            exit(1);
        }
    }

    assertSucceeded(cudaFree(input));
    assertSucceeded(cudaFree(results));

    fprintf(stderr, "Success\n");

    exit(0);
}
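VS Code's built-in colorizer tests tokenize each fixture file and snapshot the result as JSON; the oversized diff suppressed just below is presumably that snapshot for this fixture. An illustrative sketch of the snapshot format, with made-up scope names and colors, since the actual scopes depend on the cuda-cpp grammar:

    [
        {
            "c": "__global__",
            "t": "source.cuda-cpp keyword.language.cuda-cpp",
            "r": {
                "dark_plus": "keyword: #569CD6",
                "light_plus": "keyword: #0000FF"
            }
        }
    ]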
File diff suppressed because it is too large