Allocate more shared memory to attention kernel (#1154)

This commit is contained in:
Antoni Baum
2023-09-26 22:27:13 -07:00
committed by GitHub
parent 03ffd0a022
commit cf5cb1e33e
7 changed files with 87 additions and 3 deletions

View File

@@ -0,0 +1,14 @@
// Query a CUDA device attribute (host-side helper).
//
// attribute: a cudaDeviceAttr enum value, passed as int so callers
//            (e.g. Python bindings) need not see the CUDA headers.
// device_id: CUDA device index to query; any negative value means
//            "use the currently active device" (via cudaGetDevice).
//
// Returns the attribute value, or 0 if the CUDA runtime call fails.
// NOTE(review): return codes of the CUDA calls are not surfaced to the
// caller; locals are zero-initialized so a failed call yields a
// deterministic 0 / device 0 instead of reading uninitialized memory.
int get_device_attribute(
    int attribute,
    int device_id)
{
    // Initialize both locals: the original code returned an
    // indeterminate `value` (and used an indeterminate `device`)
    // whenever the corresponding CUDA runtime call failed.
    int device = 0;
    int value = 0;
    if (device_id < 0) {
        // Negative id: query the device bound to the calling host thread.
        cudaGetDevice(&device);
    }
    else {
        device = device_id;
    }
    cudaDeviceGetAttribute(&value, static_cast<cudaDeviceAttr>(attribute), device);
    return value;
}