Skip to content
Snippets Groups Projects
Commit 1f8f0a33 authored by cmaffeo2's avatar cmaffeo2
Browse files

Add Resource class

parent 5b80d11f
No related branches found
No related tags found
2 merge requests!5Revert "modifications",!3Fix issue with templated callSync function so that it can be used easily on...
...@@ -19,9 +19,12 @@ Linux workstation with CUDA-compatible GPU (minimum 3.5 compute capability) ...@@ -19,9 +19,12 @@ Linux workstation with CUDA-compatible GPU (minimum 3.5 compute capability)
- CMake >= 3.9 - CMake >= 3.9
- gcc >= 4.9 - gcc >= 4.9
- cuda >= 9.0 (> 11.5 recommended) - cuda >= 9.0 (> 11.5 recommended)
- spdlog >= 1.10.0 (note: this is normally installed to extern/spdlog by running `git submodule update --init` from this directory)
### Build process ### Build process
From the root arbd directory (where this README is found), ensure you have spdlog installed to the extern directory, usually by running `git submodule update --init`.
From the root arbd directory (where this README is found), run: From the root arbd directory (where this README is found), run:
``` ```
## Determine the compute capability of your CUDA-enabled graphics card ## Determine the compute capability of your CUDA-enabled graphics card
...@@ -52,4 +55,4 @@ ARBD2 is being developed by the Aksimentiev group ...@@ -52,4 +55,4 @@ ARBD2 is being developed by the Aksimentiev group
- Christopher Maffeo <mailto:cmaffeo2@illinois.edu> - Christopher Maffeo <mailto:cmaffeo2@illinois.edu>
- Han-yi Chao - Han-yi Chao
Please direct questions or problems to Chris. Please direct questions, problems or suggestions to Chris.
\ No newline at end of file
add_library("lib${PROJECT_NAME}" add_library("lib${PROJECT_NAME}"
ARBDException.cpp ARBDException.cpp
Resource.cu
GPUManager.cpp GPUManager.cpp
ParticlePatch.cpp ParticlePatch.cpp
SimSystem.cpp SimSystem.cpp
......
...@@ -39,6 +39,7 @@ private: ...@@ -39,6 +39,7 @@ private:
// Particle data // Particle data
size_t num_particles; size_t num_particles;
idx_t* global_idx; // global index of particle
size_t* type_ids; size_t* type_ids;
Pos* pos; Pos* pos;
Force* force; Force* force;
......
...@@ -2,44 +2,7 @@ ...@@ -2,44 +2,7 @@
#include <future> #include <future>
#include <iostream> #include <iostream>
#include "ARBDException.h" #include "Resource.h"
/**
 * @brief Represents a resource that can store data and perform computations.
 */
struct Resource {
    /**
     * @brief Enum to specify the type of the resource (e.g., CPU or GPU).
     */
    enum ResourceType {CPU, MPI, GPU};

    ResourceType type; ///< Type of the resource.
    size_t id; ///< ID or any other identifier associated with the resource.

    // Q: should this return True for GPU that is attached/assigned to current thread? Currently assuming yes.
    /**
     * @brief Checks whether this resource is local to the calling thread.
     * @return Currently always true — the type-based dispatch below is
     *         commented out, so this is a stub that warns and returns true.
     */
    HOST DEVICE bool is_local() const {
        bool ret = true;
        // NOTE(review): "__CUDA_ACC__" below looks like a typo for nvcc's
        // __CUDA_ARCH__ — confirm before re-enabling this branch.
        // #ifdef __CUDA_ACC__
        // ret = (type == GPU);
        // #else
        // ret = (type == CPU);
        // #endif
        LOGWARN("Resource::is_local() not fully implemented; returning {}",ret);
        return ret;
    };
    // HOST DEVICE static bool is_local() { // check if thread/gpu idx matches some global idx };

    /**
     * @brief Returns a Resource describing the caller's execution context.
     *
     * Placeholder: always reports id 0; picks GPU vs CPU from the
     * compile-time macro (see the __CUDA_ACC__ spelling note above).
     */
    static Resource Local() {
        LOGWARN("Resource::Local() not properly implemented");
#ifdef __CUDA_ACC__
        return Resource{ GPU, 0 };
#else
        return Resource{ CPU, 0 };
#endif
    };

    /// Equality compares type and id; nothing else is part of identity.
    bool operator==(const Resource& other) const { return type == other.type && id == other.id; };
};
// START traits // START traits
// These ugly bits of code help implement SFINAE in C++14 and should likely be removed if a newer standard is adopted // These ugly bits of code help implement SFINAE in C++14 and should likely be removed if a newer standard is adopted
......
#include "Resource.h"
/**
 * @brief Returns an index identifying the calling thread of execution.
 *
 * Used by Resource::is_local() to decide whether a resource belongs to
 * the caller.
 *
 * @return MPI builds: the caller's rank in MPI_COMM_WORLD.
 *         Host builds without MPI: 0 (single logical thread assumed).
 * @throws NotImplementedError when executed as device code.
 */
HOST DEVICE size_t current_thread_idx() {
#ifdef USE_MPI
    // Fixed: this path previously (a) raised NotImplementedError with a
    // GPU-specific message before any code ran, and (b) returned
    // MPI_Comm_rank's int error code instead of the rank it writes into
    // world_rank.
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    return static_cast<size_t>(world_rank);
#else
#ifdef __CUDA_ARCH__ // was "__CUDA_ACC__"; __CUDA_ARCH__ is the macro nvcc defines for device compilation
    Exception( NotImplementedError, "current_thread_idx() not implemented on GPU" );
    return 0; // unreachable; keeps all paths returning a value
#else
    return 0; // host build without MPI: single logical thread
#endif
#endif
}
#pragma once
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "ARBDException.h"
#include "GPUManager.h"
HOST DEVICE size_t current_thread_idx();
/**
* @brief Represents a resource that can store data and perform computations.
*/
struct Resource {
/**
* @brief Enum to specify the type of the resource (e.g., CPU or GPU).
*/
enum ResourceType {CPU, MPI, GPU};
ResourceType type; ///< Type of the resource.
size_t id; ///< Unique identifier associated with the resource.
Resource* parent; ///< Parent resource; nullptr unless type is GPU
/**
* @brief Checks if the resource is running on the calling thread.
*/
HOST DEVICE bool is_running() const {
bool ret = true;
// #ifdef __CUDA_ACC__
// ret = (type == GPU);
// #else
// ret = (type == CPU);
// #endif
return ret;
}
// Q: should this return True for GPU that is attached/assigned to current thread? Currently assuming yes.
HOST DEVICE bool is_local() const {
bool ret = true;
#ifdef __CUDA_ACC__
ret = (type == GPU);
LOGWARN("Resource::is_local() not fully implemented on GPU devices; returning {}",ret);
#else
if (type == GPU && parent != nullptr) {
ret = parent->is_local();
} else {
ret = (current_thread_idx() == id);
}
#endif
return ret;
};
// HOST DEVICE static bool is_local() { // check if thread/gpu idx matches some global idx };
static Resource Local() {
LOGWARN("Resource::Local() not properly implemented");
#ifdef __CUDA_ACC__
return Resource{ GPU, 0 };
#else
return Resource{ CPU, 0 };
#endif
};
bool operator==(const Resource& other) const { return type == other.type && id == other.id; };
};
...@@ -55,3 +55,6 @@ HOST DEVICE inline Vector3_t<size_t> index_to_ijk(size_t idx, const Vector3_t<si ...@@ -55,3 +55,6 @@ HOST DEVICE inline Vector3_t<size_t> index_to_ijk(size_t idx, const Vector3_t<si
return index_to_ijk(idx, n.x, n.y, n.z); return index_to_ijk(idx, n.x, n.y, n.z);
} }
using idx_t = size_t; /* Alias for global particle indices. Kept as a
                       * dedicated type so it can be widened later if
                       * global indices outgrow size_t — as written it IS
                       * size_t, so the original "too large for size_t"
                       * concern is not yet addressed. */
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment