HTGS v2.0
The Hybrid Task Graph Scheduler
CudaMemoryManager.hpp
Go to the documentation of this file.
// NIST-developed software is provided by NIST as a public service. You may use, copy and distribute copies of the software in any medium, provided that you keep intact this entire notice. You may improve, modify and create derivative works of the software or any portion of the software, and you may copy and distribute such modifications or works. Modified works should carry a notice stating that you changed the software and should note the date and nature of any such change. Please explicitly acknowledge the National Institute of Standards and Technology as the source of the software.
// NIST-developed software is expressly provided "AS IS." NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED, IN FACT OR ARISING BY OPERATION OF LAW, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT AND DATA ACCURACY. NIST NEITHER REPRESENTS NOR WARRANTS THAT THE OPERATION OF THE SOFTWARE WILL BE UNINTERRUPTED OR ERROR-FREE, OR THAT ANY DEFECTS WILL BE CORRECTED. NIST DOES NOT WARRANT OR MAKE ANY REPRESENTATIONS REGARDING THE USE OF THE SOFTWARE OR THE RESULTS THEREOF, INCLUDING BUT NOT LIMITED TO THE CORRECTNESS, ACCURACY, RELIABILITY, OR USEFULNESS OF THE SOFTWARE.
// You are solely responsible for determining the appropriateness of using and distributing the software and you assume all risks associated with its use, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and the unavailability or interruption of operation. This software is not intended to be used in any situation where a failure could cause risk of injury or damage to property. The software developed by NIST employees is not subject to copyright protection within the United States.

#ifndef HTGS_CUDAMEMORYMANAGER_HPP
#define HTGS_CUDAMEMORYMANAGER_HPP

#ifdef USE_CUDA

#include <htgs/core/memory/MemoryManager.hpp>

#include <cuda_runtime_api.h>

namespace htgs {

/**
 * @brief Implements a MemoryManager that binds the thread responsible for the MemoryManager to a
 * CUDA GPU prior to allocating its memory pool.
 */
template<class T>
class CudaMemoryManager : public MemoryManager<T> {

 public:
  /**
   * Creates a CudaMemoryManager.
   * @param name the name of the memory manager
   * @param cudaIds the array of CUDA GPU ids, indexed by pipeline id
   * @param memoryPoolSize the size of the memory pool
   * @param memoryAllocator the allocator used to allocate and free the GPU memory
   * @param type the memory manager type
   */
  CudaMemoryManager(std::string name,
                    int *cudaIds,
                    size_t memoryPoolSize,
                    std::shared_ptr<IMemoryAllocator<T>> memoryAllocator,
                    MMType type) :
      MemoryManager<T>(name, memoryPoolSize, memoryAllocator, type) {
    this->cudaIds = cudaIds;
    if (type != MMType::Static) {
      std::cerr << "WARNING: The CudaMemoryManager " << name << " should use Static memory allocation to avoid "
                   "unnecessary GPU synchronization" << std::endl;
    }
  }

  /**
   * Initializes the CudaMemoryManager by selecting the GPU it is responsible for prior to
   * allocating its memory pool.
   */
  void initialize() override {
    cudaSetDevice(this->cudaIds[this->getPipelineId()]);
    MemoryManager<T>::initialize();
  }

  /**
   * Gets the name of the CudaMemoryManager.
   */
  std::string getName() override {
    return "Cuda" + MemoryManager<T>::getName();
  }

  /**
   * Creates a shallow copy of the CudaMemoryManager.
   */
  MemoryManager<T> *copy() override {
    return new CudaMemoryManager(this->getMemoryManagerName(),
                                 this->cudaIds,
                                 this->getMemoryPoolSize(),
                                 this->getAllocator(),
                                 this->getType());
  }

 private:
  int *cudaIds; //!< The array of CUDA GPU ids, one per execution pipeline
};

}
#endif //USE_CUDA
#endif //HTGS_CUDAMEMORYMANAGER_HPP
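For context, the sketch below shows how this class could be constructed directly with the constructor above: one GPU id per execution pipeline, a Static pool, and a CUDA-backed allocator. The CudaDoubleAllocator class, its memAlloc()/memFree() overrides, and the include paths are assumptions modeled on the HTGS tutorials rather than anything defined in this file; in a real application the CudaMemoryManager is normally created for you when a CUDA memory edge is added to a task graph (e.g. TaskGraphConf::addCudaMemoryManagerEdge in HTGS v2.0) instead of being instantiated by hand.

// Usage sketch (assumptions: IMemoryAllocator<T> exposes memAlloc()/memFree() overrides as in the
// HTGS tutorials, and headers live under <htgs/...>); compile with -DUSE_CUDA and the CUDA runtime.
#include <htgs/api/IMemoryAllocator.hpp>
#include <htgs/core/memory/CudaMemoryManager.hpp>

#include <cuda_runtime_api.h>

#include <iostream>
#include <memory>

// Hypothetical allocator that hands out fixed-size double buffers in GPU device memory.
class CudaDoubleAllocator : public htgs::IMemoryAllocator<double> {
 public:
  explicit CudaDoubleAllocator(size_t numElems) : htgs::IMemoryAllocator<double>(numElems) {}

  double *memAlloc(size_t size) override {
    double *mem = nullptr;
    cudaMalloc((void **) &mem, sizeof(double) * size);
    return mem;
  }

  double *memAlloc() override { return memAlloc(this->size()); }

  void memFree(double *&memory) override { cudaFree(memory); }
};

int main() {
  // One GPU id per execution pipeline: pipeline 0 -> device 0, pipeline 1 -> device 1.
  int cudaIds[] = {0, 1};

  auto allocator = std::make_shared<CudaDoubleAllocator>(1024 * 1024);

  // Static allocation pre-fills the pool at initialization, matching the constructor's warning.
  htgs::CudaMemoryManager<double> memoryManager("cudaBufferEdge", cudaIds, 4, allocator,
                                                htgs::MMType::Static);

  std::cout << memoryManager.getName() << std::endl;  // name is prefixed with "Cuda" by getName()

  // initialize() is not called here; the HTGS runtime invokes it on the manager's own thread,
  // where it selects cudaIds[pipelineId] before the base MemoryManager allocates the pool.
  return 0;
}

Because initialize() runs cudaSetDevice() before the base MemoryManager fills the pool, all buffers end up on the GPU assigned to that pipeline; non-Static types instead allocate and free on demand during graph execution, which is the unnecessary GPU synchronization the constructor warns about.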