#ifndef OPM_GPUISTL_GPUOWNEROVERLAPCOPY_HPP
#define OPM_GPUISTL_GPUOWNEROVERLAPCOPY_HPP

#include <dune/istl/owneroverlapcopy.hh>

#include <mpi.h>

#include <cmath>
#include <map>
#include <memory>
#include <mutex>
#include <vector>

// The headers below are inferred from the usage in this file
// (GpuVector, OPM_ERROR_IF/OPM_THROW, fmt::format and detail::to_int).
#include <opm/common/ErrorMacros.hpp>
#include <opm/simulators/linalg/gpuistl/GpuVector.hpp>
#include <opm/simulators/linalg/gpuistl/detail/safe_conversion.hpp>

#include <fmt/core.h>

namespace Opm::gpuistl {
/**
 * @brief GPUSender is a wrapper class for classes which will implement copyOwnerToAll.
 *
 * This is implemented with the intention of creating communicators with a generic
 * GPUSender, hiding whether or not the implementation uses GPU aware MPI.
 *
 * @tparam field_type the scalar type (float or double)
 * @tparam OwnerOverlapCopyCommunicationType typically a Dune::LinearOperator::communication_type
 */
template <class field_type, class OwnerOverlapCopyCommunicationType>
class GPUSender
{
public:
    using X = GpuVector<field_type>;

    GPUSender(const OwnerOverlapCopyCommunicationType& cpuOwnerOverlapCopy)
        : m_cpuOwnerOverlapCopy(cpuOwnerOverlapCopy)
    {
    }

    virtual ~GPUSender() = default;

    /**
     * @brief copyOwnerToAll makes the values of owned indices available in the
     * copies held by the other ranks.
     */
    virtual void copyOwnerToAll(const X& source, X& dest) const = 0;

    /**
     * @brief project will project x to the owned subspace.
     *
     * For each component i which is not owned, x_i will be set to 0.
     */
    void project(X& x) const
    {
        std::call_once(m_initializedIndices, [&]() { initIndexSet(); });
        x.setZeroAtIndexSet(*m_indicesCopy);
    }

    /**
     * @brief dot carries out the dot product between x and y on the owned
     * indices, then sums the result across MPI processes.
     */
    void dot(const X& x, const X& y, field_type& output) const
    {
        std::call_once(m_initializedIndices, [&]() { initIndexSet(); });

        const auto dotAtRank = x.dot(y, *m_indicesOwner);
        output = m_cpuOwnerOverlapCopy.communicator().sum(dotAtRank);
    }

    /**
     * @brief norm computes the l^2-norm of x across processes: the square root
     * of the globally summed dot product of x with itself on owned indices.
     */
    field_type norm(const X& x) const
    {
        auto xDotX = field_type(0);
        dot(x, x, xDotX);
        return std::sqrt(xDotX);
    }

protected:
    // Guards the lazy initialization of the index sets below.
    mutable std::once_flag m_initializedIndices;
    mutable std::unique_ptr<GpuVector<int>> m_indicesOwner;
    mutable std::unique_ptr<GpuVector<int>> m_indicesCopy;
    const OwnerOverlapCopyCommunicationType& m_cpuOwnerOverlapCopy;

    virtual void initIndexSet() const = 0;
};
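// Illustration (not from the original header): with two ranks sharing an
// interface row, that row is attributed "owner" on exactly one rank and
// "copy" on the other, so dot() counts every degree of freedom exactly once:
//
//   double out = 0.0;
//   sender.dot(x, x, out);           // dot over owned indices + MPI sum
//   // out == global <x, x> with no double counting of overlap rows,
//   // and sender.norm(x) == std::sqrt(out) on every rank.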
/**
 * @brief Derived class of GPUSender that handles MPI calls that should NOT use
 * GPU direct communication.
 *
 * The implementation moves data from the GPU to the CPU and sends it using
 * regular MPI through the wrapped Dune communication object.
 *
 * @tparam field_type the scalar type (float or double)
 * @tparam block_size the block size of the block elements in the matrix
 * @tparam OwnerOverlapCopyCommunicationType typically a Dune::LinearOperator::communication_type
 */
template <class field_type, int block_size, class OwnerOverlapCopyCommunicationType>
class GPUObliviousMPISender : public GPUSender<field_type, OwnerOverlapCopyCommunicationType>
{
public:
    using X = GpuVector<field_type>;

    GPUObliviousMPISender(const OwnerOverlapCopyCommunicationType& cpuOwnerOverlapCopy)
        : GPUSender<field_type, OwnerOverlapCopyCommunicationType>(cpuOwnerOverlapCopy)
    {
    }

    /**
     * @brief copyOwnerToAll copies source to the CPU, calls
     * OwnerOverlapCopyCommunicationType::copyOwnerToAll on the copied data,
     * and copies the result back to the GPU.
     */
    void copyOwnerToAll(const X& source, X& dest) const override
    {
        auto sourceAsDuneVector = source.template asDuneBlockVector<block_size>();
        auto destAsDuneVector = dest.template asDuneBlockVector<block_size>();
        this->m_cpuOwnerOverlapCopy.copyOwnerToAll(sourceAsDuneVector, destAsDuneVector);
        dest.copyFromHost(destAsDuneVector);
    }

private:
    void initIndexSet() const override
    {
        // Collect the indices used by the project, dot and norm calls,
        // expanded from block indices to scalar (field_type) indices.
        const auto& pis = this->m_cpuOwnerOverlapCopy.indexSet();
        std::vector<int> indicesCopyOnCPU;
        std::vector<int> indicesOwnerCPU;
        for (const auto& index : pis) {
            if (index.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::copy) {
                for (int component = 0; component < block_size; ++component) {
                    indicesCopyOnCPU.push_back(index.local().local() * block_size + component);
                }
            }

            if (index.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::owner) {
                for (int component = 0; component < block_size; ++component) {
                    indicesOwnerCPU.push_back(index.local().local() * block_size + component);
                }
            }
        }

        this->m_indicesCopy = std::make_unique<GpuVector<int>>(indicesCopyOnCPU);
        this->m_indicesOwner = std::make_unique<GpuVector<int>>(indicesOwnerCPU);
    }
};
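// Worked example (assumed values, for illustration): with block_size == 3, a
// "copy" row with local block index 5 contributes the scalar indices
// {15, 16, 17} to indicesCopyOnCPU, so project() can zero all three components
// of that row on the GPU in a single indexed pass.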
/**
 * @brief Derived class of GPUSender that handles MPI using CUDA aware MPI.
 *
 * The copyOwnerToAll function issues MPI calls referring to data that resides
 * on the GPU in order to send it directly to other GPUs, skipping the staging
 * step on the CPU.
 *
 * @tparam field_type the scalar type (float or double)
 * @tparam block_size the block size of the block elements in the matrix
 * @tparam OwnerOverlapCopyCommunicationType typically a Dune::LinearOperator::communication_type
 */
template <class field_type, int block_size, class OwnerOverlapCopyCommunicationType>
class GPUAwareMPISender : public GPUSender<field_type, OwnerOverlapCopyCommunicationType>
{
public:
    using X = GpuVector<field_type>;

    GPUAwareMPISender(const OwnerOverlapCopyCommunicationType& cpuOwnerOverlapCopy)
        : GPUSender<field_type, OwnerOverlapCopyCommunicationType>(cpuOwnerOverlapCopy)
    {
    }

    void copyOwnerToAll(const X& source, X& dest) const override
    {
        // Only the in-place variant (source == dest) is supported.
        OPM_ERROR_IF(&source != &dest,
                     "The provided GpuVectors' address did not match");

        std::call_once(this->m_initializedIndices, [&]() { initIndexSet(); });

        const int rank = this->m_cpuOwnerOverlapCopy.communicator().rank();
        dest.prepareSendBuf(*m_GPUSendBuf, *m_commpairIndicesOwner);

        // The MPI logic below follows DUNE's parallel/communicator.hh.
        std::vector<MPI_Request> sendRequests(m_messageInformation.size());
        std::vector<MPI_Request> recvRequests(m_messageInformation.size());
        std::vector<int> processMap(m_messageInformation.size());
        size_t numberOfRealRecvRequests = 0;

        using const_iterator = typename InformationMap::const_iterator;
        const const_iterator end = m_messageInformation.end();

        // Post non-blocking receives directly into the GPU receive buffer.
        {
            size_t i = 0;
            for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
                processMap[i] = info->first;
                if (info->second.second.m_size) {
                    MPI_Irecv(m_GPURecvBuf->data() + info->second.second.m_start,
                              detail::to_int(info->second.second.m_size),
                              MPI_BYTE,
                              info->first,
                              m_commTag,
                              this->m_cpuOwnerOverlapCopy.communicator(),
                              &recvRequests[i]);
                    numberOfRealRecvRequests += 1;
                } else {
                    recvRequests[i] = MPI_REQUEST_NULL;
                }
            }
        }

        // Post the matching non-blocking synchronous sends from the GPU send buffer.
        {
            size_t i = 0;
            for (const_iterator info = m_messageInformation.begin(); info != end; ++info, ++i) {
                if (info->second.first.m_size) {
                    MPI_Issend(m_GPUSendBuf->data() + info->second.first.m_start,
                               detail::to_int(info->second.first.m_size),
                               MPI_BYTE,
                               info->first,
                               m_commTag,
                               this->m_cpuOwnerOverlapCopy.communicator(),
                               &sendRequests[i]);
                } else {
                    sendRequests[i] = MPI_REQUEST_NULL;
                }
            }
        }

        // Wait for the receives to complete.
        int finished = MPI_UNDEFINED;
        MPI_Status status;
        for (size_t i = 0; i < numberOfRealRecvRequests; i++) {
            status.MPI_ERROR = MPI_SUCCESS;
            MPI_Waitany(detail::to_int(m_messageInformation.size()), recvRequests.data(), &finished, &status);

            if (status.MPI_ERROR != MPI_SUCCESS) {
                OPM_THROW(std::runtime_error,
                          fmt::format("MPI_Error occurred while rank {} received a message from rank {}",
                                      rank, processMap[finished]));
            }
        }

        // Wait for the sends to complete.
        MPI_Status recvStatus;
        for (size_t i = 0; i < m_messageInformation.size(); i++) {
            if (MPI_SUCCESS != MPI_Wait(&sendRequests[i], &recvStatus)) {
                OPM_THROW(std::runtime_error,
                          fmt::format("MPI_Error occurred while rank {} sent a message to rank {}",
                                      rank, processMap[finished]));
            }
        }

        dest.syncFromRecvBuf(*m_GPURecvBuf, *m_commpairIndicesCopy);
    }
private:
    mutable std::unique_ptr<GpuVector<int>> m_commpairIndicesCopy;
    mutable std::unique_ptr<GpuVector<int>> m_commpairIndicesOwner;
    mutable std::unique_ptr<GpuVector<field_type>> m_GPUSendBuf;
    mutable std::unique_ptr<GpuVector<field_type>> m_GPURecvBuf;

    struct MessageInformation
    {
        MessageInformation() : m_start(0), m_size(0) {}
        MessageInformation(size_t start, size_t size) : m_start(start), m_size(size) {}
        size_t m_start; // offset into the GPU buffer, in field_type elements
        size_t m_size;  // message size, in bytes
    };

    using InformationMap = std::map<int, std::pair<MessageInformation, MessageInformation>>;
    mutable InformationMap m_messageInformation;
    using IM = std::map<int, std::pair<std::vector<int>, std::vector<int>>>;
    mutable IM m_im;

    constexpr static int m_commTag = 0; // Same tag as DUNE uses.
    void buildCommPairIdxs() const
    {
        const auto& ri = this->m_cpuOwnerOverlapCopy.remoteIndices();
        std::vector<int> commpairIndicesCopyOnCPU;
        std::vector<int> commpairIndicesOwnerCPU;

        // Sort the local indices to send and receive per remote process.
        for (const auto& process : ri) {
            m_im[process.first] = std::pair(std::vector<int>(), std::vector<int>());
            for (int send = 0; send < 2; ++send) {
                auto remoteEnd = send ? process.second.first->end()
                                      : process.second.second->end();
                auto remote = send ? process.second.first->begin()
                                   : process.second.second->begin();

                while (remote != remoteEnd) {
                    if (send ? (remote->localIndexPair().local().attribute() == 1)
                             : (remote->attribute() == 1)) {
                        if (send) {
                            m_im[process.first].first.push_back(remote->localIndexPair().local().local());
                        } else {
                            m_im[process.first].second.push_back(remote->localIndexPair().local().local());
                        }
                    }
                    ++remote;
                }
            }
        }

        int sendBufIdx = 0;
        int recvBufIdx = 0;
        for (auto it = m_im.begin(); it != m_im.end(); it++) {
            const int noSend = it->second.first.size();
            const int noRecv = it->second.second.size();

            if (noSend + noRecv > 0) {
                m_messageInformation.insert(
                    std::make_pair(it->first,
                                   std::make_pair(MessageInformation(sendBufIdx * block_size,
                                                                     noSend * block_size * sizeof(field_type)),
                                                  MessageInformation(recvBufIdx * block_size,
                                                                     noRecv * block_size * sizeof(field_type)))));

                // Expand the block indices to scalar indices into the GPU buffers.
                for (int x = 0; x < noSend; x++) {
                    for (int bs = 0; bs < block_size; bs++) {
                        commpairIndicesOwnerCPU.push_back(it->second.first[x] * block_size + bs);
                    }
                }
                for (int x = 0; x < noRecv; x++) {
                    for (int bs = 0; bs < block_size; bs++) {
                        commpairIndicesCopyOnCPU.push_back(it->second.second[x] * block_size + bs);
                    }
                }
                sendBufIdx += noSend;
                recvBufIdx += noRecv;
            }
        }

        m_commpairIndicesCopy = std::make_unique<GpuVector<int>>(commpairIndicesCopyOnCPU);
        m_commpairIndicesOwner = std::make_unique<GpuVector<int>>(commpairIndicesOwnerCPU);

        m_GPUSendBuf = std::make_unique<GpuVector<field_type>>(sendBufIdx * block_size);
        m_GPURecvBuf = std::make_unique<GpuVector<field_type>>(recvBufIdx * block_size);
    }
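    // Worked example (assumed values, for illustration): with block_size == 2
    // and owner block indices {4, 7} to be sent to one neighbour,
    // commpairIndicesOwnerCPU receives {8, 9, 14, 15}, and the matching
    // MessageInformation describes 2 * 2 * sizeof(field_type) bytes starting
    // at element offset sendBufIdx * 2 of m_GPUSendBuf.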
    void initIndexSet() const override
    {
        // Collect the indices used by the project, dot and norm calls,
        // expanded from block indices to scalar (field_type) indices.
        const auto& pis = this->m_cpuOwnerOverlapCopy.indexSet();
        std::vector<int> indicesCopyOnCPU;
        std::vector<int> indicesOwnerCPU;
        for (const auto& index : pis) {
            if (index.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::copy) {
                for (int component = 0; component < block_size; ++component) {
                    indicesCopyOnCPU.push_back(index.local().local() * block_size + component);
                }
            }

            if (index.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::owner) {
                for (int component = 0; component < block_size; ++component) {
                    indicesOwnerCPU.push_back(index.local().local() * block_size + component);
                }
            }
        }

        this->m_indicesCopy = std::make_unique<GpuVector<int>>(indicesCopyOnCPU);
        this->m_indicesOwner = std::make_unique<GpuVector<int>>(indicesOwnerCPU);

        buildCommPairIdxs();
    }
};
/**
 * @brief CUDA compatible variant of Dune::OwnerOverlapCopyCommunication.
 *
 * All communication is delegated to the GPUSender instance handed to the
 * constructor, which decides whether GPU aware MPI is used.
 */
template <class field_type, int block_size, class OwnerOverlapCopyCommunicationType>
class GpuOwnerOverlapCopy
{
public:
    using X = GpuVector<field_type>;

    GpuOwnerOverlapCopy(std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> sender)
        : m_sender(sender)
    {
    }

    void copyOwnerToAll(const X& source, X& dest) const
    {
        m_sender->copyOwnerToAll(source, dest);
    }

    void dot(const X& x, const X& y, field_type& output) const
    {
        m_sender->dot(x, y, output);
    }

    field_type norm(const X& x) const
    {
        return m_sender->norm(x);
    }

    void project(X& x) const
    {
        m_sender->project(x);
    }

private:
    std::shared_ptr<GPUSender<field_type, OwnerOverlapCopyCommunicationType>> m_sender;
};

} // namespace Opm::gpuistl

#endif // OPM_GPUISTL_GPUOWNEROVERLAPCOPY_HPP
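// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original header): one plausible
// way to wire the classes above together. The communication type, block size,
// vector sizes and the useGpuAwareMpi switch are assumptions for the example.
//
//   using Comm = Dune::OwnerOverlapCopyCommunication<int, int>;
//   constexpr int blockSize = 3;
//   using Sender = Opm::gpuistl::GPUSender<double, Comm>;
//
//   std::shared_ptr<Sender> sender;
//   if (useGpuAwareMpi) {
//       sender = std::make_shared<
//           Opm::gpuistl::GPUAwareMPISender<double, blockSize, Comm>>(comm);
//   } else {
//       sender = std::make_shared<
//           Opm::gpuistl::GPUObliviousMPISender<double, blockSize, Comm>>(comm);
//   }
//
//   Opm::gpuistl::GpuOwnerOverlapCopy<double, blockSize, Comm> gpuComm(sender);
//
//   Opm::gpuistl::GpuVector<double> x(numberOfRows * blockSize);
//   gpuComm.copyOwnerToAll(x, x);        // make owner values visible in copies
//   const double nrm = gpuComm.norm(x);  // consistent l^2-norm on every rank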