Thyra_EpetraThyraWrappers.cpp
1// @HEADER
2// ***********************************************************************
3//
4// Thyra: Interfaces and Support for Abstract Numerical Algorithms
5// Copyright (2004) Sandia Corporation
6//
7// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8// license for use of this work by or on behalf of the U.S. Government.
9//
10// Redistribution and use in source and binary forms, with or without
11// modification, are permitted provided that the following conditions are
12// met:
13//
14// 1. Redistributions of source code must retain the above copyright
15// notice, this list of conditions and the following disclaimer.
16//
17// 2. Redistributions in binary form must reproduce the above copyright
18// notice, this list of conditions and the following disclaimer in the
19// documentation and/or other materials provided with the distribution.
20//
21// 3. Neither the name of the Corporation nor the names of the
22// contributors may be used to endorse or promote products derived from
23// this software without specific prior written permission.
24//
25// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36//
37// Questions? Contact Roscoe A. Bartlett (bartlettra@ornl.gov)
38//
39// ***********************************************************************
40// @HEADER
41
43#include "Thyra_DefaultSpmdVectorSpace.hpp"
44#include "Thyra_DefaultSpmdMultiVector.hpp"
45#include "Thyra_DefaultSpmdVector.hpp"
46#include "Thyra_DetachedVectorView.hpp"
47#include "Thyra_DetachedMultiVectorView.hpp"
48#include "Thyra_VectorSpaceFactoryBase.hpp"
49#include "Thyra_ProductVectorSpaceBase.hpp"
50#include "Teuchos_Assert.hpp"
51#include "Teuchos_dyn_cast.hpp"
52
53#include "Teuchos_DefaultSerialComm.hpp"
54#ifdef HAVE_MPI
55#  include "Teuchos_DefaultMpiComm.hpp"
56#endif
57
58#include "Epetra_Map.h"
59#include "Epetra_Comm.h"
60#include "Epetra_SerialComm.h"
61#ifdef HAVE_MPI
62# include "Epetra_MpiComm.h"
63#endif
64#include "Epetra_MultiVector.h"
65#include "Epetra_Vector.h"
66
67//
68// Helpers
69//
70
71
72namespace {
73
74
75Teuchos::RCP<const Thyra::VectorSpaceBase<double> >
76unwrapSingleProductVectorSpace(
77 const Teuchos::RCP<const Thyra::VectorSpaceBase<double> > &vs_in
78 )
79{
80 using Teuchos::RCP;
81 using Teuchos::rcp_dynamic_cast;
82 using Thyra::ProductVectorSpaceBase;
83 const RCP<const ProductVectorSpaceBase<double> > pvs =
84 rcp_dynamic_cast<const ProductVectorSpaceBase<double> >(vs_in);
85 if (nonnull(pvs)) {
86 TEUCHOS_ASSERT_EQUALITY( pvs->numBlocks(), 1 );
87 return pvs->getBlock(0);
88 }
89 return vs_in;
90}
91
92
93} // namespace
94
95
96//
97// Implementations of user functions
98//
99
100
101Teuchos::RCP<const Teuchos::Comm<Ordinal> >
102Thyra::create_Comm( const RCP<const Epetra_Comm> &epetraComm )
103{
104 using Teuchos::rcp;
105 using Teuchos::rcp_dynamic_cast;
106 using Teuchos::set_extra_data;
107
108 const RCP<const Epetra_SerialComm>
109 serialEpetraComm = rcp_dynamic_cast<const Epetra_SerialComm>(epetraComm);
110 if( serialEpetraComm.get() ) {
111 const RCP<const Teuchos::SerialComm<Ordinal> >
112 serialComm = rcp(new Teuchos::SerialComm<Ordinal>());
113 set_extra_data( serialEpetraComm, "serialEpetraComm", Teuchos::inOutArg(serialComm) );
114 return serialComm;
115 }
116
117#ifdef HAVE_MPI
118
119 const RCP<const Epetra_MpiComm>
120 mpiEpetraComm = rcp_dynamic_cast<const Epetra_MpiComm>(epetraComm);
121 if( mpiEpetraComm.get() ) {
122 const RCP<const Teuchos::OpaqueWrapper<MPI_Comm> >
123 rawMpiComm = Teuchos::opaqueWrapper(mpiEpetraComm->Comm());
124 set_extra_data( mpiEpetraComm, "mpiEpetraComm", Teuchos::inOutArg(rawMpiComm) );
125 const RCP<const Teuchos::MpiComm<Ordinal> >
126 mpiComm = rcp(new Teuchos::MpiComm<Ordinal>(rawMpiComm));
127 return mpiComm;
128 }
129
130#endif // HAVE_MPI
131
132 // If you get here then the conversion failed!
133 return Teuchos::null;
134
135}
136
137
138Teuchos::RCP<const Thyra::VectorSpaceBase<double> >
139Thyra::create_VectorSpace(
140 const RCP<const Epetra_Map> &epetra_map
141 )
142{
143 using Teuchos::as; using Teuchos::inoutArg;
144
145#ifdef TEUCHOS_DEBUG
146 TEUCHOS_TEST_FOR_EXCEPTION(
147 !epetra_map.get(), std::invalid_argument,
148 "create_VectorSpace::initialize(...): Error!" );
149#endif // TEUCHOS_DEBUG
150
151 RCP<const Teuchos::Comm<Ordinal> > comm =
152 create_Comm(Teuchos::rcpFromRef(epetra_map->Comm())).assert_not_null();
153
154 Teuchos::set_extra_data(epetra_map, "epetra_map", inoutArg(comm));
155 const Ordinal localSubDim = epetra_map->NumMyElements();
156
157 const RCP<DefaultSpmdVectorSpace<double> > vs =
158 defaultSpmdVectorSpace<double>(
159 comm, localSubDim, epetra_map->NumGlobalElements64(),
160 !epetra_map->DistributedGlobal());
161
162 TEUCHOS_ASSERT_EQUALITY(vs->dim(), as<Ordinal>(epetra_map->NumGlobalElements64()));
163 // NOTE: the above assert just checks to make sure that the size of the
164 // Ordinal type can hold the size returned from NumGlobalElements64(). A
165 // 64 bit system will always have Ordinal=ptrdiff_t by default, which will
166 // always be 64 bit, so this should be fine. However, if Ordinal were
167 // defined to be only 32 bit, then this exception could trigger. Because
168 // this assert is likely to trigger only in a non-debug build, we will leave
169 // the assert unguarded since it is very cheap to perform.
170 Teuchos::set_extra_data( epetra_map, "epetra_map", inoutArg(vs) );
171
172 return vs;
173}
174
175
176Teuchos::RCP<const Thyra::VectorSpaceBase<double> >
177Thyra::create_LocallyReplicatedVectorSpace(
178 const RCP<const VectorSpaceBase<double> > &parentSpace,
179 const int dim
180 )
181{
182 using Teuchos::rcp_dynamic_cast;
183#ifdef TEUCHOS_DEBUG
184 TEUCHOS_TEST_FOR_EXCEPT(parentSpace.get()==NULL);
185 Teuchos::dyn_cast<const SpmdVectorSpaceBase<double> >(*parentSpace);
186 TEUCHOS_TEST_FOR_EXCEPT(dim <= 0);
187#endif
188 return parentSpace->smallVecSpcFcty()->createVecSpc(dim);
189}
190
191
192Teuchos::RCP<Thyra::VectorBase<double> >
193Thyra::create_Vector(
194 const RCP<Epetra_Vector> &epetra_v,
195 const RCP<const VectorSpaceBase<double> > &space_in
196 )
197{
198#ifdef TEUCHOS_DEBUG
199 TEUCHOS_TEST_FOR_EXCEPT(space_in.get()==NULL);
200#endif
201 const RCP<const SpmdVectorSpaceBase<double> >
202 space = Teuchos::rcp_dynamic_cast<const SpmdVectorSpaceBase<double> >(
203 space_in,true);
204 // mfh 06 Dec 2017: This return should not trigger an issue like
205 // #1941, because if epetra_v is NULL on some process but not
206 // others, then that process should not be participating in
207 // collectives with the other processes anyway.
208 if(!epetra_v.get())
209 return Teuchos::null;
210 // New local view of raw data
211 double *localValues;
212 epetra_v->ExtractView( &localValues );
213 // Build the Vector
214 const RCP<SpmdVectorBase<double> >
215 v = Teuchos::rcp(
216 new DefaultSpmdVector<double>(
217 space,
218 Teuchos::arcp(localValues,0,epetra_v->Map().NumMyElements(),false),
219 1
220 )
221 );
222 Teuchos::set_extra_data<RCP<Epetra_Vector> >( epetra_v, "Epetra_Vector",
223 Teuchos::inOutArg(v) );
224 return v;
225}
226
227
228Teuchos::RCP<const Thyra::VectorBase<double> >
229Thyra::create_Vector(
230 const RCP<const Epetra_Vector> &epetra_v,
231 const RCP<const VectorSpaceBase<double> > &space_in
232 )
233{
234#ifdef TEUCHOS_DEBUG
235 TEUCHOS_TEST_FOR_EXCEPT(space_in.get()==NULL);
236#endif
237 const RCP<const SpmdVectorSpaceBase<double> >
238 space = Teuchos::rcp_dynamic_cast<const SpmdVectorSpaceBase<double> >(
239 space_in,true);
240 // mfh 06 Dec 2017: This return should not trigger an issue like
241 // #1941, because if epetra_v is NULL on some process but not
242 // others, then that process should not be participating in
243 // collectives with the other processes anyway.
244 if(!epetra_v.get())
245 return Teuchos::null;
246 // New local view of raw data
247 double *localValues;
248 epetra_v->ExtractView( &localValues );
249 // Build the Vector
250 const RCP<const SpmdVectorBase<double> >
251 v = Teuchos::rcp(
252 new DefaultSpmdVector<double>(
253 space,
254 Teuchos::arcp(localValues,0,epetra_v->Map().NumMyElements(),false),
255 1
256 )
257 );
258 Teuchos::set_extra_data<RCP<const Epetra_Vector> >( epetra_v, "Epetra_Vector",
259 Teuchos::inOutArg(v) );
260 return v;
261}
262
263
264Teuchos::RCP<Thyra::MultiVectorBase<double> >
265Thyra::create_MultiVector(
266 const RCP<Epetra_MultiVector> &epetra_mv,
267 const RCP<const VectorSpaceBase<double> > &range_in,
268 const RCP<const VectorSpaceBase<double> > &domain_in
269 )
270{
271 using Teuchos::rcp_dynamic_cast;
272#ifdef TEUCHOS_DEBUG
273 TEUCHOS_TEST_FOR_EXCEPT(range_in.get()==NULL);
274#endif
275 const RCP<const SpmdVectorSpaceBase<double> > range =
276 Teuchos::rcp_dynamic_cast<const SpmdVectorSpaceBase<double> >(
277 unwrapSingleProductVectorSpace(range_in),
278 true
279 );
280 RCP<const ScalarProdVectorSpaceBase<double> > domain =
281 Teuchos::rcp_dynamic_cast<const ScalarProdVectorSpaceBase<double> >(
282 unwrapSingleProductVectorSpace(domain_in),
283 true
284 );
285 // mfh 06 Dec 2017: This return should not trigger an issue like
286 // #1941, because if epetra_mv is NULL on some process but not
287 // others, then that process should not be participating in
288 // collectives with the other processes anyway.
289 if (!epetra_mv.get() )
290 return Teuchos::null;
291 if ( is_null(domain) ) {
292 domain = rcp_dynamic_cast<const ScalarProdVectorSpaceBase<double> >(
293 create_LocallyReplicatedVectorSpace(range,epetra_mv->NumVectors())
294 );
295 }
296 // New local view of raw data
297 double *localValues; int leadingDim;
298 if( epetra_mv->ConstantStride() ) {
299 epetra_mv->ExtractView( &localValues, &leadingDim );
300 }
301 else {
302 TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: Implement views of non-contiguous multi-vectors!
303 }
304 // Build the MultiVector
305 const RCP<SpmdMultiVectorBase<double> >
306 mv = Teuchos::rcp(
307 new DefaultSpmdMultiVector<double>(
308 range,
309 domain,
310 Teuchos::arcp(localValues,0,leadingDim*epetra_mv->NumVectors(),false),
311 leadingDim
312 )
313 );
314 Teuchos::set_extra_data<RCP<Epetra_MultiVector> >(
315 epetra_mv, "Epetra_MultiVector", Teuchos::inOutArg(mv) );
316 return mv;
317}
318
319
320Teuchos::RCP<const Thyra::MultiVectorBase<double> >
321Thyra::create_MultiVector(
322 const RCP<const Epetra_MultiVector> &epetra_mv,
323 const RCP<const VectorSpaceBase<double> > &range_in,
324 const RCP<const VectorSpaceBase<double> > &domain_in
325 )
326{
327 using Teuchos::rcp_dynamic_cast;
328#ifdef TEUCHOS_DEBUG
329 TEUCHOS_TEST_FOR_EXCEPT(range_in.get()==NULL);
330#endif
331 const RCP<const SpmdVectorSpaceBase<double> > range =
332 Teuchos::rcp_dynamic_cast<const SpmdVectorSpaceBase<double> >(
333 unwrapSingleProductVectorSpace(range_in),
334 true
335 );
336 RCP<const ScalarProdVectorSpaceBase<double> > domain =
337 Teuchos::rcp_dynamic_cast<const ScalarProdVectorSpaceBase<double> >(
338 unwrapSingleProductVectorSpace(domain_in),
339 true
340 );
341 // mfh 06 Dec 2017: This return should not trigger an issue like
342 // #1941, because if epetra_mv is NULL on some process but not
343 // others, then that process should not be participating in
344 // collectives with the other processes anyway.
345 if (!epetra_mv.get())
346 return Teuchos::null;
347 if ( is_null(domain) ) {
348 domain = rcp_dynamic_cast<const ScalarProdVectorSpaceBase<double> >(
349 create_LocallyReplicatedVectorSpace(range,epetra_mv->NumVectors())
350 );
351 }
352 // New local view of raw data
353 double *localValues; int leadingDim;
354 if( epetra_mv->ConstantStride() ) {
355 epetra_mv->ExtractView( &localValues, &leadingDim );
356 }
357 else {
358 TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: Implement views of non-contiguous multi-vectors!
359 }
360 // Build the MultiVector
361 const RCP<const SpmdMultiVectorBase<double> >
362 mv = Teuchos::rcp(
363 new DefaultSpmdMultiVector<double>(
364 range,
365 domain,
366 Teuchos::arcp(localValues,0,leadingDim*epetra_mv->NumVectors(),false),
367 leadingDim
368 )
369 );
370 Teuchos::set_extra_data<RCP<const Epetra_MultiVector> >(
371 epetra_mv, "Epetra_MultiVector", Teuchos::inOutArg(mv) );
372 return mv;
373}
374
375
376Teuchos::RCP<const Epetra_Comm>
377Thyra::get_Epetra_Comm(const Teuchos::Comm<Ordinal> &comm_in)
378{
379
380 using Teuchos::rcp;
381 using Teuchos::ptrFromRef;
382 using Teuchos::ptr_dynamic_cast;
383 using Teuchos::SerialComm;
384#ifdef HAVE_MPI
385 using Teuchos::MpiComm;
386#endif
387
388 const Ptr<const Teuchos::Comm<Ordinal> > comm = Teuchos::ptrFromRef(comm_in);
389
390 const Ptr<const SerialComm<Ordinal> > serialComm =
391 ptr_dynamic_cast<const SerialComm<Ordinal> >(comm);
392
393 RCP<const Epetra_Comm> epetraComm;
394
395#ifdef HAVE_MPI
396
397 const Ptr<const MpiComm<Ordinal> > mpiComm =
398 ptr_dynamic_cast<const MpiComm<Ordinal> >(comm);
399
400 TEUCHOS_TEST_FOR_EXCEPTION(is_null(mpiComm) && is_null(serialComm),
401 std::runtime_error,
402 "SPMD std::vector space has a communicator that is "
403 "neither a serial comm nor an MPI comm");
404
405 if (nonnull(mpiComm)) {
406 epetraComm = rcp(new Epetra_MpiComm(*mpiComm->getRawMpiComm()()));
407 }
408 else {
409 epetraComm = rcp(new Epetra_SerialComm());
410 }
411
412#else
413
414 TEUCHOS_TEST_FOR_EXCEPTION(is_null(serialComm), std::runtime_error,
415 "SPMD std::vector space has a communicator that is "
416 "neither a serial comm nor an MPI comm");
417
418 epetraComm = rcp(new Epetra_SerialComm());
419
420#endif
421
422 TEUCHOS_TEST_FOR_EXCEPTION(is_null(epetraComm), std::runtime_error,
423 "null communicator created");
424
425 return epetraComm;
426
427}
428
429
430Teuchos::RCP<const Epetra_Map>
431Thyra::get_Epetra_Map(
432 const VectorSpaceBase<double>& vs_in,
433 const RCP<const Epetra_Comm>& comm)
434{
435
436 using Teuchos::rcpFromRef;
437 using Teuchos::rcpFromPtr;
438 using Teuchos::rcp_dynamic_cast;
439 using Teuchos::ptrFromRef;
440 using Teuchos::ptr_dynamic_cast;
441
442 const Ptr<const VectorSpaceBase<double> > vs_ptr = ptrFromRef(vs_in);
443
444 const Ptr<const SpmdVectorSpaceBase<double> > spmd_vs =
445 ptr_dynamic_cast<const SpmdVectorSpaceBase<double> >(vs_ptr);
446
447 const Ptr<const ProductVectorSpaceBase<double> > &prod_vs =
448 ptr_dynamic_cast<const ProductVectorSpaceBase<double> >(vs_ptr);
449
450 TEUCHOS_TEST_FOR_EXCEPTION( is_null(spmd_vs) && is_null(prod_vs), std::logic_error,
451 "Error, the concrete VectorSpaceBase object of type "
452 +Teuchos::demangleName(typeid(vs_in).name())+" does not support the"
453 " SpmdVectorSpaceBase or the ProductVectorSpaceBase interfaces!" );
454
455 const int numBlocks = (nonnull(prod_vs) ? prod_vs->numBlocks() : 1);
456
457 // Get an array of SpmdVectorBase objects for the blocks
458
459 Array<RCP<const SpmdVectorSpaceBase<double> > > spmd_vs_blocks;
460 if (nonnull(prod_vs)) {
461 for (int block_i = 0; block_i < numBlocks; ++block_i) {
462 const RCP<const SpmdVectorSpaceBase<double> > spmd_vs_i =
463 rcp_dynamic_cast<const SpmdVectorSpaceBase<double> >(
464 prod_vs->getBlock(block_i), true);
465 spmd_vs_blocks.push_back(spmd_vs_i);
466 }
467 }
468 else {
469 spmd_vs_blocks.push_back(rcpFromPtr(spmd_vs));
470 }
471
472 // Find the number of local elements, summed over all blocks
473
474 int myLocalElements = 0;
475 for (int block_i = 0; block_i < numBlocks; ++block_i) {
476 myLocalElements += spmd_vs_blocks[block_i]->localSubDim();
477 }
478
479 // Find the GIDs owned by this processor, taken from all blocks
480
481 int count=0;
482 int blockOffset = 0;
483#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
484 Array<int> myGIDs(myLocalElements);
485#else
486 Array<long long> myGIDs(myLocalElements);
487#endif
488 for (int block_i = 0; block_i < numBlocks; ++block_i) {
489 const RCP<const SpmdVectorSpaceBase<double> > spmd_vs_i = spmd_vs_blocks[block_i];
490 const int lowGIDInBlock = spmd_vs_i->localOffset();
491 const int numLocalElementsInBlock = spmd_vs_i->localSubDim();
492 for (int i=0; i < numLocalElementsInBlock; ++i, ++count) {
493 myGIDs[count] = blockOffset + lowGIDInBlock + i;
494 }
495 blockOffset += spmd_vs_i->dim();
496 }
497
498#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
499 const int globalDim = vs_in.dim();
500#else
501 const long long globalDim = vs_in.dim();
502#endif
503
504 return Teuchos::rcp(
505 new Epetra_Map(globalDim, myLocalElements, myGIDs.getRawPtr(), 0, *comm));
506
507}
508
509// Almost like the one above, but since the vector space comes in as an RCP, we can
510// check for an RCP<const Epetra_Map> in its extra data, which saves us time.
511Teuchos::RCP<const Epetra_Map>
512Thyra::get_Epetra_Map(
513 const RCP<const VectorSpaceBase<double>>& vs,
514 const RCP<const Epetra_Comm>& comm)
515{
516 //
517 // First, try to grab the Epetra_Map straight out of the
518 // RCP since this is the fastest way.
519 //
520 const Ptr<const RCP<const Epetra_Map> >
521 epetra_map_ptr = Teuchos::get_optional_extra_data<RCP<const Epetra_Map> >(
522 vs,"epetra_map");
523 // mfh 06 Dec 2017: This should be consistent over all processes
524 // that participate in v's communicator.
525 if(!is_null(epetra_map_ptr)) {
526 return *epetra_map_ptr;
527 }
528
529 // No luck. We need to call get_Epetra_Map(*vs,comm).
530 TEUCHOS_TEST_FOR_EXCEPTION(comm.is_null(), std::runtime_error,
531 "Error! No RCP Epetra_Map attached to the input vector space RCP, "
532 "and the input comm RCP is null.\n");
533
534 return get_Epetra_Map(*vs,comm);
535}
536
537Teuchos::RCP<Epetra_Vector>
538Thyra::get_Epetra_Vector(
539 const Epetra_Map &map,
540 const RCP<VectorBase<double> > &v
541 )
542{
543 using Teuchos::get_optional_extra_data;
544#ifdef TEUCHOS_DEBUG
545 RCP<const VectorSpaceBase<double> >
546 epetra_vs = create_VectorSpace(Teuchos::rcp(&map,false));
547 THYRA_ASSERT_VEC_SPACES(
548 "Thyra::get_Epetra_Vector(map,v)", *epetra_vs, *v->space() );
549#endif
550 //
551 // First, try to grab the Epetra_Vector straight out of the
552 // RCP since this is the fastest way.
553 //
554 const Ptr<const RCP<Epetra_Vector> >
555 epetra_v_ptr = get_optional_extra_data<RCP<Epetra_Vector> >(
556 v,"Epetra_Vector");
557 // mfh 06 Dec 2017: This should be consistent over all processes
558 // that participate in v's communicator.
559 if(!is_null(epetra_v_ptr)) {
560 return *epetra_v_ptr;
561 }
562 //
563 // The assumption that we (rightly) make here is that if the vector spaces
564 // are compatible, that either the multi-vectors are both in-core or the
565 // vector spaces are both derived from SpmdVectorSpaceBase and have
566 // compatible maps.
567 //
568 const VectorSpaceBase<double> &vs = *v->range();
569 const SpmdVectorSpaceBase<double> *mpi_vs
570 = dynamic_cast<const SpmdVectorSpaceBase<double>*>(&vs);
571 const Ordinal localOffset = ( mpi_vs ? mpi_vs->localOffset() : 0 );
572 const Ordinal localSubDim = ( mpi_vs ? mpi_vs->localSubDim() : vs.dim() );
573 //
574 // Here we will extract a view of the local elements in the underlying
575 // VectorBase object. In most cases, no data will be allocated or copied
576 // and only some small objects will be created and a few virtual functions
577 // will be called so the overhead should be low and fixed.
578 //
579 // Create a *mutable* view of the local elements; this view will be set on
580 // the RCP that is returned. As a result, this view will be released
581 // when the returned Epetra_Vector is released.
582 //
583 // Note that the input vector 'v' will be remembered through this detached
584 // view!
585 //
586 RCP<DetachedVectorView<double> >
587 emvv = Teuchos::rcp(
588 new DetachedVectorView<double>(
589 v
590 ,Range1D(localOffset,localOffset+localSubDim-1)
591 ,true // forceContiguous
592 )
593 );
594 // Create a temporary Epetra_Vector object and give it
595 // the above local view
596 RCP<Epetra_Vector>
597 epetra_v = Teuchos::rcp(
598 new Epetra_Vector(
599 ::View // CV
600 ,map // Map
601 ,const_cast<double*>(emvv->values()) // V
602 )
603 );
604 // Give the explicit view object to the above Epetra_Vector smart pointer
605 // object. In this way, when the client is finished with the Epetra_Vector
606 // view, the destructor of the object in emvv will automatically commit the
607 // changes to the elements in the input v VectorBase object (regardless of
608 // its implementation). This is truly an elegant result!
609 Teuchos::set_extra_data( emvv, "emvv", Teuchos::inOutArg(epetra_v),
610 Teuchos::PRE_DESTROY );
611 // We are done!
612 return epetra_v;
613}
614
615// Same as above, except the map may be omitted (in case the RCP of v
616// already has an attached RCP<Epetra_Vector>)
617Teuchos::RCP<Epetra_Vector>
618Thyra::get_Epetra_Vector(
619 const RCP<VectorBase<double> > &v,
620 const RCP<const Epetra_Map>& map
621 )
622{
623 //
624 // First, try to grab the Epetra_Vector straight out of the
625 // RCP since this is the fastest way.
626 //
627 const Ptr<const RCP<Epetra_Vector> >
628 epetra_v_ptr = Teuchos::get_optional_extra_data<RCP<Epetra_Vector> >(
629 v,"Epetra_Vector");
630 // mfh 06 Dec 2017: This should be consistent over all processes
631 // that participate in v's communicator.
632 if(!is_null(epetra_v_ptr)) {
633 return *epetra_v_ptr;
634 }
635
636 // No luck. We need to call get_Epetra_Vector(*map,v).
637 TEUCHOS_TEST_FOR_EXCEPTION(map.is_null(), std::runtime_error,
638 "Error! No RCP Epetra_Vector attached to the input vector RCP, "
639 "and the input map RCP is null.\n");
640
641 return get_Epetra_Vector(*map,v);
642}
643
644Teuchos::RCP<const Epetra_Vector>
645Thyra::get_Epetra_Vector(
646 const Epetra_Map &map,
647 const RCP<const VectorBase<double> > &v
648 )
649{
650 using Teuchos::get_optional_extra_data;
651#ifdef TEUCHOS_DEBUG
652 RCP<const VectorSpaceBase<double> >
653 epetra_vs = create_VectorSpace(Teuchos::rcp(&map,false));
654 THYRA_ASSERT_VEC_SPACES(
655 "Thyra::get_Epetra_Vector(map,v)", *epetra_vs, *v->space() );
656#endif
657 //
658 // First, try to grab the Epetra_Vector straight out of the
659 // RCP since this is the fastest way.
660 //
661 const Ptr<const RCP<const Epetra_Vector> >
662 epetra_v_ptr = get_optional_extra_data<RCP<const Epetra_Vector> >(
663 v,"Epetra_Vector");
664 // mfh 06 Dec 2017: This should be consistent over all processes
665 // that participate in v's communicator.
666 if(!is_null(epetra_v_ptr))
667 return *epetra_v_ptr;
668 const Ptr<const RCP<Epetra_Vector> >
669 epetra_nonconst_v_ptr = get_optional_extra_data<RCP<Epetra_Vector> >(
670 v,"Epetra_Vector");
671 // mfh 06 Dec 2017: This should be consistent over all processes
672 // that participate in v's communicator.
673 if(!is_null(epetra_nonconst_v_ptr))
674 return *epetra_nonconst_v_ptr;
675 //
676 // Same as above function except as stated below
677 //
678 const VectorSpaceBase<double> &vs = *v->range();
679 const SpmdVectorSpaceBase<double> *mpi_vs
680 = dynamic_cast<const SpmdVectorSpaceBase<double>*>(&vs);
681 const Ordinal localOffset = ( mpi_vs ? mpi_vs->localOffset() : 0 );
682 const Ordinal localSubDim = ( mpi_vs ? mpi_vs->localSubDim() : vs.dim() );
683 // Get an explicit *non-mutable* view of all of the elements in the multi
684 // vector. Note that 'v' will be remembered by this view!
685 RCP<ConstDetachedVectorView<double> >
686 evv = Teuchos::rcp(
687 new ConstDetachedVectorView<double>(
688 v
689 ,Range1D(localOffset,localOffset+localSubDim-1)
690 ,true // forceContiguous
691 )
692 );
693 // Create a temporary Epetra_Vector object and give it
694 // the above view
695 RCP<const Epetra_Vector>
696 epetra_v = Teuchos::rcp(
697 new Epetra_Vector(
698 ::View // CV
699 ,map // Map
700 ,const_cast<double*>(evv->values()) // V
701 )
702 );
703 // This next statement will cause the destructor to free the view if
704 // needed (see above function). Since this view is non-mutable,
705 // only a releaseDetachedView(...) and not a commit will be called.
706 // This is the whole reason there is a separate implementation for
707 // the const and non-const cases.
708 Teuchos::set_extra_data( evv, "evv", Teuchos::inOutArg(epetra_v),
709 Teuchos::PRE_DESTROY );
710 // We are done!
711 return epetra_v;
712}
713
714// Same as above, except the map may be omitted (in case the RCP of v
715// already has an attached RCP<Epetra_Vector>)
716Teuchos::RCP<const Epetra_Vector>
717Thyra::get_Epetra_Vector(
718 const RCP<const VectorBase<double> > &v,
719 const RCP<const Epetra_Map>& map
720 )
721{
722 //
723 // First, try to grab the Epetra_Vector straight out of the
724 // RCP since this is the fastest way.
725 //
726 const Ptr<const RCP<const Epetra_Vector> >
727 epetra_v_ptr = Teuchos::get_optional_extra_data<RCP<const Epetra_Vector> >(
728 v,"Epetra_Vector");
729 // mfh 06 Dec 2017: This should be consistent over all processes
730 // that participate in v's communicator.
731 if(!is_null(epetra_v_ptr)) {
732 return *epetra_v_ptr;
733 }
734
735 // No luck. We need to call get_Epetra_Vector(*map,v).
736 TEUCHOS_TEST_FOR_EXCEPTION(map.is_null(), std::runtime_error,
737 "Error! No RCP to Epetra_Vector attached to the input vector RCP, "
738 "and the input map RCP is null.\n");
739
740 return get_Epetra_Vector(*map,v);
741}
742
743Teuchos::RCP<Epetra_MultiVector>
744Thyra::get_Epetra_MultiVector(
745 const Epetra_Map &map,
746 const RCP<MultiVectorBase<double> > &mv
747 )
748{
749 using Teuchos::get_optional_extra_data;
750#ifdef TEUCHOS_DEBUG
751 RCP<const VectorSpaceBase<double> >
752 epetra_vs = create_VectorSpace(Teuchos::rcp(&map,false));
753 THYRA_ASSERT_VEC_SPACES(
754 "Thyra::get_Epetra_MultiVector(map,mv)", *epetra_vs, *mv->range() );
755#endif
756 //
757 // First, try to grab the Epetra_MultiVector straight out of the
758 // RCP since this is the fastest way.
759 //
760 const Ptr<const RCP<Epetra_MultiVector> >
761 epetra_mv_ptr = get_optional_extra_data<RCP<Epetra_MultiVector> >(
762 mv,"Epetra_MultiVector");
763 // mfh 06 Dec 2017: This should be consistent over all processes
764 // that participate in v's communicator.
765 if(!is_null(epetra_mv_ptr)) {
766 return *epetra_mv_ptr;
767 }
768 //
769 // The assumption that we (rightly) make here is that if the vector spaces
770 // are compatible, that either the multi-vectors are both in-core or the
771 // vector spaces are both derived from SpmdVectorSpaceBase and have
772 // compatible maps.
773 //
774 const VectorSpaceBase<double> &vs = *mv->range();
775 const SpmdVectorSpaceBase<double> *mpi_vs
776 = dynamic_cast<const SpmdVectorSpaceBase<double>*>(&vs);
777 const Ordinal localOffset = ( mpi_vs ? mpi_vs->localOffset() : 0 );
778 const Ordinal localSubDim = ( mpi_vs ? mpi_vs->localSubDim() : vs.dim() );
779 //
780 // Here we will extract a view of the local elements in the underlying
781 // MultiVectorBase object. In most cases, no data will be allocated or
782 // copied and only some small objects will be created and a few virtual
783 // functions will be called so the overhead should be low and fixed.
784 //
785 // Create a *mutable* view of the local elements; this view will be set on
786 // the RCP that is returned. As a result, this view will be released
787 // when the returned Epetra_MultiVector is released.
788 //
789 RCP<DetachedMultiVectorView<double> >
790 emmvv = Teuchos::rcp(
791 new DetachedMultiVectorView<double>(
792 *mv
793 ,Range1D(localOffset,localOffset+localSubDim-1)
794 )
795 );
796 // Create a temporary Epetra_MultiVector object and give it
797 // the above local view
798 RCP<Epetra_MultiVector>
799 epetra_mv = Teuchos::rcp(
800 new Epetra_MultiVector(
801 ::View // CV
802 ,map // Map
803 ,const_cast<double*>(emmvv->values()) // A
804 ,emmvv->leadingDim() // MyLDA
805 ,emmvv->numSubCols() // NumVectors
806 )
807 );
808 // Give the explicit view object to the above Epetra_MultiVector
809 // smart pointer object. In this way, when the client is finished
810 // with the Epetra_MultiVector view, the destructor of the object
811 // in emmvv will automatically commit the changes to the elements in
812 // the input mv MultiVectorBase object (regardless of its
813 // implementation). This is truly an elegant result!
814 Teuchos::set_extra_data( emmvv, "emmvv", Teuchos::inOutArg(epetra_mv),
815 Teuchos::PRE_DESTROY );
816 // Also set the mv itself as extra data just to be safe
817 Teuchos::set_extra_data( mv, "mv", Teuchos::inOutArg(epetra_mv) );
818 // We are done!
819 return epetra_mv;
820}
821
822// Same as above, except the map may be omitted (in case the RCP of mv
823// already has an attached RCP<Epetra_MultiVector>)
824Teuchos::RCP<Epetra_MultiVector>
825Thyra::get_Epetra_MultiVector(
826 const RCP<MultiVectorBase<double> > &mv,
827 const RCP<const Epetra_Map>& map
828 )
829{
830 //
831 // First, try to grab the Epetra_MultiVector straight out of the
832 // RCP since this is the fastest way.
833 //
834 const Ptr<const RCP<Epetra_MultiVector> >
835 epetra_mv_ptr = Teuchos::get_optional_extra_data<RCP<Epetra_MultiVector> >(
836 mv,"Epetra_MultiVector");
837 // mfh 06 Dec 2017: This should be consistent over all processes
838 // that participate in v's communicator.
839 if(!is_null(epetra_mv_ptr)) {
840 return *epetra_mv_ptr;
841 }
842
843 // No luck. We need to call get_Epetra_MultiVector(*map,mv).
844 TEUCHOS_TEST_FOR_EXCEPTION(map.is_null(), std::runtime_error,
845 "Error! No RCP to Epetra_MultiVector attached to the input vector RCP, "
846 "and the input map RCP is null.\n");
847
848 return get_Epetra_MultiVector(*map,mv);
849}
850
851Teuchos::RCP<const Epetra_MultiVector>
852Thyra::get_Epetra_MultiVector(
853 const Epetra_Map &map,
854 const RCP<const MultiVectorBase<double> > &mv
855 )
856{
857 using Teuchos::get_optional_extra_data;
858
859#ifdef TEUCHOS_DEBUG
860 RCP<const VectorSpaceBase<double> >
861 epetra_vs = create_VectorSpace(Teuchos::rcp(&map,false));
862
863 THYRA_ASSERT_VEC_SPACES(
864 "Thyra::get_Epetra_MultiVector(map,mv)",
865 *epetra_vs, *mv->range() );
866#endif
867
868 //
869 // First, try to grab the Epetra_MultiVector straight out of the
870 // RCP since this is the fastest way.
871 //
872 const Ptr<const RCP<const Epetra_MultiVector> >
873 epetra_mv_ptr
874 = get_optional_extra_data<RCP<const Epetra_MultiVector> >(
875 mv,"Epetra_MultiVector" );
876 // mfh 06 Dec 2017: This should be consistent over all processes
877 // that participate in v's communicator.
878 if(!is_null(epetra_mv_ptr)) {
879 return *epetra_mv_ptr;
880 }
881
882 //
883 // Same as above function except as stated below
884 //
885 const VectorSpaceBase<double> &vs = *mv->range();
886 const SpmdVectorSpaceBase<double> *mpi_vs
887 = dynamic_cast<const SpmdVectorSpaceBase<double>*>(&vs);
888 const Ordinal localOffset = ( mpi_vs ? mpi_vs->localOffset() : 0 );
889 const Ordinal localSubDim = ( mpi_vs ? mpi_vs->localSubDim() : vs.dim() );
890 // Get an explicit *non-mutable* view of all of the elements in
891 // the multi vector.
892 RCP<ConstDetachedMultiVectorView<double> >
893 emvv = Teuchos::rcp(
894 new ConstDetachedMultiVectorView<double>(
895 *mv
896 ,Range1D(localOffset,localOffset+localSubDim-1)
897 )
898 );
899
900 // Create a temporary Epetra_MultiVector object and give it
901 // the above view
902 RCP<const Epetra_MultiVector>
903 epetra_mv = Teuchos::rcp(
904 new Epetra_MultiVector(
905 ::View // CV
906 ,map // Map
907 ,const_cast<double*>(emvv->values()) // A
908 ,emvv->leadingDim() // MyLDA
909 ,emvv->numSubCols() // NumVectors
910 )
911 );
912
913 // This next statement will cause the destructor to free the view if
914 // needed (see above function). Since this view is non-mutable,
915 // only a releaseDetachedView(...) and not a commit will be called.
916 // This is the whole reason there is a separate implementation for
917 // the const and non-const cases.
918 Teuchos::set_extra_data( emvv, "emvv", Teuchos::inOutArg(epetra_mv),
919 Teuchos::PRE_DESTROY );
920 // Also set the mv itself as extra data just to be safe
921 Teuchos::set_extra_data( mv, "mv", Teuchos::inOutArg(epetra_mv) );
922
923 // We are done!
924 return epetra_mv;
925}
926
927// Same as above, except the map may be omitted (in case the RCP of mv
928// already has an attached RCP<const Epetra_MultiVector>)
929Teuchos::RCP<const Epetra_MultiVector>
930Thyra::get_Epetra_MultiVector(
931 const RCP<const MultiVectorBase<double> > &mv,
932 const RCP<const Epetra_Map>& map
933 )
934{
935 //
936 // First, try to grab the Epetra_MultiVector straight out of the
937 // RCP since this is the fastest way.
938 //
939 const Ptr<const RCP<const Epetra_MultiVector> >
940 epetra_mv_ptr = Teuchos::get_optional_extra_data<RCP<const Epetra_MultiVector> >(
941 mv,"Epetra_MultiVector");
942 // mfh 06 Dec 2017: This should be consistent over all processes
943 // that participate in v's communicator.
944 if(!is_null(epetra_mv_ptr)) {
945 return *epetra_mv_ptr;
946 }
947
948 // No luck. We need to call get_Epetra_MultiVector(*map,mv).
949 TEUCHOS_TEST_FOR_EXCEPTION(map.is_null(), std::runtime_error,
950 "Error! No RCP to Epetra_MultiVector attached to the input vector RCP, "
951 "and the input map RCP is null.\n");
952
953 return get_Epetra_MultiVector(*map,mv);
954}
955
956Teuchos::RCP<Epetra_MultiVector>
957Thyra::get_Epetra_MultiVector(
958 const Epetra_Map &map,
959 MultiVectorBase<double> &mv
960 )
961{
962 using Teuchos::rcpWithEmbeddedObj;
963 using Teuchos::ptrFromRef;
964 using Teuchos::ptr_dynamic_cast;
965 using Teuchos::outArg;
966
967 Ptr<SpmdMultiVectorBase<double> > mvSpmdMv =
968 ptr_dynamic_cast<SpmdMultiVectorBase<double> >(ptrFromRef(mv));
969
970 ArrayRCP<double> mvData;
971 Ordinal mvLeadingDim = 0;
972 if (nonnull(mvSpmdMv)) {
973 mvSpmdMv->getNonconstLocalData(outArg(mvData), outArg(mvLeadingDim));
974 }
975
976 return rcpWithEmbeddedObj(
977 new Epetra_MultiVector(
978 ::View, map, mvData.getRawPtr(), mvLeadingDim, mv.domain()->dim()
979 ),
980 mvData
981 );
982}
983
984
985Teuchos::RCP<const Epetra_MultiVector>
986Thyra::get_Epetra_MultiVector(
987 const Epetra_Map &map,
988 const MultiVectorBase<double> &mv
989 )
990{
991 using Teuchos::rcpWithEmbeddedObj;
992 using Teuchos::ptrFromRef;
993 using Teuchos::ptr_dynamic_cast;
994 using Teuchos::outArg;
995
996 Ptr<const SpmdMultiVectorBase<double> > mvSpmdMv =
997 ptr_dynamic_cast<const SpmdMultiVectorBase<double> >(ptrFromRef(mv));
998
999 ArrayRCP<const double> mvData;
1000 Ordinal mvLeadingDim = 0;
1001 if (nonnull(mvSpmdMv)) {
1002 mvSpmdMv->getLocalData(outArg(mvData), outArg(mvLeadingDim));
1003 }
1004
1005 return rcpWithEmbeddedObj(
1006 new Epetra_MultiVector(
1007 ::View, map, const_cast<double*>(mvData.getRawPtr()), mvLeadingDim, mv.domain()->dim()
1008 ),
1009 mvData
1010 );
1011}
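
The create_* functions above form the Epetra-to-Thyra direction of these wrappers. As a rough usage sketch (not part of this file; the serial map size and the driver function name are illustrative), an existing Epetra_Vector can be wrapped as a Thyra vector without copying its data:

#include "Thyra_EpetraThyraWrappers.hpp"
#include "Thyra_VectorStdOps.hpp"
#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_Vector.h"

void exampleWrapEpetraAsThyra()  // hypothetical driver
{
  using Teuchos::RCP;
  using Teuchos::rcp;

  // Build a small serial Epetra map and vector.
  const RCP<const Epetra_Comm> epetraComm = rcp(new Epetra_SerialComm());
  const RCP<const Epetra_Map> epetraMap = rcp(new Epetra_Map(10, 0, *epetraComm));
  const RCP<Epetra_Vector> epetraV = rcp(new Epetra_Vector(*epetraMap));
  epetraV->PutScalar(1.0);

  // Wrap the map as a Thyra vector space and the vector as a Thyra vector.
  // Both wrappers are views; no vector data is copied.
  const RCP<const Thyra::VectorSpaceBase<double> > vs =
    Thyra::create_VectorSpace(epetraMap);
  const RCP<Thyra::VectorBase<double> > v = Thyra::create_Vector(epetraV, vs);

  // Changes made through the Thyra interface are visible in epetraV.
  Thyra::Vt_S(v.ptr(), 2.0);  // every entry of epetraV is now 2.0
}
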
RCP< const Epetra_Comm > get_Epetra_Comm(const Teuchos::Comm< Ordinal > &comm)
Get (or create) an Epetra_Comm given a Teuchos::Comm object.
RCP< VectorBase< double > > create_Vector(const RCP< Epetra_Vector > &epetra_v, const RCP< const VectorSpaceBase< double > > &space=Teuchos::null)
Create a non-const VectorBase object from a non-const Epetra_Vector object.
RCP< MultiVectorBase< double > > create_MultiVector(const RCP< Epetra_MultiVector > &epetra_mv, const RCP< const VectorSpaceBase< double > > &range=Teuchos::null, const RCP< const VectorSpaceBase< double > > &domain=Teuchos::null)
Create a non-const MultiVectorBase object from a non-const Epetra_MultiVector object.
RCP< const Teuchos::Comm< Ordinal > > create_Comm(const RCP< const Epetra_Comm > &epetraComm)
Given an Epetra_Comm object, return an equivalent Teuchos::Comm object.
RCP< const VectorSpaceBase< double > > create_VectorSpace(const RCP< const Epetra_Map > &epetra_map)
Create a VectorSpaceBase object given an Epetra_Map object.
RCP< Epetra_Vector > get_Epetra_Vector(const Epetra_Map &map, const RCP< VectorBase< double > > &v)
Get a non-const Epetra_Vector view from a non-const VectorBase object if possible.
RCP< const Epetra_Map > get_Epetra_Map(const VectorSpaceBase< double > &vs, const RCP< const Epetra_Comm > &comm)
Get (or create) an Epetra_Map object given a VectorSpaceBase object and optionally an extra Epetra_Comm object.
RCP< Epetra_MultiVector > get_Epetra_MultiVector(const Epetra_Map &map, const RCP< MultiVectorBase< double > > &mv)
Get a non-const Epetra_MultiVector view from a non-const MultiVectorBase object if possible.
RCP< const VectorSpaceBase< double > > create_LocallyReplicatedVectorSpace(const RCP< const VectorSpaceBase< double > > &parentSpace, const int dim)
Create a VectorSpaceBase object that creates locally replicated vector objects.
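
In the other direction, get_Epetra_Vector() (and its multi-vector analogs) return temporary Epetra views whose embedded detached view commits any changes back to the Thyra object when the returned RCP is released, as described in the comments in the listing above. A minimal sketch, assuming an existing map and Thyra vector (the function name is illustrative):

#include "Thyra_EpetraThyraWrappers.hpp"
#include "Epetra_Map.h"
#include "Epetra_Vector.h"

void exampleEpetraViewOfThyraVector(
  const Epetra_Map &epetraMap,
  const Teuchos::RCP<Thyra::VectorBase<double> > &v
  )
{
  {
    // Non-const Epetra view of the Thyra vector's local data.
    const Teuchos::RCP<Epetra_Vector> epetraView =
      Thyra::get_Epetra_Vector(epetraMap, v);
    epetraView->PutScalar(3.0);
  } // The view's destructor runs here and commits the changes back into v.
  // v now holds 3.0 in every local entry.
}
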