00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00017 #ifndef DATA_H
00018 #define DATA_H
00019 #include "system_dep.h"
00020
00021 #include "DataTypes.h"
00022 #include "DataAbstract.h"
00023 #include "DataAlgorithm.h"
00024 #include "FunctionSpace.h"
00025 #include "BinaryOp.h"
00026 #include "UnaryOp.h"
00027 #include "DataException.h"
00028
00029
00030 extern "C" {
00031 #include "DataC.h"
00032
00033 }
00034
00035 #include "esysmpi.h"
00036 #include <string>
00037 #include <algorithm>
00038 #include <sstream>
00039
00040 #include <boost/shared_ptr.hpp>
00041 #include <boost/python/object.hpp>
00042 #include <boost/python/tuple.hpp>
00043
00044 #include "BufferGroup.h"
00045
00046 namespace escript {
00047
00048
00049
00050 class DataConstant;
00051 class DataTagged;
00052 class DataExpanded;
00053 class DataLazy;
00054
/**
   \brief
   Data represents a collection of datapoints attached to a FunctionSpace.
   It wraps a reference-counted DataAbstract (constant, tagged, expanded or
   lazy representation) and provides the public C++/Python interface.
*/
class Data {

  public:

  // Plain C function pointer types accepted by the unary/binary operation helpers.
  typedef double (*UnaryDFunPtr)(double);
  typedef double (*BinaryDFunPtr)(double,double);

  /**
     \brief Default constructor. Creates an empty Data object.
  */
  ESCRIPT_DLL_API
  Data();

  /**
     \brief Copy constructor.
     NOTE(review): whether this shares or deep-copies the underlying
     DataAbstract is decided in the .cpp — confirm before relying on it.
  */
  ESCRIPT_DLL_API
  Data(const Data& inData);

  /**
     \brief Construct from an existing Data, interpolated/transferred onto the
     given FunctionSpace.
  */
  ESCRIPT_DLL_API
  Data(const Data& inData,
       const FunctionSpace& what);

  /**
     \brief Construct from a raw ValueType vector with the given point shape.
     \param value the data values
     \param shape shape of each data point
     \param what FunctionSpace to attach to (default: empty FunctionSpace)
     \param expanded if true, store one value per data point (expanded form)
  */
  ESCRIPT_DLL_API
  Data(const DataTypes::ValueType& value,
		 const DataTypes::ShapeType& shape,
                 const FunctionSpace& what=FunctionSpace(),
                 bool expanded=false);

  /**
     \brief Construct with every data point set to the given scalar value.
     \param value value for each element of each data point
     \param dataPointShape shape of each data point
     \param what FunctionSpace to attach to
     \param expanded if true, store in expanded form
  */
  ESCRIPT_DLL_API
  Data(double value,
       const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
       const FunctionSpace& what=FunctionSpace(),
       bool expanded=false);

  /**
     \brief Construct a slice of another Data object over the given region.
  */
  ESCRIPT_DLL_API
  Data(const Data& inData,
       const DataTypes::RegionType& region);

  /**
     \brief Construct from a python object (scalar, sequence or numpy-like
     array accepted by WrappedArray).
  */
  ESCRIPT_DLL_API
  Data(const boost::python::object& value,
       const FunctionSpace& what=FunctionSpace(),
       bool expanded=false);

  /**
     \brief Construct from a python object, taking FunctionSpace and layout
     information from another Data object.
  */
  ESCRIPT_DLL_API
  Data(const boost::python::object& value,
       const Data& other);

  /**
     \brief Construct a constant-valued object whose point shape is given as a
     python tuple (convenience overload for Python callers).
  */
  ESCRIPT_DLL_API
  Data(double value,
       const boost::python::tuple& shape=boost::python::make_tuple(),
       const FunctionSpace& what=FunctionSpace(),
       bool expanded=false);



  /**
     \brief Construct by taking ownership of an existing DataAbstract.
  */
  ESCRIPT_DLL_API
  explicit Data(DataAbstract* underlyingdata);

  /**
     \brief Construct sharing an existing DataAbstract.
  */
  ESCRIPT_DLL_API
  explicit Data(DataAbstract_ptr underlyingdata);

  /**
     \brief Destructor.
  */
  ESCRIPT_DLL_API
  ~Data();

  /**
     \brief Perform a deep copy of other into this object.
  */
  ESCRIPT_DLL_API
  void
  copy(const Data& other);

  /**
     \brief Return a deep copy of this object.
  */
  ESCRIPT_DLL_API
  Data
  copySelf();


  /**
     \brief Return a lazily-evaluated version of this Data.
  */
  ESCRIPT_DLL_API
  Data
  delay();

  /**
     \brief Convert this object's representation to lazy in place.
  */
  ESCRIPT_DLL_API
  void
  delaySelf();


  /**
     \brief Mark this object as write-protected; subsequent modification
     attempts should fail (see isProtected()).
  */
  ESCRIPT_DLL_API
  void
  setProtection();

  /**
     \brief Return true if this object is write-protected.
  */
  ESCRIPT_DLL_API
  bool
  isProtected() const;


  /**
     \brief Return the value of the given data point as a python tuple.
  */
  ESCRIPT_DLL_API
  const boost::python::object
  getValueOfDataPointAsTuple(int dataPointNo);

  /**
     \brief Set the value of a single data point from a python object.
  */
  ESCRIPT_DLL_API
  void
  setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);

  /**
     \brief Set the value of a single data point from a python array-like object.
  */
  ESCRIPT_DLL_API
  void
  setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);

  /**
     \brief Set the value of a single (scalar) data point.
  */
  ESCRIPT_DLL_API
  void
  setValueOfDataPoint(int dataPointNo, const double);

  /**
     \brief Return, as a python tuple, the value of a data point identified by
     (MPI process number, data point number).
  */
  ESCRIPT_DLL_API
  const boost::python::object
  getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);

  /**
     \brief Return the tag number associated with the given data point.
  */
  ESCRIPT_DLL_API
  int
  getTagNumber(int dpno);

  /**
     \brief Return an escriptDataC struct referencing this object, for use by
     the C level of the library.
  */
  ESCRIPT_DLL_API
  escriptDataC
  getDataC();



  /**
     \brief Const overload of getDataC().
  */
  ESCRIPT_DLL_API
  escriptDataC
  getDataC() const;

  /**
     \brief Size (in samples) required for a buffer used to resolve lazy data
     (see allocSampleBuffer / getSampleDataRO).
  */
  ESCRIPT_DLL_API
  size_t
  getSampleBufferSize() const;



  /**
     \brief Human-readable summary of this object.
  */
  ESCRIPT_DLL_API
  std::string
  toString() const;

  /**
     \brief Convert the representation to expanded form (one value per point).
  */
  ESCRIPT_DLL_API
  void
  expand();

  /**
     \brief Convert the representation to tagged form (values keyed by tag).
  */
  ESCRIPT_DLL_API
  void
  tag();

  /**
     \brief Force evaluation of lazy data into a concrete (ready) form.
  */
  ESCRIPT_DLL_API
  void
  resolve();


  /**
     \brief Ensure this object can be written to safely: resolves lazy data
     and breaks sharing with other Data objects.
  */
  ESCRIPT_DLL_API
  void
  requireWrite();

  /**
     \brief True if the underlying representation is DataExpanded.
  */
  ESCRIPT_DLL_API
  bool
  isExpanded() const;

  /**
     \brief True if this object behaves like expanded data (e.g. lazy data
     whose result would be expanded).
  */
  ESCRIPT_DLL_API
  bool
  actsExpanded() const;


  /**
     \brief True if the underlying representation is DataTagged.
  */
  ESCRIPT_DLL_API
  bool
  isTagged() const;

  /**
     \brief True if the underlying representation is DataConstant.
  */
  ESCRIPT_DLL_API
  bool
  isConstant() const;

  /**
     \brief True if the underlying representation is DataLazy.
  */
  ESCRIPT_DLL_API
  bool
  isLazy() const;

  /**
     \brief True if the data is concrete (not lazy).
  */
  ESCRIPT_DLL_API
  bool
  isReady() const;

  /**
     \brief True if this object holds an instance of DataEmpty (most
     operations are not permitted on such objects).
  */
  ESCRIPT_DLL_API
  bool
  isEmpty() const;

  /**
     \brief Return the FunctionSpace this data is attached to.
  */
  ESCRIPT_DLL_API
  inline
  const FunctionSpace&
  getFunctionSpace() const
  {
    return m_data->getFunctionSpace();
  }

  /**
     \brief Return a copy of the FunctionSpace.
  */
  ESCRIPT_DLL_API
  const FunctionSpace
  getCopyOfFunctionSpace() const;

  /**
     \brief Return the domain underlying the FunctionSpace.
  */
  ESCRIPT_DLL_API
  inline

  const_Domain_ptr
  getDomain() const
  {
     return getFunctionSpace().getDomain();
  }


  /**
     \brief Return the domain as a non-const pointer (for the python wrapper,
     which cannot hold const pointers).
  */
  ESCRIPT_DLL_API
  inline

  Domain_ptr
  getDomainPython() const
  {
     return getFunctionSpace().getDomainPython();
  }

  /**
     \brief Return a copy of the underlying domain.
  */
  ESCRIPT_DLL_API
  const AbstractDomain
  getCopyOfDomain() const;

  /**
     \brief Rank (number of indices) of each data point.
  */
  ESCRIPT_DLL_API
  inline
  unsigned int
  getDataPointRank() const
  {
    return m_data->getRank();
  }

  /**
     \brief Total number of data points (samples x points per sample).
  */
  ESCRIPT_DLL_API
  inline
  int
  getNumDataPoints() const
  {
    return getNumSamples() * getNumDataPointsPerSample();
  }
  /**
     \brief Number of samples.
  */
  ESCRIPT_DLL_API
  inline
  int
  getNumSamples() const
  {
    return m_data->getNumSamples();
  }

  /**
     \brief Number of data points per sample.
  */
  ESCRIPT_DLL_API
  inline
  int
  getNumDataPointsPerSample() const
  {
    return m_data->getNumDPPSample();
  }


  /**
     \brief Number of values in each data point (product of the shape).
  */
  ESCRIPT_DLL_API
  int
  getNoValues() const
  {
    return m_data->getNoValues();
  }


  /**
     \brief Write this object to the named file.
  */
  ESCRIPT_DLL_API
  void
  dump(const std::string fileName) const;

  /**
     \brief Return the values as a python list of tuples.
     \param scalarastuple if true, rank-0 points are wrapped in a tuple.
  */
  ESCRIPT_DLL_API
  const boost::python::object
  toListOfTuples(bool scalarastuple=true);


  /**
     \brief Read-only pointer to the values of a sample.
     For lazy data a non-null BufferGroup must be supplied to hold the
     resolved values (see allocSampleBuffer()).
  */
  ESCRIPT_DLL_API
  inline
  const DataAbstract::ValueType::value_type*
  getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg=0);


  /**
     \brief Writable pointer to the values of a sample.
     Not permitted on lazy data; call requireWrite() first.
  */
  ESCRIPT_DLL_API
  inline
  DataAbstract::ValueType::value_type*
  getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);


  /**
     \brief Writable pointer to the values associated with the given tag.
  */
  ESCRIPT_DLL_API
  inline
  DataAbstract::ValueType::value_type*
  getSampleDataByTag(int tag)
  {
    return m_data->getSampleDataByTag(tag);
  }

  /**
     \brief Read-only reference to a single data point.
  */
  ESCRIPT_DLL_API
  DataTypes::ValueType::const_reference
  getDataPointRO(int sampleNo, int dataPointNo);

  /**
     \brief Writable reference to a single data point.
  */
  ESCRIPT_DLL_API
  DataTypes::ValueType::reference
  getDataPointRW(int sampleNo, int dataPointNo);



  /**
     \brief Offset of the given data point within the underlying value vector.
  */
  ESCRIPT_DLL_API
  inline
  DataTypes::ValueType::size_type
  getDataOffset(int sampleNo,
               int dataPointNo)
  {
      return m_data->getPointOffset(sampleNo,dataPointNo);
  }

  /**
     \brief Shape of each data point.
  */
  ESCRIPT_DLL_API
  inline
  const DataTypes::ShapeType&
  getDataPointShape() const
  {
	return m_data->getShape();
  }

  /**
     \brief Shape of each data point as a python tuple.
  */
  ESCRIPT_DLL_API
  const boost::python::tuple
  getShapeTuple() const;

  /**
     \brief Number of doubles needed to represent one data point.
  */
  ESCRIPT_DLL_API
  int
  getDataPointSize() const;

  /**
     \brief Total number of doubles stored for this object.
  */
  ESCRIPT_DLL_API
  DataTypes::ValueType::size_type
  getLength() const;



  /**
     \brief Assign a value to all data points with the named tag.
  */
  ESCRIPT_DLL_API
  void
  setTaggedValueByName(std::string name,
                       const boost::python::object& value);

  /**
     \brief Assign a value to all data points with the given tag key.
  */
  ESCRIPT_DLL_API
  void
  setTaggedValue(int tagKey,
                 const boost::python::object& value);

  /**
     \brief C++-level tagged assignment.
     \param tagKey tag to assign to
     \param pointshape shape of the value being assigned
     \param value vector holding the value
     \param dataOffset offset of the value within the vector
  */
  ESCRIPT_DLL_API
  void
  setTaggedValueFromCPP(int tagKey,
			const DataTypes::ShapeType& pointshape,
                        const DataTypes::ValueType& value,
			int dataOffset=0);



  /**
     \brief Copy values from other into this object wherever mask is
     positive/non-zero (exact mask semantics live in the .cpp).
  */
  ESCRIPT_DLL_API
  void
  copyWithMask(const Data& other,
               const Data& mask);

  /**
     \brief Set all values to zero.
  */
  ESCRIPT_DLL_API
  void
  setToZero();

  /**
     \brief Interpolate this object onto the given FunctionSpace.
  */
  ESCRIPT_DLL_API
  Data
  interpolate(const FunctionSpace& functionspace) const;
  /**
     \brief Gradient evaluated on the given FunctionSpace.
  */
  ESCRIPT_DLL_API
  Data
  gradOn(const FunctionSpace& functionspace) const;

  /**
     \brief Gradient on the default (gradient) FunctionSpace of the domain.
  */
  ESCRIPT_DLL_API
  Data
  grad() const;

  /**
     \brief Integrals over the domain, returned as a python tuple
     (const overload; does not resolve lazy data in place).
  */
  ESCRIPT_DLL_API
  boost::python::object
  integrateToTuple_const() const;


  /**
     \brief Integrals over the domain, returned as a python tuple.
  */
  ESCRIPT_DLL_API
  boost::python::object
  integrateToTuple();



  /**
     \brief Elementwise reciprocal (1/x).
  */
  ESCRIPT_DLL_API
  Data
  oneOver() const;
  /**
     \brief Mask: 1 where values are positive, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  wherePositive() const;

  /**
     \brief Mask: 1 where values are negative, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  whereNegative() const;

  /**
     \brief Mask: 1 where values are >= 0, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  whereNonNegative() const;

  /**
     \brief Mask: 1 where values are <= 0, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  whereNonPositive() const;

  /**
     \brief Mask: 1 where |value| <= tol, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  whereZero(double tol=0.0) const;

  /**
     \brief Mask: 1 where |value| > tol, 0 elsewhere.
  */
  ESCRIPT_DLL_API
  Data
  whereNonZero(double tol=0.0) const;

  /**
     \brief Largest absolute value over all data points (may resolve lazy
     data; hence non-const).
  */
  ESCRIPT_DLL_API
  double
  Lsup();

  /**
     \brief Const variant of Lsup().
  */
  ESCRIPT_DLL_API
  double
  Lsup_const() const;


  /**
     \brief Maximum value over all data points.
  */
  ESCRIPT_DLL_API
  double
  sup();

  /**
     \brief Const variant of sup().
  */
  ESCRIPT_DLL_API
  double
  sup_const() const;


  /**
     \brief Minimum value over all data points.
  */
  ESCRIPT_DLL_API
  double
  inf();

  /**
     \brief Const variant of inf().
  */
  ESCRIPT_DLL_API
  double
  inf_const() const;



  /**
     \brief Elementwise absolute value.
  */
  ESCRIPT_DLL_API
  Data
  abs() const;

  /**
     \brief Per-data-point maximum value (reduces each point to a scalar).
  */
  ESCRIPT_DLL_API
  Data
  maxval() const;

  /**
     \brief Per-data-point minimum value (reduces each point to a scalar).
  */
  ESCRIPT_DLL_API
  Data
  minval() const;

  /**
     \brief Locate the global minimum; returns (process number, data point
     number) as a python tuple.
  */
  ESCRIPT_DLL_API
  const boost::python::tuple
  minGlobalDataPoint() const;

  /**
     \brief Locate the global maximum; returns (process number, data point
     number) as a python tuple.
  */
  ESCRIPT_DLL_API
  const boost::python::tuple
  maxGlobalDataPoint() const;



  /**
     \brief Elementwise sign (-1, 0 or +1).
  */
  ESCRIPT_DLL_API
  Data
  sign() const;

  /**
     \brief Symmetric part of a square matrix/tensor valued object.
  */
  ESCRIPT_DLL_API
  Data
  symmetric() const;

  /**
     \brief Non-symmetric (antisymmetric) part of a square matrix/tensor.
  */
  ESCRIPT_DLL_API
  Data
  nonsymmetric() const;

  /**
     \brief Trace over a pair of indices starting at axis_offset.
  */
  ESCRIPT_DLL_API
  Data
  trace(int axis_offset) const;

  /**
     \brief Transpose with the axes rotated by axis_offset.
  */
  ESCRIPT_DLL_API
  Data
  transpose(int axis_offset) const;

  /**
     \brief Eigenvalues of square-matrix valued data.
  */
  ESCRIPT_DLL_API
  Data
  eigenvalues() const;

  /**
     \brief Eigenvalues and eigenvectors, returned as a python tuple.
     \param tol tolerance used by the eigen solver.
  */
  ESCRIPT_DLL_API
  const boost::python::tuple
  eigenvalues_and_eigenvectors(const double tol=1.e-12) const;

  /**
     \brief Swap two axes of each data point.
  */
  ESCRIPT_DLL_API
  Data
  swapaxes(const int axis0, const int axis1) const;

  /**
     \brief Elementwise error function.
  */
  ESCRIPT_DLL_API
  Data
  erf() const;

  // --- elementwise transcendental functions ---

  ESCRIPT_DLL_API
  Data
  sin() const;

  ESCRIPT_DLL_API
  Data
  cos() const;

  ESCRIPT_DLL_API
  Data
  tan() const;

  ESCRIPT_DLL_API
  Data
  asin() const;

  ESCRIPT_DLL_API
  Data
  acos() const;

  ESCRIPT_DLL_API
  Data
  atan() const;

  ESCRIPT_DLL_API
  Data
  sinh() const;

  ESCRIPT_DLL_API
  Data
  cosh() const;

  ESCRIPT_DLL_API
  Data
  tanh() const;

  ESCRIPT_DLL_API
  Data
  asinh() const;

  ESCRIPT_DLL_API
  Data
  acosh() const;

  ESCRIPT_DLL_API
  Data
  atanh() const;

  ESCRIPT_DLL_API
  Data
  log10() const;

  ESCRIPT_DLL_API
  Data
  log() const;

  ESCRIPT_DLL_API
  Data
  exp() const;

  ESCRIPT_DLL_API
  Data
  sqrt() const;

  /**
     \brief Elementwise negation (-x).
  */
  ESCRIPT_DLL_API
  Data
  neg() const;

  /**
     \brief Identity operation (+x); returns a copy of this object.
  */
  ESCRIPT_DLL_API
  Data
  pos() const;

  /**
     \brief Elementwise power: this ** right.
  */
  ESCRIPT_DLL_API
  Data
  powD(const Data& right) const;

  /**
     \brief Elementwise power with a python object exponent.
  */
  ESCRIPT_DLL_API
  Data
  powO(const boost::python::object& right) const;

  /**
     \brief Reverse power: left ** this (supports python __rpow__).
  */
  ESCRIPT_DLL_API
  Data
  rpowO(const boost::python::object& left) const;

  /**
     \brief Save this object in OpenDX format.
  */
  ESCRIPT_DLL_API
  void
  saveDX(std::string fileName) const;

  /**
     \brief Save this object in VTK format.
  */
  ESCRIPT_DLL_API
  void
  saveVTK(std::string fileName) const;

  // --- in-place arithmetic (Data and python-object right-hand sides) ---

  ESCRIPT_DLL_API
  Data& operator+=(const Data& right);
  ESCRIPT_DLL_API
  Data& operator+=(const boost::python::object& right);

  ESCRIPT_DLL_API
  Data& operator=(const Data& other);

  ESCRIPT_DLL_API
  Data& operator-=(const Data& right);
  ESCRIPT_DLL_API
  Data& operator-=(const boost::python::object& right);

  ESCRIPT_DLL_API
  Data& operator*=(const Data& right);
  ESCRIPT_DLL_API
  Data& operator*=(const boost::python::object& right);

  ESCRIPT_DLL_API
  Data& operator/=(const Data& right);
  ESCRIPT_DLL_API
  Data& operator/=(const boost::python::object& right);

  /**
     \brief True if this object can be interpolated onto the given
     FunctionSpace.
  */
  ESCRIPT_DLL_API
  bool
  probeInterpolation(const FunctionSpace& functionspace) const;

  /**
     \brief Python __getitem__: return the slice selected by key.
  */
  ESCRIPT_DLL_API
  Data
  getItem(const boost::python::object& key) const;

  /**
     \brief Python __setitem__ with a Data value.
  */
  ESCRIPT_DLL_API
  void
  setItemD(const boost::python::object& key,
           const Data& value);

  /**
     \brief Python __setitem__ with a python-object value.
  */
  ESCRIPT_DLL_API
  void
  setItemO(const boost::python::object& key,
           const boost::python::object& value);



  /**
     \brief Apply a unary operation to every value in place.
  */
  template <class UnaryFunction>
  ESCRIPT_DLL_API
  inline
  void
  unaryOp2(UnaryFunction operation);

  /**
     \brief Return the slice of this object defined by region.
  */
  ESCRIPT_DLL_API
  Data
  getSlice(const DataTypes::RegionType& region) const;

  /**
     \brief Copy value into the slice of this object defined by region.
  */
  ESCRIPT_DLL_API
  void
  setSlice(const Data& value,
           const DataTypes::RegionType& region);

  /**
     \brief Print this object's values (debugging aid).
  */
  ESCRIPT_DLL_API
  void
  print(void);

  /**
     \brief MPI rank of this process.
  */
  ESCRIPT_DLL_API
  int
  get_MPIRank(void) const;

  /**
     \brief Number of MPI processes.
  */
  ESCRIPT_DLL_API
  int
  get_MPISize(void) const;

  /**
     \brief MPI communicator used by this object's domain.
  */
  ESCRIPT_DLL_API
  MPI_Comm
  get_MPIComm(void) const;

  /**
     \brief Borrow (non-owning) pointer to the underlying DataAbstract.
  */
  ESCRIPT_DLL_API
  DataAbstract*
  borrowData(void) const;

  ESCRIPT_DLL_API
  DataAbstract_ptr
  borrowDataPtr(void) const;

  ESCRIPT_DLL_API
  DataReady_ptr
  borrowReadyPtr(void) const;



  /**
     \brief Read-only reference to the value at offset i in the underlying
     vector (no bounds/laziness handling documented here — see .cpp).
  */
  ESCRIPT_DLL_API
  DataTypes::ValueType::const_reference
  getDataAtOffsetRO(DataTypes::ValueType::size_type i);


  /**
     \brief Writable reference to the value at offset i.
  */
  ESCRIPT_DLL_API
  DataTypes::ValueType::reference
  getDataAtOffsetRW(DataTypes::ValueType::size_type i);



  /**
     \brief Allocate a BufferGroup suitable for resolving this object's lazy
     samples (pass to getSampleDataRO). Caller releases it with
     freeSampleBuffer().
  */
  ESCRIPT_DLL_API
  BufferGroup*
  allocSampleBuffer() const;

  /**
     \brief Release a BufferGroup obtained from allocSampleBuffer().
  */
  ESCRIPT_DLL_API void freeSampleBuffer(BufferGroup* buffer);

 protected:

 private:

  // Workers for the public reduction operations (Lsup/sup/inf/integrate).
  double
  LsupWorker() const;

  double
  supWorker() const;

  double
  infWorker() const;

  boost::python::object
  integrateWorker() const;

  void
  calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;

  void
  calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;


  /**
     \brief Check that this object and right are compatible operands
     (delegates to DataAbstract).
  */
  inline
  void
  operandCheck(const Data& right) const
  {
    return m_data->operandCheck(*(right.m_data.get()));
  }

  /**
     \brief Reduce all values with a binary operation, starting from
     initial_value (defined later in this header).
  */
  template <class BinaryFunction>
  inline
  double
  algorithm(BinaryFunction operation,
            double initial_value) const;

  /**
     \brief Reduce each data point to a scalar with a binary operation,
     returning a new scalar-valued Data (defined later in this header).
  */
  template <class BinaryFunction>
  inline
  Data
  dp_algorithm(BinaryFunction operation,
               double initial_value) const;

  /**
     \brief Combine this object with right, pointwise, using a binary
     operation (defined later in this header).
  */
  template <class BinaryFunction>
  inline
  void
  binaryOp(const Data& right,
           BinaryFunction operation);

  /**
     \brief Promote right's representation to match this object's.
  */
  void
  typeMatchLeft(Data& right) const;

  /**
     \brief Promote this object's representation to match right's.
  */
  void
  typeMatchRight(const Data& right);

  // Construction helpers shared by the public constructors.
  void
  initialise(const DataTypes::ValueType& value,
	     const DataTypes::ShapeType& shape,
             const FunctionSpace& what,
             bool expanded);

  void
  initialise(const WrappedArray& value,
             const FunctionSpace& what,
             bool expanded);



  bool m_protected;          // write-protection flag (see setProtection)
  mutable bool m_shared;     // true while m_data is shared with another owner
  bool m_lazy;               // NOTE(review): appears to shadow m_data->isLazy(); confirm it is kept in sync in the .cpp




  // The underlying representation (constant/tagged/expanded/lazy).
  DataAbstract_ptr m_data;



  // Checked casts of m_data to DataReady (assert on lazy data);
  // bodies follow the class in this header.
  const DataReady*
  getReady() const;

  DataReady*
  getReady();




  DataReady_ptr
  getReadyPtr();

  const_DataReady_ptr
  getReadyPtr() const;


  /**
     \brief Record whether m_data is currently shared.
  */
  void updateShareStatus(bool nowshared) const
  {
	m_shared=nowshared;
  }














  /**
     \brief True if m_data is shared with another Data object.
  */
  bool isShared() const
  {
	return m_shared;






  }

  /**
     \brief Resolve lazy data in place; must not be called from inside an
     OpenMP parallel region.
  */
  void forceResolve()
  {
	if (isLazy())
	{
	    #ifdef _OPENMP
	    if (omp_in_parallel())
	    {
		throw DataException("Please do not call forceResolve() in a parallel region.");
	    }
	    #endif
	    resolve();
	}
  }

  /**
     \brief Ensure this object has sole, resolved ownership of its data,
     deep-copying if it is currently shared.
  */
  void exclusiveWrite()
  {
#ifdef _OPENMP
	if (omp_in_parallel())
	{
	    throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
	}
#endif
	forceResolve();
	if (isShared())
	{
		DataAbstract* t=m_data->deepCopy();
		set_m_data(DataAbstract_ptr(t));
	}
  }

  /**
     \brief Assert (by exception) that the object is safe to write to.
  */
  void checkExclusiveWrite()
  {
	if (isLazy() || isShared())
	{
	    throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
	}
  }

  /**
     \brief Replace m_data, maintaining the shared flag bookkeeping
     (implemented in the .cpp).
  */
  void set_m_data(DataAbstract_ptr p);

  friend class DataAbstract;	// allows isShared/updateShareStatus to be used by the representation classes

};
01728
01729 }
01730
01731
01732
01733
01734
01735 #include "DataReady.h"
01736 #include "DataLazy.h"
01737
01738 namespace escript
01739 {
01740
01741 inline
01742 const DataReady*
01743 Data::getReady() const
01744 {
01745 const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
01746 EsysAssert((dr!=0), "Error - casting to DataReady.");
01747 return dr;
01748 }
01749
01750 inline
01751 DataReady*
01752 Data::getReady()
01753 {
01754 DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
01755 EsysAssert((dr!=0), "Error - casting to DataReady.");
01756 return dr;
01757 }
01758
01759
01760
01761
01762 inline
01763 DataReady_ptr
01764 Data::getReadyPtr()
01765 {
01766 DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
01767 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01768 return dr;
01769 }
01770
01771
01772 inline
01773 const_DataReady_ptr
01774 Data::getReadyPtr() const
01775 {
01776 const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
01777 EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
01778 return dr;
01779 }
01780
01781 inline
01782 DataAbstract::ValueType::value_type*
01783 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
01784 {
01785 if (isLazy())
01786 {
01787 throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
01788 }
01789 return getReady()->getSampleDataRW(sampleNo);
01790 }
01791
01792 inline
01793 const DataAbstract::ValueType::value_type*
01794 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo, BufferGroup* bufferg)
01795 {
01796 DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
01797 if (l!=0)
01798 {
01799 size_t offset=0;
01800 if (bufferg==NULL)
01801 {
01802 throw DataException("Error, attempt to getSampleDataRO for lazy Data with buffer==NULL");
01803 }
01804 const DataTypes::ValueType* res=l->resolveSample(*bufferg,sampleNo,offset);
01805 return &((*res)[offset]);
01806 }
01807 return getReady()->getSampleDataRO(sampleNo);
01808 }
01809
01810
01811
01815 char *Escript_MPI_appendRankToFileName(const char *, int, int);
01816
/**
   \brief Reverse power: returns y raised to the power x.
   The argument order is deliberately swapped relative to pow() — this
   backs the "reflected" power operation (cf. Python __rpow__), where the
   left operand supplied by the caller is the exponent's base.
*/
inline double rpow(double x,double y)
{
   const double base=y;       // note the intentional reversal
   const double exponent=x;
   return pow(base,exponent);
}
01824
01830 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
01831
01837 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
01838
01844 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
01845
01851 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
01852
01859 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
01860
01867 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
01868
01875 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
01876
01883 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
01884
01891 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
01892
01899 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
01900
01907 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
01908
01915 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
01916
01917
01918
01923 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
01924
01933 ESCRIPT_DLL_API
01934 Data
01935 C_GeneralTensorProduct(Data& arg_0,
01936 Data& arg_1,
01937 int axis_offset=0,
01938 int transpose=0);
01939
/**
   \brief Combine this object with "right" pointwise, storing the result in
   this object.  Operands must be resolved (non-lazy).  If the function
   spaces differ, one side is interpolated onto the other where possible,
   then the right operand's representation is promoted to match before the
   storage-specific binaryOp overload is applied.
*/
template <class BinaryFunction>
inline
void
Data::binaryOp(const Data& right,
               BinaryFunction operation)
{
   // A rank-0 target cannot absorb a higher-rank operand.
   if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
     throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
   }

   if (isLazy() || right.isLazy())
   {
     throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
   }

   // Work on a copy of right so interpolation does not disturb the caller's object.
   Data tempRight(right);

   if (getFunctionSpace()!=right.getFunctionSpace()) {
     if (right.probeInterpolation(getFunctionSpace())) {
       // Interpolate the right operand onto our function space.
       tempRight=Data(right,this->getFunctionSpace());
     } else if (probeInterpolation(right.getFunctionSpace())) {
       // Otherwise move *this* onto the right operand's function space.
       Data tempLeft(*this,right.getFunctionSpace());
       set_m_data(tempLeft.m_data);
     }
   }
   operandCheck(tempRight);

   // Promote this object's representation if the right side is "bigger"
   // (e.g. constant op expanded -> expanded).
   typeMatchRight(tempRight);

   // Dispatch on our concrete storage type; tempRight is guaranteed to be
   // compatible after typeMatchRight.
   if (isExpanded()) {
     DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
     escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
   } else if (isTagged()) {
     // Tagged target: the right side may be tagged or constant.
     DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
     EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
     if (right.isTagged()) {
       DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
       escript::binaryOp(*leftC,*rightC,operation);
     } else {
       DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
       EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
       escript::binaryOp(*leftC,*rightC,operation);
     }
   } else if (isConstant()) {
     DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
     DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
     EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
     escript::binaryOp(*leftC,*rightC,operation);
   }
}
02014
/**
   \brief Reduce every value of this object to a single double by repeatedly
   applying "operation", starting from initial_value.  Dispatches to the
   escript::algorithm overload matching the concrete storage type.
   \throws DataException for empty, lazy, or unrecognised storage.
*/
template <class BinaryFunction>
inline
double
Data::algorithm(BinaryFunction operation, double initial_value) const
{
  if (isExpanded()) {
    DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isTagged()) {
    DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isConstant()) {
    DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
    EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
    return escript::algorithm(*leftC,operation,initial_value);
  } else if (isEmpty()) {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
02047
/**
   \brief Reduce each data point of this object to a scalar with "operation"
   (seeded by initial_value) and return the scalar-valued results as a new
   Data on the same FunctionSpace.  The result mirrors this object's
   storage type (expanded/tagged/constant).
   \throws DataException for empty, lazy, or unrecognised storage.
*/
template <class BinaryFunction>
inline
Data
Data::dp_algorithm(BinaryFunction operation, double initial_value) const
{
  if (isEmpty()) {
    throw DataException("Error - Operations not permitted on instances of DataEmpty.");
  }
  else if (isExpanded()) {
    // Scalar-shaped result with the same expansion as the source.
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
    DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
    EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
    EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
    escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
    return result;
  }
  else if (isTagged()) {
    DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
    EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
    // Scalar default value of 0 for the tagged result.
    DataTypes::ValueType defval(1);
    defval[0]=0;
    // Ownership of resultT passes to the Data constructor below.
    DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
    escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
    return Data(resultT);
  }
  else if (isConstant()) {
    Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
    DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
    DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
    EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
    EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
    escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
    return result;
  } else if (isLazy()) {
    throw DataException("Error - Operations not permitted on instances of DataLazy.");
  } else {
    throw DataException("Error - Data encapsulates an unknown type.");
  }
}
02096
02104 template <typename BinaryFunction>
02105 inline
02106 Data
02107 C_TensorBinaryOperation(Data const &arg_0,
02108 Data const &arg_1,
02109 BinaryFunction operation)
02110 {
02111 if (arg_0.isEmpty() || arg_1.isEmpty())
02112 {
02113 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02114 }
02115 if (arg_0.isLazy() || arg_1.isLazy())
02116 {
02117 throw DataException("Error - Operations not permitted on lazy data.");
02118 }
02119
02120 Data arg_0_Z, arg_1_Z;
02121 if (arg_0.getFunctionSpace()!=arg_1.getFunctionSpace()) {
02122 if (arg_0.probeInterpolation(arg_1.getFunctionSpace())) {
02123 arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
02124 arg_1_Z = Data(arg_1);
02125 }
02126 else if (arg_1.probeInterpolation(arg_0.getFunctionSpace())) {
02127 arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
02128 arg_0_Z =Data(arg_0);
02129 }
02130 else {
02131 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible function spaces.");
02132 }
02133 } else {
02134 arg_0_Z = Data(arg_0);
02135 arg_1_Z = Data(arg_1);
02136 }
02137
02138 int rank0 = arg_0_Z.getDataPointRank();
02139 int rank1 = arg_1_Z.getDataPointRank();
02140 DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
02141 DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
02142 int size0 = arg_0_Z.getDataPointSize();
02143 int size1 = arg_1_Z.getDataPointSize();
02144
02145 Data res;
02146
02147 if (shape0 == shape1) {
02148 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
02149 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02150 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02151 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02152 double *ptr_2 = &(res.getDataAtOffsetRW(0));
02153
02154 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02155 }
02156 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
02157
02158
02159 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02160
02161
02162 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02163
02164
02165 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02166 res.tag();
02167 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02168
02169
02170 int offset_0 = tmp_0->getPointOffset(0,0);
02171 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02172
02173
02174 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02175 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02176
02177
02178 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02179
02180 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02181 DataTagged::DataMapType::const_iterator i;
02182 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02183 tmp_2->addTag(i->first);
02184 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02185 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02186
02187 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02188 }
02189
02190 }
02191 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
02192 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02193 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02194 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02195 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02196
02197 int sampleNo_1,dataPointNo_1;
02198 int numSamples_1 = arg_1_Z.getNumSamples();
02199 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02200 int offset_0 = tmp_0->getPointOffset(0,0);
02201 res.requireWrite();
02202 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02203 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02204 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02205 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02206 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02207 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02208 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02209 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02210 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02211 }
02212 }
02213
02214 }
02215 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
02216
02217 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02218
02219
02220 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02221
02222
02223 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());
02224 res.tag();
02225 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02226
02227
02228 int offset_1 = tmp_1->getPointOffset(0,0);
02229
02230 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02231
02232 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02233 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02234
02235 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02236
02237 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02238 DataTagged::DataMapType::const_iterator i;
02239 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02240 tmp_2->addTag(i->first);
02241 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02242 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02243 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02244 }
02245
02246 }
02247 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
02248
02249 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02250
02251
02252 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02253
02254
02255 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02256 res.tag();
02257 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02258
02259
02260 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02261 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02262 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02263
02264
02265 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02266
02267 DataTagged::DataMapType::const_iterator i;
02268 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02269 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02270 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02271 tmp_2->addTag(i->first);
02272 }
02273 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02274 tmp_2->addTag(i->first);
02275 }
02276
02277 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02278 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02279
02280 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02281 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02282 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02283
02284 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02285 }
02286
02287 }
02288 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
02289
02290 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02291 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02292 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02293 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02294
02295 int sampleNo_0,dataPointNo_0;
02296 int numSamples_0 = arg_0_Z.getNumSamples();
02297 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02298 res.requireWrite();
02299 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02300 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02301 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
02302 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02303 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02304 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02305 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02306 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02307 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02308 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02309 }
02310 }
02311
02312 }
02313 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
02314 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02315 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02316 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02317 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02318
02319 int sampleNo_0,dataPointNo_0;
02320 int numSamples_0 = arg_0_Z.getNumSamples();
02321 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02322 int offset_1 = tmp_1->getPointOffset(0,0);
02323 res.requireWrite();
02324 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02325 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02326 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02327 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02328 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02329
02330 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02331 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02332 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02333
02334
02335 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02336 }
02337 }
02338
02339 }
02340 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
02341
02342 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02343 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02344 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02345 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02346
02347 int sampleNo_0,dataPointNo_0;
02348 int numSamples_0 = arg_0_Z.getNumSamples();
02349 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02350 res.requireWrite();
02351 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02352 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02353 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02354 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02355 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02356 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02357 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02358 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02359 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02360 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02361 }
02362 }
02363
02364 }
02365 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
02366
02367 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02368 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02369 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02370 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02371
02372 int sampleNo_0,dataPointNo_0;
02373 int numSamples_0 = arg_0_Z.getNumSamples();
02374 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02375 res.requireWrite();
02376 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02377 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02378 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02379 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02380 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02381 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02382 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02383 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02384 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02385 tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
02386 }
02387 }
02388
02389 }
02390 else {
02391 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02392 }
02393
02394 } else if (0 == rank0) {
02395 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
02396 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
02397 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02398 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02399 double *ptr_2 = &(res.getDataAtOffsetRW(0));
02400 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02401 }
02402 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
02403
02404
02405 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02406
02407
02408 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02409
02410
02411 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
02412 res.tag();
02413 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02414
02415
02416 int offset_0 = tmp_0->getPointOffset(0,0);
02417 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02418
02419 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02420 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02421
02422
02423 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02424
02425 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02426 DataTagged::DataMapType::const_iterator i;
02427 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02428 tmp_2->addTag(i->first);
02429 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02430 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02431 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02432 }
02433
02434 }
02435 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
02436
02437 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true);
02438 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02439 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02440 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02441
02442 int sampleNo_1,dataPointNo_1;
02443 int numSamples_1 = arg_1_Z.getNumSamples();
02444 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02445 int offset_0 = tmp_0->getPointOffset(0,0);
02446 res.requireWrite();
02447 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02448 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02449 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02450 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02451 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02452 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02453 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02454 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02455 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02456
02457 }
02458 }
02459
02460 }
02461 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
02462
02463
02464 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02465
02466
02467 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02468
02469
02470 res = Data(0.0, shape1, arg_0_Z.getFunctionSpace());
02471 res.tag();
02472 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02473
02474
02475 int offset_1 = tmp_1->getPointOffset(0,0);
02476 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02477
02478
02479 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02480 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02481
02482
02483
02484 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02485
02486 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02487 DataTagged::DataMapType::const_iterator i;
02488 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02489 tmp_2->addTag(i->first);
02490 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02491 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02492
02493 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02494 }
02495
02496 }
02497 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
02498
02499
02500 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02501
02502
02503 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02504
02505
02506 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
02507 res.tag();
02508 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02509
02510
02511 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02512 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02513 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02514
02515
02516 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02517
02518 DataTagged::DataMapType::const_iterator i;
02519 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02520 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02521 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02522 tmp_2->addTag(i->first);
02523 }
02524 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02525 tmp_2->addTag(i->first);
02526 }
02527
02528 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02529 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02530 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02531 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02532 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02533
02534 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02535 }
02536
02537 }
02538 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
02539
02540
02541 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true);
02542 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02543 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02544 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02545
02546 int sampleNo_0,dataPointNo_0;
02547 int numSamples_0 = arg_0_Z.getNumSamples();
02548 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02549 res.requireWrite();
02550 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02551 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02552 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
02553 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02554 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02555 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02556 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02557 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02558 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02559 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02560 }
02561 }
02562
02563 }
02564 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
02565 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true);
02566 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02567 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02568 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02569
02570 int sampleNo_0,dataPointNo_0;
02571 int numSamples_0 = arg_0_Z.getNumSamples();
02572 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02573 int offset_1 = tmp_1->getPointOffset(0,0);
02574 res.requireWrite();
02575 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02576 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02577 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02578 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02579 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02580 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02581 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02582 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02583 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02584 }
02585 }
02586
02587
02588 }
02589 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
02590
02591
02592 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true);
02593 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02594 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02595 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02596
02597 int sampleNo_0,dataPointNo_0;
02598 int numSamples_0 = arg_0_Z.getNumSamples();
02599 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02600 res.requireWrite();
02601 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02602 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02603 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02604 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02605 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02606 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02607 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02608 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02609 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02610 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02611 }
02612 }
02613
02614 }
02615 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
02616
02617
02618 res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true);
02619 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02620 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02621 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02622
02623 int sampleNo_0,dataPointNo_0;
02624 int numSamples_0 = arg_0_Z.getNumSamples();
02625 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02626 res.requireWrite();
02627 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02628 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02629 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02630 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02631 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02632 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02633 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02634 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02635 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02636 tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
02637 }
02638 }
02639
02640 }
02641 else {
02642 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02643 }
02644
02645 } else if (0 == rank1) {
02646 if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
02647 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02648 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02649 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
02650 double *ptr_2 = &(res.getDataAtOffsetRW(0));
02651 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02652 }
02653 else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
02654
02655
02656 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02657
02658
02659 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02660
02661
02662 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02663 res.tag();
02664 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02665
02666
02667 int offset_0 = tmp_0->getPointOffset(0,0);
02668 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02669
02670
02671 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02672 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02673
02674
02675 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02676
02677 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02678 DataTagged::DataMapType::const_iterator i;
02679 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02680 tmp_2->addTag(i->first);
02681 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02682 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02683 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02684 }
02685 }
02686 else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
02687
02688 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02689 DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
02690 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02691 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02692
02693 int sampleNo_1,dataPointNo_1;
02694 int numSamples_1 = arg_1_Z.getNumSamples();
02695 int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
02696 int offset_0 = tmp_0->getPointOffset(0,0);
02697 res.requireWrite();
02698 #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
02699 for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
02700 for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
02701 int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
02702 int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
02703 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02704 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02705 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02706 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02707 }
02708 }
02709
02710 }
02711 else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
02712
02713
02714 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02715
02716
02717 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02718
02719
02720 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());
02721 res.tag();
02722 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02723
02724
02725 int offset_1 = tmp_1->getPointOffset(0,0);
02726 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02727
02728 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02729 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02730
02731 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02732
02733 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02734 DataTagged::DataMapType::const_iterator i;
02735 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02736 tmp_2->addTag(i->first);
02737 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02738 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02739 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02740 }
02741
02742 }
02743 else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
02744
02745
02746 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02747
02748
02749 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02750
02751
02752 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
02753 res.tag();
02754 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02755
02756
02757 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02758 const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
02759 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02760
02761
02762 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02763
02764 DataTagged::DataMapType::const_iterator i;
02765 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02766 const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
02767 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02768 tmp_2->addTag(i->first);
02769 }
02770 for (i=lookup_1.begin();i!=lookup_1.end();i++) {
02771 tmp_2->addTag(i->first);
02772 }
02773
02774 const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
02775 for (i=lookup_2.begin();i!=lookup_2.end();i++) {
02776 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02777 const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
02778 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02779 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02780 }
02781
02782 }
02783 else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
02784
02785
02786 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02787 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02788 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02789 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02790
02791 int sampleNo_0,dataPointNo_0;
02792 int numSamples_0 = arg_0_Z.getNumSamples();
02793 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02794 res.requireWrite();
02795 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02796 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02797 int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
02798 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02799 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02800 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02801 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02802 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02803 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02804 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02805 }
02806 }
02807
02808 }
02809 else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
02810 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02811 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02812 DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
02813 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02814
02815 int sampleNo_0,dataPointNo_0;
02816 int numSamples_0 = arg_0_Z.getNumSamples();
02817 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02818 int offset_1 = tmp_1->getPointOffset(0,0);
02819 res.requireWrite();
02820 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02821 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02822 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02823 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02824 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02825 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02826 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02827 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02828 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02829 }
02830 }
02831
02832
02833 }
02834 else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
02835
02836
02837 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02838 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02839 DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
02840 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02841
02842 int sampleNo_0,dataPointNo_0;
02843 int numSamples_0 = arg_0_Z.getNumSamples();
02844 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02845 res.requireWrite();
02846 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02847 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02848 int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
02849 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02850 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02851 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02852 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02853 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02854 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02855 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02856 }
02857 }
02858
02859 }
02860 else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
02861
02862
02863 res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true);
02864 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02865 DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
02866 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02867
02868 int sampleNo_0,dataPointNo_0;
02869 int numSamples_0 = arg_0_Z.getNumSamples();
02870 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02871 res.requireWrite();
02872 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02873 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02874 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02875 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02876 int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
02877 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02878 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02879 const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
02880 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02881 tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
02882 }
02883 }
02884
02885 }
02886 else {
02887 throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
02888 }
02889
02890 } else {
02891 throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
02892 }
02893
02894 return res;
02895 }
02896
02897 template <typename UnaryFunction>
02898 Data
02899 C_TensorUnaryOperation(Data const &arg_0,
02900 UnaryFunction operation)
02901 {
02902 if (arg_0.isEmpty())
02903 {
02904 throw DataException("Error - Operations not permitted on instances of DataEmpty.");
02905 }
02906 if (arg_0.isLazy())
02907 {
02908 throw DataException("Error - Operations not permitted on lazy data.");
02909 }
02910
02911 Data arg_0_Z = Data(arg_0);
02912
02913
02914 const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
02915 int size0 = arg_0_Z.getDataPointSize();
02916
02917
02918 Data res;
02919
02920 if (arg_0_Z.isConstant()) {
02921 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());
02922 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
02923 double *ptr_2 = &(res.getDataAtOffsetRW(0));
02924 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
02925 }
02926 else if (arg_0_Z.isTagged()) {
02927
02928
02929 DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
02930
02931
02932 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace());
02933 res.tag();
02934 DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
02935
02936
02937 const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
02938 double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
02939
02940 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
02941
02942 const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
02943 DataTagged::DataMapType::const_iterator i;
02944 for (i=lookup_0.begin();i!=lookup_0.end();i++) {
02945 tmp_2->addTag(i->first);
02946 const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
02947 double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
02948 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
02949 }
02950
02951 }
02952 else if (arg_0_Z.isExpanded()) {
02953
02954 res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true);
02955 DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
02956 DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
02957
02958 int sampleNo_0,dataPointNo_0;
02959 int numSamples_0 = arg_0_Z.getNumSamples();
02960 int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
02961 #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
02962 for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
02963 for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
02964 int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
02965 int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
02966 const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
02967 double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
02968 tensor_unary_operation(size0, ptr_0, ptr_2, operation);
02969 }
02970 }
02971 }
02972 else {
02973 throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
02974 }
02975
02976 return res;
02977 }
02978
02979 }
02980 #endif