esys-escript — Revision 4488
Data.h
Go to the documentation of this file.
1 
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15 
16 
19 #ifndef DATA_H
20 #define DATA_H
21 #include "system_dep.h"
22 
23 #include "DataTypes.h"
24 #include "DataAbstract.h"
25 #include "DataAlgorithm.h"
26 #include "FunctionSpace.h"
27 #include "BinaryOp.h"
28 #include "UnaryOp.h"
29 #include "DataException.h"
30 
31 
32 
33 #include "DataC.h"
34 
35 #ifdef _OPENMP
36 #include <omp.h>
37 #endif
38 
39 #include "esysUtils/Esys_MPI.h"
40 #include <string>
41 #include <algorithm>
42 #include <sstream>
43 
44 #include <boost/shared_ptr.hpp>
45 #include <boost/python/object.hpp>
46 #include <boost/python/tuple.hpp>
47 
48 namespace escript {
49 
50 //
51 // Forward declaration for various implementations of Data.
52 class DataConstant;
53 class DataTagged;
54 class DataExpanded;
55 class DataLazy;
56 
70 class Data {
71 
72  public:
73 
74  // These typedefs allow function names to be cast to pointers
75  // to functions of the appropriate type when calling unaryOp etc.
76  typedef double (*UnaryDFunPtr)(double);
77  typedef double (*BinaryDFunPtr)(double,double);
78 
79 
90  Data();
91 
98  Data(const Data& inData);
99 
107  Data(const Data& inData,
108  const FunctionSpace& what);
109 
115  Data(const DataTypes::ValueType& value,
116  const DataTypes::ShapeType& shape,
117  const FunctionSpace& what=FunctionSpace(),
118  bool expanded=false);
119 
132  Data(double value,
133  const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
134  const FunctionSpace& what=FunctionSpace(),
135  bool expanded=false);
136 
145  Data(const Data& inData,
146  const DataTypes::RegionType& region);
147 
159  Data(const boost::python::object& value,
160  const FunctionSpace& what=FunctionSpace(),
161  bool expanded=false);
162 
174  Data(const WrappedArray& w, const FunctionSpace& what,
175  bool expanded=false);
176 
177 
188  Data(const boost::python::object& value,
189  const Data& other);
190 
196  Data(double value,
197  const boost::python::tuple& shape=boost::python::make_tuple(),
198  const FunctionSpace& what=FunctionSpace(),
199  bool expanded=false);
200 
206  explicit Data(DataAbstract* underlyingdata);
207 
212  explicit Data(DataAbstract_ptr underlyingdata);
213 
219  ~Data();
220 
225  void
226  copy(const Data& other);
227 
232  Data
233  copySelf();
234 
235 
240  Data
241  delay();
242 
247  void
248  delaySelf();
249 
250 
261  void
262  setProtection();
263 
270  bool
271  isProtected() const;
272 
273 
279  const boost::python::object
280  getValueOfDataPointAsTuple(int dataPointNo);
281 
287  void
288  setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
289 
295  void
296  setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
297 
303  void
304  setValueOfDataPoint(int dataPointNo, const double);
305 
310  const boost::python::object
311  getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
312 
313 
318  void
319  setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
320 
327  int
328  getTagNumber(int dpno);
329 
336  getDataC();
337 
338 
339 
346  getDataC() const;
347 
348 
354  std::string
355  toString() const;
356 
362  void
363  expand();
364 
372  void
373  tag();
374 
380  void
381  resolve();
382 
383 
392  void
393  requireWrite();
394 
401  bool
402  isExpanded() const;
403 
410  bool
411  actsExpanded() const;
412 
413 
419  bool
420  isTagged() const;
421 
427  bool
428  isConstant() const;
429 
434  bool
435  isLazy() const;
436 
441  bool
442  isReady() const;
443 
450  bool
451  isEmpty() const;
452 
// Accessor returning a const reference to this object's FunctionSpace,
// delegated to the underlying DataAbstract.
// NOTE(review): the method-name line (460, presumably
// "getFunctionSpace() const") was lost in extraction of this listing.
458  inline
459  const FunctionSpace&
461  {
462  return m_data->getFunctionSpace();
463  }
464 
470  const FunctionSpace
471  getCopyOfFunctionSpace() const;
472 
// Returns the domain of this object's function space.
// NOTE(review): the return-type line (480) is missing from this listing;
// the commented-out "const AbstractDomain&" suggests the return type was
// changed at some point — confirm against the repository source.
478  inline
479 // const AbstractDomain&
481  getDomain() const
482  {
483  return getFunctionSpace().getDomain();
484  }
485 
486 
493  inline
494 // const AbstractDomain&
495  Domain_ptr
497  {
499  }
500 
506  const AbstractDomain
507  getCopyOfDomain() const;
508 
// Rank (number of shape dimensions) of each data point, delegated to the
// underlying DataAbstract.
// NOTE(review): the method-name line (516, presumably
// "getDataPointRank() const") is missing from this listing.
514  inline
515  unsigned int
517  {
518  return m_data->getRank();
519  }
520 
526  inline
527  int
529  {
531  }
// Number of samples held by this Data object.
// NOTE(review): the method-name line (539, presumably
// "getNumSamples() const") is missing from this listing.
537  inline
538  int
540  {
541  return m_data->getNumSamples();
542  }
543 
// Number of data points per sample.
// NOTE(review): the method-name line (551, presumably
// "getNumDataPointsPerSample() const") is missing from this listing.
549  inline
550  int
552  {
553  return m_data->getNumDPPSample();
554  }
555 
562  inline
563  bool numSamplesEqual(int numDataPointsPerSample, int numSamples) const
564  {
565  return (isEmpty() ||
566  (numDataPointsPerSample==getNumDataPointsPerSample() && numSamples==getNumSamples()));
567  }
568 
574  inline
575  bool isDataPointShapeEqual(int rank, const int* dimensions) const
576  {
577  if (isEmpty())
578  return true;
579  const DataTypes::ShapeType givenShape(&dimensions[0],&dimensions[rank]);
580  return (getDataPointShape()==givenShape);
581  }
582 
// Number of double values making up one data point (product of the
// shape's dimensions), delegated to the underlying DataAbstract.
588  int
589  getNoValues() const
590  {
591  return m_data->getNoValues();
592  }
593 
594 
600  void
601  dump(const std::string fileName) const;
602 
610  const boost::python::object
611  toListOfTuples(bool scalarastuple=true);
612 
613 
622  inline
625 
626 
635  inline
638 
639 
// Returns the sample buffer associated with the given tag, delegated to
// the underlying DataAbstract.
// NOTE(review): the return-type and signature lines (648-649) are
// missing from this listing.
647  inline
650  {
651  return m_data->getSampleDataByTag(tag);
652  }
653 
662  getDataPointRO(int sampleNo, int dataPointNo);
663 
672  getDataPointRW(int sampleNo, int dataPointNo);
673 
674 
675 
// Offset of the given data point within the underlying value vector.
// NOTE(review): the return-type line (682) is missing from this listing.
681  inline
683  getDataOffset(int sampleNo,
684  int dataPointNo)
685  {
686  return m_data->getPointOffset(sampleNo,dataPointNo);
687  }
688 
// Accessor for the data-point shape.
// NOTE(review): the method-name line (696, presumably
// "getDataPointShape() const") is missing from this listing.
694  inline
695  const DataTypes::ShapeType&
697  {
698  return m_data->getShape();
699  }
700 
706  const boost::python::tuple
707  getShapeTuple() const;
708 
715  int
716  getDataPointSize() const;
717 
724  getLength() const;
725 
731  bool
732  hasNoSamples() const
733  {
734  return getLength()==0;
735  }
736 
746  void
747  setTaggedValueByName(std::string name,
748  const boost::python::object& value);
749 
760  void
761  setTaggedValue(int tagKey,
762  const boost::python::object& value);
763 
775  void
776  setTaggedValueFromCPP(int tagKey,
777  const DataTypes::ShapeType& pointshape,
778  const DataTypes::ValueType& value,
779  int dataOffset=0);
780 
781 
782 
788  void
789  copyWithMask(const Data& other,
790  const Data& mask);
791 
802  void
803  setToZero();
804 
812  Data
813  interpolate(const FunctionSpace& functionspace) const;
814 
816  Data
817  interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
818  double undef, Data& B, double Bmin, double Bstep, Data& C,
819  double Cmin, double Cstep, bool check_boundaries);
820 
822  Data
823  interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
824  double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
825 
827  Data
828  interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
829  double undef,bool check_boundaries);
830 
831 
833  Data
834  interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
835  Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
836 
837 
839  Data
840  interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
841  Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
842 
844  Data
845  interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
846  double undef,bool check_boundaries);
847 
849  Data
850  nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
851 
853  Data
854  nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
855 
863  Data
864  gradOn(const FunctionSpace& functionspace) const;
865 
867  Data
868  grad() const;
869 
875  boost::python::object
876  integrateToTuple_const() const;
877 
878 
884  boost::python::object
886 
887 
888 
895  Data
896  oneOver() const;
903  Data
904  wherePositive() const;
905 
912  Data
913  whereNegative() const;
914 
921  Data
922  whereNonNegative() const;
923 
930  Data
931  whereNonPositive() const;
932 
939  Data
940  whereZero(double tol=0.0) const;
941 
948  Data
949  whereNonZero(double tol=0.0) const;
950 
963  double
964  Lsup();
965 
967  double
968  Lsup_const() const;
969 
970 
983  double
984  sup();
985 
987  double
988  sup_const() const;
989 
990 
1003  double
1004  inf();
1005 
1007  double
1008  inf_const() const;
1009 
1010 
1011 
1018  Data
1019  abs() const;
1020 
1027  Data
1028  maxval() const;
1029 
1036  Data
1037  minval() const;
1038 
1047  const boost::python::tuple
1048  minGlobalDataPoint() const;
1049 
1058  const boost::python::tuple
1059  maxGlobalDataPoint() const;
1060 
1061 
1062 
1070  Data
1071  sign() const;
1072 
1079  Data
1080  symmetric() const;
1081 
1088  Data
1089  nonsymmetric() const;
1090 
1097  Data
1098  trace(int axis_offset) const;
1099 
1106  Data
1107  transpose(int axis_offset) const;
1108 
1116  Data
1117  eigenvalues() const;
1118 
1129  const boost::python::tuple
1130  eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1131 
1138  Data
1139  swapaxes(const int axis0, const int axis1) const;
1140 
1147  Data
1148  erf() const;
1149 
1156  Data
1157  sin() const;
1158 
1165  Data
1166  cos() const;
1167 
1174  Data
1175  tan() const;
1176 
1183  Data
1184  asin() const;
1185 
1192  Data
1193  acos() const;
1194 
1201  Data
1202  atan() const;
1203 
1210  Data
1211  sinh() const;
1212 
1219  Data
1220  cosh() const;
1221 
1228  Data
1229  tanh() const;
1230 
1237  Data
1238  asinh() const;
1239 
1246  Data
1247  acosh() const;
1248 
1255  Data
1256  atanh() const;
1257 
1264  Data
1265  log10() const;
1266 
1273  Data
1274  log() const;
1275 
1282  Data
1283  exp() const;
1284 
1291  Data
1292  sqrt() const;
1293 
1300  Data
1301  neg() const;
1302 
1310  Data
1311  pos() const;
1312 
1321  Data
1322  powD(const Data& right) const;
1323 
1332  Data
1333  powO(const boost::python::object& right) const;
1334 
1344  Data
1345  rpowO(const boost::python::object& left) const;
1346 
1354  Data& operator+=(const Data& right);
1356  Data& operator+=(const boost::python::object& right);
1357 
1359  Data& operator=(const Data& other);
1360 
1368  Data& operator-=(const Data& right);
1370  Data& operator-=(const boost::python::object& right);
1371 
1379  Data& operator*=(const Data& right);
1381  Data& operator*=(const boost::python::object& right);
1382 
1390  Data& operator/=(const Data& right);
1392  Data& operator/=(const boost::python::object& right);
1393 
1399  Data truedivD(const Data& right);
1400 
1406  Data truedivO(const boost::python::object& right);
1407 
1413  Data rtruedivO(const boost::python::object& left);
1414 
1420  boost::python::object __add__(const boost::python::object& right);
1421 
1422 
1428  boost::python::object __sub__(const boost::python::object& right);
1429 
1435  boost::python::object __rsub__(const boost::python::object& right);
1436 
1442  boost::python::object __mul__(const boost::python::object& right);
1443 
1449  boost::python::object __div__(const boost::python::object& right);
1450 
1456  boost::python::object __rdiv__(const boost::python::object& right);
1457 
1462  Data
1463  matrixInverse() const;
1464 
1470  bool
1471  probeInterpolation(const FunctionSpace& functionspace) const;
1472 
1489  Data
1490  getItem(const boost::python::object& key) const;
1491 
1504  void
1505  setItemD(const boost::python::object& key,
1506  const Data& value);
1507 
1509  void
1510  setItemO(const boost::python::object& key,
1511  const boost::python::object& value);
1512 
1513  // These following public methods should be treated as private.
1514 
1520  template <class UnaryFunction>
1522  inline
1523  void
1524  unaryOp2(UnaryFunction operation);
1525 
1534  Data
1535  getSlice(const DataTypes::RegionType& region) const;
1536 
1546  void
1547  setSlice(const Data& value,
1548  const DataTypes::RegionType& region);
1549 
1555  void
1556  print(void);
1557 
1565  int
1566  get_MPIRank(void) const;
1567 
1575  int
1576  get_MPISize(void) const;
1577 
1584  MPI_Comm
1585  get_MPIComm(void) const;
1586 
1593  DataAbstract*
1594  borrowData(void) const;
1595 
1598  borrowDataPtr(void) const;
1599 
1602  borrowReadyPtr(void) const;
1603 
1604 
1605 
1616 
1617 
1621 
1622 
1623  protected:
1624 
1625  private:
1626 
1627 template <class BinaryOp>
1628  double
1629 #ifdef ESYS_MPI
1630  lazyAlgWorker(double init, MPI_Op mpiop_type);
1631 #else
1632  lazyAlgWorker(double init);
1633 #endif
1634 
1635  double
1636  LsupWorker() const;
1637 
1638  double
1639  supWorker() const;
1640 
1641  double
1642  infWorker() const;
1643 
1644  boost::python::object
1645  integrateWorker() const;
1646 
1647  void
1648  calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1649 
1650  void
1651  calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1652 
1653  // For internal use in Data.cpp only!
1654  // other uses should call the main entry points and allow laziness
1655  Data
1656  minval_nonlazy() const;
1657 
1658  // For internal use in Data.cpp only!
1659  Data
1660  maxval_nonlazy() const;
1661 
1662 
1669  inline
1670  void
1671  operandCheck(const Data& right) const
1672  {
1673  return m_data->operandCheck(*(right.m_data.get()));
1674  }
1675 
1681  template <class BinaryFunction>
1682  inline
1683  double
1684  algorithm(BinaryFunction operation,
1685  double initial_value) const;
1686 
1694  template <class BinaryFunction>
1695  inline
1696  Data
1697  dp_algorithm(BinaryFunction operation,
1698  double initial_value) const;
1699 
1708  template <class BinaryFunction>
1709  inline
1710  void
1711  binaryOp(const Data& right,
1712  BinaryFunction operation);
1713 
1719  void
1720  typeMatchLeft(Data& right) const;
1721 
1727  void
1728  typeMatchRight(const Data& right);
1729 
1735  void
1736  initialise(const DataTypes::ValueType& value,
1737  const DataTypes::ShapeType& shape,
1738  const FunctionSpace& what,
1739  bool expanded);
1740 
1741  void
1742  initialise(const WrappedArray& value,
1743  const FunctionSpace& what,
1744  bool expanded);
1745 
1746  void
1747  initialise(const double value,
1748  const DataTypes::ShapeType& shape,
1749  const FunctionSpace& what,
1750  bool expanded);
1751 
1752  //
1753  // flag to protect the data object against any update
1755  mutable bool m_shared;
1756  bool m_lazy;
1757 
1758  //
1759  // pointer to the actual data object
1760 // boost::shared_ptr<DataAbstract> m_data;
1762 
1763 // If possible please use getReadyPtr instead.
1764 // But see warning below.
1765  const DataReady*
1766  getReady() const;
1767 
1768  DataReady*
1769  getReady();
1770 
1771 
1772 // Be wary of using this for local operations since it (temporarily) increases reference count.
1773 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1774 // getReady() instead
1776  getReadyPtr();
1777 
1779  getReadyPtr() const;
1780 
1781 
// Record whether this object is currently shared. Declared const (with
// m_shared mutable) so share bookkeeping can happen on logically-const
// objects.
1787  void updateShareStatus(bool nowshared) const
1788  {
1789  m_shared=nowshared; // m_shared is mutable
1790  }
1791 
1792  // In the isShared() method below:
1793  // A problem would occur if m_data (the address pointed to) were being modified
1794  // while the call m_data->is_shared is being executed.
1795  //
1796  // Q: So why do I think this code can be thread safe/correct?
1797  // A: We need to make some assumptions.
1798  // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1799  // 2. We assume that no constructions or assignments which will share previously unshared
1800  // will occur while this call is executing. This is consistent with the way Data:: and C are written.
1801  //
1802  // This means that the only transition we need to consider, is when a previously shared object is
1803  // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1804  // In those cases the m_shared flag changes to false after m_data has completed changing.
1805  // For any threads executing before the flag switches they will assume the object is still shared.
// Lock-free sharing test: simply reads the m_shared flag. Per the
// thread-safety discussion in the comments above, this may
// conservatively report true shortly after sharing has actually ended;
// callers must tolerate that.
1806  bool isShared() const
1807  {
1808  return m_shared;
1809 /* if (m_shared) return true;
1810  if (m_data->isShared())
1811  {
1812  updateShareStatus(true);
1813  return true;
1814  }
1815  return false;*/
1816  }
1817 
// Resolve lazy data in place. Refuses to run inside an OpenMP parallel
// region, since (as the inline comment notes) throwing out of an omp
// thread is itself forbidden and resolve() is not safe there.
// NOTE(review): the signature line (1818, presumably
// "void forceResolve()") is missing from this listing.
1819  {
1820  if (isLazy())
1821  {
1822  #ifdef _OPENMP
1823  if (omp_in_parallel())
1824  { // Yes this is throwing an exception out of an omp thread which is forbidden.
1825  throw DataException("Please do not call forceResolve() in a parallel region.");
1826  }
1827  #endif
1828  resolve();
1829  }
1830  }
1831 
// Prepare this object for in-place modification: resolve laziness, then
// deep-copy the payload if it is shared so writes cannot leak to other
// owners. Must not run in a multi-threaded section.
// NOTE(review): the signature line (1836, presumably
// "void exclusiveWrite()") and line 1849 (presumably
// "set_m_data(DataAbstract_ptr(t));", adopting the deep copy t) are
// missing from this listing — confirm against the repository source.
1837  {
1838 #ifdef _OPENMP
1839  if (omp_in_parallel())
1840  {
1841 // *((int*)0)=17;
1842  throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1843  }
1844 #endif
1845  forceResolve();
1846  if (isShared())
1847  {
1848  DataAbstract* t=m_data->deepCopy();
1850  }
1851  }
1852 
// Guard for code that requires exclusive write access to have been
// established already; throws if the data is still lazy or shared.
// NOTE(review): the signature line (1856, presumably
// "void checkExclusiveWrite()") is missing from this listing.
1857  {
1858  if (isLazy() || isShared())
1859  {
1860  throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1861  }
1862  }
1863 
1870  void set_m_data(DataAbstract_ptr p);
1871 
1872  friend class DataAbstract; // To allow calls to updateShareStatus
1873  friend class TestDomain; // so its getX will work quickly
1874 #ifdef IKNOWWHATIMDOING
1875  friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1876 #endif
1877  friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1878  friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed);
1879 
1880 };
1881 
1882 
1883 #ifdef IKNOWWHATIMDOING
1885 Data
1886 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1887 #endif
1888 
1889 
1891 Data
1892 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1893 
1894 
1895 
1900 Data randomData(const boost::python::tuple& shape,
1901  const FunctionSpace& what,
1902  long seed);
1903 
1904 
1905 } // end namespace escript
1906 
1907 
1908 // No, this is not supposed to be at the top of the file
1909 // DataAbstact needs to be declared first, then DataReady needs to be fully declared
1910 // so that I can dynamic cast between them below.
1911 #include "DataReady.h"
1912 #include "DataLazy.h"
1913 
1914 namespace escript
1915 {
1916 
// Non-owning const access to the payload as DataReady. Debug-asserts
// that the payload really is ready (i.e. not lazy).
// NOTE(review): the qualified-name line (1919, presumably
// "Data::getReady() const") is missing from this listing.
1917 inline
1918 const DataReady*
1920 {
1921  const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1922  EsysAssert((dr!=0), "Error - casting to DataReady.");
1923  return dr;
1924 }
1925 
// Non-owning mutable access to the payload as DataReady; debug-asserts
// that the downcast succeeded.
// NOTE(review): the qualified-name line (1928, presumably
// "Data::getReady()") is missing from this listing.
1926 inline
1927 DataReady*
1929 {
1930  DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1931  EsysAssert((dr!=0), "Error - casting to DataReady.");
1932  return dr;
1933 }
1934 
1935 // Be wary of using this for local operations since it (temporarily) increases reference count.
1936 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1937 // getReady() instead
// Shared-ownership access to the payload as DataReady_ptr; temporarily
// raises the reference count (see the warning in the comments above).
// NOTE(review): the return-type/name lines (1939-1940, presumably
// "DataReady_ptr" / "Data::getReadyPtr()") are missing from this
// listing.
1938 inline
1941 {
1942  DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1943  EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1944  return dr;
1945 }
1946 
1947 
// Const shared-ownership access to the payload as const_DataReady_ptr.
// NOTE(review): the return-type/name lines (1949-1950, presumably
// "const_DataReady_ptr" / "Data::getReadyPtr() const") are missing from
// this listing.
1948 inline
1951 {
1952  const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1953  EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1954  return dr;
1955 }
1956 
// Writable access to a sample's raw buffer. Lazy data must be resolved
// first (via requireWrite()); otherwise a DataException is raised.
// NOTE(review): the return-type/name lines (1958-1959, presumably the
// signature of "Data::getSampleDataRW(int sampleNo)") are missing from
// this listing.
1957 inline
1960 {
1961  if (isLazy())
1962  {
1963  throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1964  }
1965  return getReady()->getSampleDataRW(sampleNo);
1966 }
1967 
// Read-only access to a sample's raw buffer. Lazy data is resolved
// sample-by-sample via DataLazy::resolveSample; ready data is served
// directly from the payload.
// NOTE(review): the return-type/name lines (1969-1970, presumably the
// signature of "Data::getSampleDataRO(int sampleNo)") are missing from
// this listing.
1968 inline
1971 {
1972  DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1973  if (l!=0)
1974  {
1975  size_t offset=0;
1976  const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1977  return &((*res)[offset]);
1978  }
1979  return getReady()->getSampleDataRO(sampleNo);
1980 }
1981 
1982 
1983 
1987 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1988 
/**
   \brief
   Operand-swapped power helper: rpow(x,y) yields y raised to the
   power x. The reversed argument order exists to back the Python
   reflected __rpow__ operation.
*/
inline double rpow(double x,double y)
{
  const double base = y;       // note the deliberate operand swap
  const double exponent = x;
  return pow(base, exponent);
}
1996 
2002 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
2003 
2009 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
2010 
2016 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
2017 
2023 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2024 
2031 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2032 
2039 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2040 
2047 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2048 
2055 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2056 
2063 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2064 
2071 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2072 
2079 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2080 
2087 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2088 
2089 
2090 
2095 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2096 
2106 Data
2107 C_GeneralTensorProduct(Data& arg_0,
2108  Data& arg_1,
2109  int axis_offset=0,
2110  int transpose=0);
2111 
2117 inline
2118 Data
2119 Data::truedivD(const Data& right)
2120 {
2121  return *this / right;
2122 }
2123 
2129 inline
2130 Data
2131 Data::truedivO(const boost::python::object& right)
2132 {
2133  Data tmp(right, getFunctionSpace(), false);
2134  return truedivD(tmp);
2135 }
2136 
2142 inline
2143 Data
2144 Data::rtruedivO(const boost::python::object& left)
2145 {
2146  Data tmp(left, getFunctionSpace(), false);
2147  return tmp.truedivD(*this);
2148 }
2149 
// In-place binary operation: *this = operation(*this, right).
// Reconciles differing function spaces (interpolating one operand onto
// the other's space as the domain prefers), then dispatches on the
// concrete storage type (expanded / tagged / constant) so the matching
// escript::binaryOp overload runs. Lazy operands are rejected.
// NOTE(review): line 2174 (presumably
// "FunctionSpace fsl=getFunctionSpace();") is missing from this listing
// — `fsl` is used below but its declaration was lost in extraction.
2155 template <class BinaryFunction>
2156 inline
2157 void
2158 Data::binaryOp(const Data& right,
2159  BinaryFunction operation)
2160 {
2161  //
2162  // if this has a rank of zero promote it to the rank of the RHS
2163  if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2164  throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2165  }
2166 
2167  if (isLazy() || right.isLazy())
2168  {
2169  throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2170  }
2171  //
2172  // initially make the temporary a shallow copy
2173  Data tempRight(right);
2175  FunctionSpace fsr=right.getFunctionSpace();
2176  if (fsl!=fsr) {
2177  signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2178  if (intres==0)
2179  {
2180  std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
2181  msg+=fsl.toString();
2182  msg+=" ";
2183  msg+=fsr.toString();
2184  throw DataException(msg.c_str());
2185  }
2186  else if (intres==1)
2187  {
2188  // an interpolation is required so create a new Data
2189  tempRight=Data(right,fsl);
2190  }
2191  else // reverse interpolation preferred
2192  {
2193  // interpolate onto the RHS function space
2194  Data tempLeft(*this,fsr);
2195  set_m_data(tempLeft.m_data);
2196  }
2197  }
2198  operandCheck(tempRight);
2199  //
2200  // ensure this has the right type for the RHS
2201  typeMatchRight(tempRight);
2202  //
2203  // Need to cast to the concrete types so that the correct binaryOp
2204  // is called.
2205  if (isExpanded()) {
2206  //
2207  // Expanded data will be done in parallel, the right hand side can be
2208  // of any data type
2209  DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2210  EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2211  escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2212  } else if (isTagged()) {
2213  //
2214  // Tagged data is operated on serially, the right hand side can be
2215  // either DataConstant or DataTagged
2216  DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2217  EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2218  if (right.isTagged()) {
2219  DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2220  EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2221  escript::binaryOp(*leftC,*rightC,operation);
2222  } else {
2223  DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2224  EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2225  escript::binaryOp(*leftC,*rightC,operation);
2226  }
2227  } else if (isConstant()) {
2228  DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2229  DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2230  EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2231  escript::binaryOp(*leftC,*rightC,operation);
2232  }
2233 }
2234 
2242 template <class BinaryFunction>
2243 inline
2244 double
2245 Data::algorithm(BinaryFunction operation, double initial_value) const
2246 {
2247  if (isExpanded()) {
2248  DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2249  EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2250  return escript::algorithm(*leftC,operation,initial_value);
2251  } else if (isTagged()) {
2252  DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2253  EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2254  return escript::algorithm(*leftC,operation,initial_value);
2255  } else if (isConstant()) {
2256  DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2257  EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2258  return escript::algorithm(*leftC,operation,initial_value);
2259  } else if (isEmpty()) {
2260  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2261  } else if (isLazy()) {
2262  throw DataException("Error - Operations not permitted on instances of DataLazy.");
2263  } else {
2264  throw DataException("Error - Data encapsulates an unknown type.");
2265  }
2266 }
2267 
// Data-point-wise reduction: folds `operation` across the values of each
// data point, producing one scalar per point; returns a new Data of
// matching storage type. In the tagged branch the result object is
// built explicitly and ownership of resultT passes to the returned Data.
// NOTE(review): lines 2285 and 2303 (presumably declarations of the
// local `result` object used below, e.g. a scalar-shaped Data on this
// function space) are missing from this listing — confirm against the
// repository source.
2276 template <class BinaryFunction>
2277 inline
2278 Data
2279 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2280 {
2281  if (isEmpty()) {
2282  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2283  }
2284  else if (isExpanded()) {
2286  DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2287  DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2288  EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2289  EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2290  escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2291  return result;
2292  }
2293  else if (isTagged()) {
2294  DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2295  EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2296  DataTypes::ValueType defval(1);
2297  defval[0]=0;
2298  DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2299  escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2300  return Data(resultT); // note: the Data object now owns the resultT pointer
2301  }
2302  else if (isConstant()) {
2304  DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2305  DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2306  EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2307  EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2308  escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2309  return result;
2310  } else if (isLazy()) {
2311  throw DataException("Error - Operations not permitted on instances of DataLazy.");
2312  } else {
2313  throw DataException("Error - Data encapsulates an unknown type.");
2314  }
2315 }
2316 
2324 template <typename BinaryFunction>
2325 inline
2326 Data
2328  Data const &arg_1,
2329  BinaryFunction operation)
2330 {
2331  if (arg_0.isEmpty() || arg_1.isEmpty())
2332  {
2333  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2334  }
2335  if (arg_0.isLazy() || arg_1.isLazy())
2336  {
2337  throw DataException("Error - Operations not permitted on lazy data.");
2338  }
2339  // Interpolate if necessary and find an appropriate function space
2340  Data arg_0_Z, arg_1_Z;
2341  FunctionSpace fsl=arg_0.getFunctionSpace();
2342  FunctionSpace fsr=arg_1.getFunctionSpace();
2343  if (fsl!=fsr) {
2344  signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2345  if (intres==0)
2346  {
2347  std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2348  msg+=fsl.toString();
2349  msg+=" ";
2350  msg+=fsr.toString();
2351  throw DataException(msg.c_str());
2352  }
2353  else if (intres==1)
2354  {
2355  arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2356  arg_0_Z =Data(arg_0);
2357  }
2358  else // reverse interpolation preferred
2359  {
2360  arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2361  arg_1_Z = Data(arg_1);
2362  }
2363  } else {
2364  arg_0_Z = Data(arg_0);
2365  arg_1_Z = Data(arg_1);
2366  }
2367  // Get rank and shape of inputs
2368  int rank0 = arg_0_Z.getDataPointRank();
2369  int rank1 = arg_1_Z.getDataPointRank();
2370  DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2371  DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2372  int size0 = arg_0_Z.getDataPointSize();
2373  int size1 = arg_1_Z.getDataPointSize();
2374  // Declare output Data object
2375  Data res;
2376 
2377  if (shape0 == shape1) {
2378  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2379  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2380  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2381  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2382  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2383 
2384  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2385  }
2386  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2387 
2388  // Prepare the DataConstant input
2389  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2390 
2391  // Borrow DataTagged input from Data object
2392  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2393 
2394  // Prepare a DataTagged output 2
2395  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2396  res.tag();
2397  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2398 
2399  // Prepare offset into DataConstant
2400  int offset_0 = tmp_0->getPointOffset(0,0);
2401  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2402 
2403  // Get the pointers to the actual data
2404  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2405  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2406 
2407  // Compute a result for the default
2408  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2409  // Compute a result for each tag
2410  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2411  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2412  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2413  tmp_2->addTag(i->first);
2414  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2415  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2416 
2417  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2418  }
2419 
2420  }
2421  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2422  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2423  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2424  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2425  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2426 
2427  int sampleNo_1,dataPointNo_1;
2428  int numSamples_1 = arg_1_Z.getNumSamples();
2429  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2430  int offset_0 = tmp_0->getPointOffset(0,0);
2431  res.requireWrite();
2432  #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2433  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2434  for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2435  int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2436  int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2437  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2438  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2439  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2440  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2441  }
2442  }
2443 
2444  }
2445  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2446  // Borrow DataTagged input from Data object
2447  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2448 
2449  // Prepare the DataConstant input
2450  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2451 
2452  // Prepare a DataTagged output 2
2453  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2454  res.tag();
2455  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2456 
2457  // Prepare offset into DataConstant
2458  int offset_1 = tmp_1->getPointOffset(0,0);
2459 
2460  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2461  // Get the pointers to the actual data
2462  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2463  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2464  // Compute a result for the default
2465  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2466  // Compute a result for each tag
2467  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2468  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2469  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2470  tmp_2->addTag(i->first);
2471  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2472  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2473  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2474  }
2475 
2476  }
2477  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2478  // Borrow DataTagged input from Data object
2479  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2480 
2481  // Borrow DataTagged input from Data object
2482  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2483 
2484  // Prepare a DataTagged output 2
2485  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2486  res.tag(); // DataTagged output
2487  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2488 
2489  // Get the pointers to the actual data
2490  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2491  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2492  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2493 
2494  // Compute a result for the default
2495  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2496  // Merge the tags
2497  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2498  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2499  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2500  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2501  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2502  }
2503  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2504  tmp_2->addTag(i->first);
2505  }
2506  // Compute a result for each tag
2507  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2508  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2509 
2510  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2511  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2512  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2513 
2514  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2515  }
2516 
2517  }
2518  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2519  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2520  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2521  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2522  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2523  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2524 
2525  int sampleNo_0,dataPointNo_0;
2526  int numSamples_0 = arg_0_Z.getNumSamples();
2527  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2528  res.requireWrite();
2529  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2530  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2531  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2532  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2533  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2534  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2535  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2536  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2537  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2538  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2539  }
2540  }
2541 
2542  }
2543  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2544  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2545  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2546  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2547  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2548 
2549  int sampleNo_0,dataPointNo_0;
2550  int numSamples_0 = arg_0_Z.getNumSamples();
2551  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2552  int offset_1 = tmp_1->getPointOffset(0,0);
2553  res.requireWrite();
2554  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2555  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2556  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2557  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2558  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2559 
2560  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2561  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2562  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2563 
2564 
2565  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2566  }
2567  }
2568 
2569  }
2570  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2571  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2572  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2573  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2574  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2575  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2576 
2577  int sampleNo_0,dataPointNo_0;
2578  int numSamples_0 = arg_0_Z.getNumSamples();
2579  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2580  res.requireWrite();
2581  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2582  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2583  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2584  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2585  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2586  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2587  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2588  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2589  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2590  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2591  }
2592  }
2593 
2594  }
2595  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2596  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2597  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2598  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2599  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2600  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2601 
2602  int sampleNo_0,dataPointNo_0;
2603  int numSamples_0 = arg_0_Z.getNumSamples();
2604  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2605  res.requireWrite();
2606  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2607  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2608  dataPointNo_0=0;
2609 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2610  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2611  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2612  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2613  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2614  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2615  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2616  tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2617 // }
2618  }
2619 
2620  }
2621  else {
2622  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2623  }
2624 
2625  } else if (0 == rank0) {
2626  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2627  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2628  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2629  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2630  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2631  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2632  }
2633  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2634 
2635  // Prepare the DataConstant input
2636  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2637 
2638  // Borrow DataTagged input from Data object
2639  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2640 
2641  // Prepare a DataTagged output 2
2642  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2643  res.tag();
2644  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2645 
2646  // Prepare offset into DataConstant
2647  int offset_0 = tmp_0->getPointOffset(0,0);
2648  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2649 
2650  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2651  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2652 
2653  // Compute a result for the default
2654  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2655  // Compute a result for each tag
2656  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2657  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2658  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2659  tmp_2->addTag(i->first);
2660  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2661  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2662  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2663  }
2664 
2665  }
2666  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2667 
2668  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2669  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2670  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2671  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2672 
2673  int sampleNo_1;
2674  int numSamples_1 = arg_1_Z.getNumSamples();
2675  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2676  int offset_0 = tmp_0->getPointOffset(0,0);
2677  const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2678  double ptr_0 = ptr_src[0];
2679  int size = size1*numDataPointsPerSample_1;
2680  res.requireWrite();
2681  #pragma omp parallel for private(sampleNo_1) schedule(static)
2682  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2683 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2684  int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2685  int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2686 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2687  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2688  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2689  tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2690 
2691 // }
2692  }
2693 
2694  }
2695  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2696 
2697  // Borrow DataTagged input from Data object
2698  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2699 
2700  // Prepare the DataConstant input
2701  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2702 
2703  // Prepare a DataTagged output 2
2704  res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2705  res.tag();
2706  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2707 
2708  // Prepare offset into DataConstant
2709  int offset_1 = tmp_1->getPointOffset(0,0);
2710  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2711 
2712  // Get the pointers to the actual data
2713  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2714  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2715 
2716 
2717  // Compute a result for the default
2718  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2719  // Compute a result for each tag
2720  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2721  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2722  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2723  tmp_2->addTag(i->first);
2724  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2725  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2726 
2727  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2728  }
2729 
2730  }
2731  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2732 
2733  // Borrow DataTagged input from Data object
2734  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2735 
2736  // Borrow DataTagged input from Data object
2737  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2738 
2739  // Prepare a DataTagged output 2
2740  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2741  res.tag(); // DataTagged output
2742  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2743 
2744  // Get the pointers to the actual data
2745  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2746  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2747  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2748 
2749  // Compute a result for the default
2750  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2751  // Merge the tags
2752  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2753  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2754  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2755  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2756  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2757  }
2758  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2759  tmp_2->addTag(i->first);
2760  }
2761  // Compute a result for each tag
2762  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2763  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2764  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2765  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2766  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2767 
2768  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2769  }
2770 
2771  }
2772  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2773 
2774  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2775  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2776  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2777  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2778  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2779 
2780  int sampleNo_0,dataPointNo_0;
2781  int numSamples_0 = arg_0_Z.getNumSamples();
2782  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2783  res.requireWrite();
2784  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2785  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2786  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2787  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2788  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2789  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2790  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2791  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2792  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2793  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2794  }
2795  }
2796 
2797  }
2798  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2799  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2800  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2801  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2802  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2803 
2804  int sampleNo_0,dataPointNo_0;
2805  int numSamples_0 = arg_0_Z.getNumSamples();
2806  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2807  int offset_1 = tmp_1->getPointOffset(0,0);
2808  res.requireWrite();
2809  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2810  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2811  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2812  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2813  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2814  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2815  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2816  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2817  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2818  }
2819  }
2820 
2821 
2822  }
2823  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2824 
2825  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2826  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2827  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2828  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2829  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2830 
2831  int sampleNo_0,dataPointNo_0;
2832  int numSamples_0 = arg_0_Z.getNumSamples();
2833  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2834  res.requireWrite();
2835  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2836  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2837  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2838  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2839  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2840  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2841  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2842  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2843  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2844  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2845  }
2846  }
2847 
2848  }
2849  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2850 
2851  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2852  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2853  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2854  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2855  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2856 
2857  int sampleNo_0,dataPointNo_0;
2858  int numSamples_0 = arg_0_Z.getNumSamples();
2859  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2860  res.requireWrite();
2861  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2862  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2863  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2864  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2865  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2866  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2867  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2868  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2869  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2870  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2871  }
2872  }
2873 
2874  }
2875  else {
2876  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2877  }
2878 
2879  } else if (0 == rank1) {
2880  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2881  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2882  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2883  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2884  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2885  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2886  }
2887  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2888 
2889  // Prepare the DataConstant input
2890  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2891 
2892  // Borrow DataTagged input from Data object
2893  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2894 
2895  // Prepare a DataTagged output 2
2896  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2897  res.tag();
2898  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2899 
2900  // Prepare offset into DataConstant
2901  int offset_0 = tmp_0->getPointOffset(0,0);
2902  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2903 
2904  //Get the pointers to the actual data
2905  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2906  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2907 
2908  // Compute a result for the default
2909  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2910  // Compute a result for each tag
2911  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2912  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2913  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2914  tmp_2->addTag(i->first);
2915  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2916  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2917  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2918  }
2919  }
2920  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2921 
2922  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2923  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2924  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2925  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2926 
2927  int sampleNo_1,dataPointNo_1;
2928  int numSamples_1 = arg_1_Z.getNumSamples();
2929  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2930  int offset_0 = tmp_0->getPointOffset(0,0);
2931  res.requireWrite();
2932  #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2933  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2934  for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2935  int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2936  int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2937  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2938  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2939  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2940  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2941  }
2942  }
2943 
2944  }
2945  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2946 
2947  // Borrow DataTagged input from Data object
2948  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2949 
2950  // Prepare the DataConstant input
2951  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2952 
2953  // Prepare a DataTagged output 2
2954  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2955  res.tag();
2956  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2957 
2958  // Prepare offset into DataConstant
2959  int offset_1 = tmp_1->getPointOffset(0,0);
2960  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2961  // Get the pointers to the actual data
2962  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2963  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2964  // Compute a result for the default
2965  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2966  // Compute a result for each tag
2967  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2968  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2969  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2970  tmp_2->addTag(i->first);
2971  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2972  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2973  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2974  }
2975 
2976  }
2977  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2978 
2979  // Borrow DataTagged input from Data object
2980  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2981 
2982  // Borrow DataTagged input from Data object
2983  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2984 
2985  // Prepare a DataTagged output 2
2986  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2987  res.tag(); // DataTagged output
2988  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2989 
2990  // Get the pointers to the actual data
2991  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2992  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2993  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2994 
2995  // Compute a result for the default
2996  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2997  // Merge the tags
2998  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2999  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3000  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
3001  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3002  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
3003  }
3004  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
3005  tmp_2->addTag(i->first);
3006  }
3007  // Compute a result for each tag
3008  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
3009  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
3010  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3011  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
3012  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3013  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3014  }
3015 
3016  }
3017  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
3018 
3019  // After finding a common function space above the two inputs have the same numSamples and num DPPS
3020  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3021  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3022  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3023  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3024 
3025  int sampleNo_0,dataPointNo_0;
3026  int numSamples_0 = arg_0_Z.getNumSamples();
3027  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3028  res.requireWrite();
3029  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3030  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3031  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
3032  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3033  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3034  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3035  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3036  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3037  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3038  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3039  }
3040  }
3041 
3042  }
3043  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
3044  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3045  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3046  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3047  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3048 
3049  int sampleNo_0;
3050  int numSamples_0 = arg_0_Z.getNumSamples();
3051  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3052  int offset_1 = tmp_1->getPointOffset(0,0);
3053  const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3054  double ptr_1 = ptr_src[0];
3055  int size = size0 * numDataPointsPerSample_0;
3056  res.requireWrite();
3057  #pragma omp parallel for private(sampleNo_0) schedule(static)
3058  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3059 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3060  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3061  int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3062  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3063 // const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3064  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3065  tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
3066 // }
3067  }
3068 
3069 
3070  }
3071  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3072 
3073  // After finding a common function space above the two inputs have the same numSamples and num DPPS
3074  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3075  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3076  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3077  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3078 
3079  int sampleNo_0,dataPointNo_0;
3080  int numSamples_0 = arg_0_Z.getNumSamples();
3081  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3082  res.requireWrite();
3083  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3084  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3085  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3086  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3087  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3088  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3089  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3090  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3091  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3092  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3093  }
3094  }
3095 
3096  }
3097  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3098 
3099  // After finding a common function space above the two inputs have the same numSamples and num DPPS
3100  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3101  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3102  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3103  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3104 
3105  int sampleNo_0,dataPointNo_0;
3106  int numSamples_0 = arg_0_Z.getNumSamples();
3107  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3108  res.requireWrite();
3109  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3110  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3111  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3112  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3113  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3114  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3115  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3116  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3117  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3118  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3119  }
3120  }
3121 
3122  }
3123  else {
3124  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3125  }
3126 
3127  } else {
3128  throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3129  }
3130 
3131  return res;
3132 }
3133 
3134 template <typename UnaryFunction>
3135 Data
3137  UnaryFunction operation)
3138 {
3139  if (arg_0.isEmpty()) // do this before we attempt to interpolate
3140  {
3141  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3142  }
3143  if (arg_0.isLazy())
3144  {
3145  throw DataException("Error - Operations not permitted on lazy data.");
3146  }
3147  // Interpolate if necessary and find an appropriate function space
3148  Data arg_0_Z = Data(arg_0);
3149 
3150  // Get rank and shape of inputs
3151  const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3152  int size0 = arg_0_Z.getDataPointSize();
3153 
3154  // Declare output Data object
3155  Data res;
3156 
3157  if (arg_0_Z.isConstant()) {
3158  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3159  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3160  double *ptr_2 = &(res.getDataAtOffsetRW(0));
3161  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3162  }
3163  else if (arg_0_Z.isTagged()) {
3164 
3165  // Borrow DataTagged input from Data object
3166  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3167 
3168  // Prepare a DataTagged output 2
3169  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3170  res.tag();
3171  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3172 
3173  // Get the pointers to the actual data
3174  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3175  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3176  // Compute a result for the default
3177  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3178  // Compute a result for each tag
3179  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3180  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3181  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3182  tmp_2->addTag(i->first);
3183  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3184  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3185  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3186  }
3187 
3188  }
3189  else if (arg_0_Z.isExpanded()) {
3190 
3191  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3192  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3193  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3194 
3195  int sampleNo_0,dataPointNo_0;
3196  int numSamples_0 = arg_0_Z.getNumSamples();
3197  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3198  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3199  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3200  dataPointNo_0=0;
3201 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3202  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3203  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3204  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3205  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3206  tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3207 // }
3208  }
3209  }
3210  else {
3211  throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3212  }
3213 
3214  return res;
3215 }
3216 
3217 }
3218 #endif