Escript Revision_4320
Data.h
1 
2 /*****************************************************************************
3 *
4 * Copyright (c) 2003-2013 by University of Queensland
5 * http://www.uq.edu.au
6 *
7 * Primary Business: Queensland, Australia
8 * Licensed under the Open Software License version 3.0
9 * http://www.opensource.org/licenses/osl-3.0.php
10 *
11 * Development until 2012 by Earth Systems Science Computational Center (ESSCC)
12 * Development since 2012 by School of Earth Sciences
13 *
14 *****************************************************************************/
15 
16 
19 #ifndef DATA_H
20 #define DATA_H
21 #include "system_dep.h"
22 
23 #include "DataTypes.h"
24 #include "DataAbstract.h"
25 #include "DataAlgorithm.h"
26 #include "FunctionSpace.h"
27 #include "BinaryOp.h"
28 #include "UnaryOp.h"
29 #include "DataException.h"
30 
31 
32 
33 extern "C" {
34 #include "DataC.h"
35 //#include <omp.h>
36 }
37 
38 #ifdef _OPENMP
39 #include <omp.h>
40 #endif
41 
42 #include "esysUtils/Esys_MPI.h"
43 #include <string>
44 #include <algorithm>
45 #include <sstream>
46 
47 #include <boost/shared_ptr.hpp>
48 #include <boost/python/object.hpp>
49 #include <boost/python/tuple.hpp>
50 
51 namespace escript {
52 
53 //
54 // Forward declaration for various implementations of Data.
55 class DataConstant;
56 class DataTagged;
57 class DataExpanded;
58 class DataLazy;
59 
73 class Data {
74 
75  public:
76 
77  // These typedefs allow function names to be cast to pointers
78  // to functions of the appropriate type when calling unaryOp etc.
79  typedef double (*UnaryDFunPtr)(double);
80  typedef double (*BinaryDFunPtr)(double,double);
81 
82 
93  Data();
94 
101  Data(const Data& inData);
102 
110  Data(const Data& inData,
111  const FunctionSpace& what);
112 
118  Data(const DataTypes::ValueType& value,
119  const DataTypes::ShapeType& shape,
120  const FunctionSpace& what=FunctionSpace(),
121  bool expanded=false);
122 
135  Data(double value,
136  const DataTypes::ShapeType& dataPointShape=DataTypes::ShapeType(),
137  const FunctionSpace& what=FunctionSpace(),
138  bool expanded=false);
139 
148  Data(const Data& inData,
149  const DataTypes::RegionType& region);
150 
162  Data(const boost::python::object& value,
163  const FunctionSpace& what=FunctionSpace(),
164  bool expanded=false);
165 
177  Data(const WrappedArray& w, const FunctionSpace& what,
178  bool expanded=false);
179 
180 
191  Data(const boost::python::object& value,
192  const Data& other);
193 
199  Data(double value,
200  const boost::python::tuple& shape=boost::python::make_tuple(),
201  const FunctionSpace& what=FunctionSpace(),
202  bool expanded=false);
203 
209  explicit Data(DataAbstract* underlyingdata);
210 
215  explicit Data(DataAbstract_ptr underlyingdata);
216 
222  ~Data();
223 
228  void
229  copy(const Data& other);
230 
235  Data
236  copySelf();
237 
238 
243  Data
244  delay();
245 
250  void
251  delaySelf();
252 
253 
264  void
265  setProtection();
266 
273  bool
274  isProtected() const;
275 
276 
282  const boost::python::object
283  getValueOfDataPointAsTuple(int dataPointNo);
284 
290  void
291  setValueOfDataPointToPyObject(int dataPointNo, const boost::python::object& py_object);
292 
298  void
299  setValueOfDataPointToArray(int dataPointNo, const boost::python::object&);
300 
306  void
307  setValueOfDataPoint(int dataPointNo, const double);
308 
313  const boost::python::object
314  getValueOfGlobalDataPointAsTuple(int procNo, int dataPointNo);
315 
316 
321  void
322  setTupleForGlobalDataPoint(int id, int proc, boost::python::object);
323 
330  int
331  getTagNumber(int dpno);
332 
338  escriptDataC
339  getDataC();
340 
341 
342 
348  escriptDataC
349  getDataC() const;
350 
351 
357  std::string
358  toString() const;
359 
365  void
366  expand();
367 
375  void
376  tag();
377 
383  void
384  resolve();
385 
386 
395  void
396  requireWrite();
397 
404  bool
405  isExpanded() const;
406 
413  bool
414  actsExpanded() const;
415 
416 
422  bool
423  isTagged() const;
424 
430  bool
431  isConstant() const;
432 
437  bool
438  isLazy() const;
439 
444  bool
445  isReady() const;
446 
453  bool
454  isEmpty() const;
455 
461  inline
462  const FunctionSpace&
463  getFunctionSpace() const
464  {
465  return m_data->getFunctionSpace();
466  }
467 
473  const FunctionSpace
474  getCopyOfFunctionSpace() const;
475 
481  inline
482 // const AbstractDomain&
483  const_Domain_ptr
484  getDomain() const
485  {
486  return getFunctionSpace().getDomain();
487  }
488 
489 
496  inline
497 // const AbstractDomain&
498  Domain_ptr
499  getDomainPython() const
500  {
501  return getFunctionSpace().getDomainPython();
502  }
503 
509  const AbstractDomain
510  getCopyOfDomain() const;
511 
517  inline
518  unsigned int
519  getDataPointRank() const
520  {
521  return m_data->getRank();
522  }
523 
529  inline
530  int
531  getNumDataPoints() const
532  {
533  return getNumSamples() * getNumDataPointsPerSample();
534  }
540  inline
541  int
542  getNumSamples() const
543  {
544  return m_data->getNumSamples();
545  }
546 
552  inline
553  int
554  getNumDataPointsPerSample() const
555  {
556  return m_data->getNumDPPSample();
557  }
558 
559 
565  int
566  getNoValues() const
567  {
568  return m_data->getNoValues();
569  }
570 
571 
577  void
578  dump(const std::string fileName) const;
579 
587  const boost::python::object
588  toListOfTuples(bool scalarastuple=true);
589 
590 
599  inline
600  const DataAbstract::ValueType::value_type*
601  getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const;
602 
603 
612  inline
613  DataAbstract::ValueType::value_type*
614  getSampleDataRW(DataAbstract::ValueType::size_type sampleNo);
615 
616 
624  inline
625  DataAbstract::ValueType::value_type*
626  getSampleDataByTag(int tag)
627  {
628  return m_data->getSampleDataByTag(tag);
629  }
630 
638  DataTypes::ValueType::const_reference
639  getDataPointRO(int sampleNo, int dataPointNo);
640 
648  DataTypes::ValueType::reference
649  getDataPointRW(int sampleNo, int dataPointNo);
650 
651 
652 
658  inline
659  DataTypes::ValueType::size_type
660  getDataOffset(int sampleNo,
661  int dataPointNo)
662  {
663  return m_data->getPointOffset(sampleNo,dataPointNo);
664  }
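 // Usage sketch (annotation, not part of the original header; d is any Data
 // object): the offset indexes the underlying value vector, so a single
 // component of a data point can be read through the offset interface that
 // the tensor operations later in this file also use:
 //   DataTypes::ValueType::size_type off = d.getDataOffset(sampleNo, pointNo);
 //   double first = d.getDataAtOffsetRO(off);  // first value of that point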
665 
671  inline
672  const DataTypes::ShapeType&
673  getDataPointShape() const
674  {
675  return m_data->getShape();
676  }
677 
683  const boost::python::tuple
684  getShapeTuple() const;
685 
692  int
693  getDataPointSize() const;
694 
700  DataTypes::ValueType::size_type
701  getLength() const;
702 
708  bool
709  hasNoSamples() const
710  {
711  return getLength()==0;
712  }
713 
723  void
724  setTaggedValueByName(std::string name,
725  const boost::python::object& value);
726 
737  void
738  setTaggedValue(int tagKey,
739  const boost::python::object& value);
740 
752  void
753  setTaggedValueFromCPP(int tagKey,
754  const DataTypes::ShapeType& pointshape,
755  const DataTypes::ValueType& value,
756  int dataOffset=0);
757 
758 
759 
765  void
766  copyWithMask(const Data& other,
767  const Data& mask);
768 
779  void
780  setToZero();
781 
789  Data
790  interpolate(const FunctionSpace& functionspace) const;
791 
793  Data
794  interpolateFromTable3D(const WrappedArray& table, double Amin, double Astep,
795  double undef, Data& B, double Bmin, double Bstep, Data& C,
796  double Cmin, double Cstep, bool check_boundaries);
797 
799  Data
800  interpolateFromTable2D(const WrappedArray& table, double Amin, double Astep,
801  double undef, Data& B, double Bmin, double Bstep,bool check_boundaries);
802 
804  Data
805  interpolateFromTable1D(const WrappedArray& table, double Amin, double Astep,
806  double undef,bool check_boundaries);
807 
808 
810  Data
811  interpolateFromTable3DP(boost::python::object table, double Amin, double Astep,
812  Data& B, double Bmin, double Bstep, Data& C, double Cmin, double Cstep, double undef,bool check_boundaries);
813 
814 
816  Data
817  interpolateFromTable2DP(boost::python::object table, double Amin, double Astep,
818  Data& B, double Bmin, double Bstep, double undef,bool check_boundaries);
819 
821  Data
822  interpolateFromTable1DP(boost::python::object table, double Amin, double Astep,
823  double undef,bool check_boundaries);
824 
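 // Call-shape sketch (annotation; the exact handling of 'undef' and
 // 'check_boundaries' is an assumption, Data.cpp is authoritative):
 //   Data y = x.interpolateFromTable1D(table, /*Amin*/0.0, /*Astep*/0.5,
 //                                     /*undef*/1.e50, /*check_boundaries*/true);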
826  Data
827  nonuniforminterp(boost::python::object in, boost::python::object out, bool check_boundaries);
828 
830  Data
831  nonuniformslope(boost::python::object in, boost::python::object out, bool check_boundaries);
832 
840  Data
841  gradOn(const FunctionSpace& functionspace) const;
842 
844  Data
845  grad() const;
846 
852  boost::python::object
853  integrateToTuple_const() const;
854 
855 
861  boost::python::object
862  integrateToTuple();
863 
864 
865 
872  Data
873  oneOver() const;
880  Data
881  wherePositive() const;
882 
889  Data
890  whereNegative() const;
891 
898  Data
899  whereNonNegative() const;
900 
907  Data
908  whereNonPositive() const;
909 
916  Data
917  whereZero(double tol=0.0) const;
918 
925  Data
926  whereNonZero(double tol=0.0) const;
927 
940  double
941  Lsup();
942 
944  double
945  Lsup_const() const;
946 
947 
960  double
961  sup();
962 
964  double
965  sup_const() const;
966 
967 
980  double
981  inf();
982 
984  double
985  inf_const() const;
986 
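 // Summary annotation (inferred, not part of the original header): Lsup()
 // returns the largest absolute value over all data points, sup()/inf()
 // the largest/smallest value. The *_const variants presumably exist so a
 // const Data object can be queried without resolving lazy data in place.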
987 
988 
995  Data
996  abs() const;
997 
1004  Data
1005  maxval() const;
1006 
1013  Data
1014  minval() const;
1015 
1024  const boost::python::tuple
1025  minGlobalDataPoint() const;
1026 
1035  const boost::python::tuple
1036  maxGlobalDataPoint() const;
1037 
1038 
1039 
1047  Data
1048  sign() const;
1049 
1056  Data
1057  symmetric() const;
1058 
1065  Data
1066  nonsymmetric() const;
1067 
1074  Data
1075  trace(int axis_offset) const;
1076 
1083  Data
1084  transpose(int axis_offset) const;
1085 
1093  Data
1094  eigenvalues() const;
1095 
1106  const boost::python::tuple
1107  eigenvalues_and_eigenvectors(const double tol=1.e-12) const;
1108 
1115  Data
1116  swapaxes(const int axis0, const int axis1) const;
1117 
1124  Data
1125  erf() const;
1126 
1133  Data
1134  sin() const;
1135 
1142  Data
1143  cos() const;
1144 
1151  Data
1152  tan() const;
1153 
1160  Data
1161  asin() const;
1162 
1169  Data
1170  acos() const;
1171 
1178  Data
1179  atan() const;
1180 
1187  Data
1188  sinh() const;
1189 
1196  Data
1197  cosh() const;
1198 
1205  Data
1206  tanh() const;
1207 
1214  Data
1215  asinh() const;
1216 
1223  Data
1224  acosh() const;
1225 
1232  Data
1233  atanh() const;
1234 
1241  Data
1242  log10() const;
1243 
1250  Data
1251  log() const;
1252 
1259  Data
1260  exp() const;
1261 
1268  Data
1269  sqrt() const;
1270 
1277  Data
1278  neg() const;
1279 
1287  Data
1288  pos() const;
1289 
1298  Data
1299  powD(const Data& right) const;
1300 
1309  Data
1310  powO(const boost::python::object& right) const;
1311 
1321  Data
1322  rpowO(const boost::python::object& left) const;
1323 
1331  Data& operator+=(const Data& right);
1333  Data& operator+=(const boost::python::object& right);
1334 
1336  Data& operator=(const Data& other);
1337 
1345  Data& operator-=(const Data& right);
1347  Data& operator-=(const boost::python::object& right);
1348 
1356  Data& operator*=(const Data& right);
1358  Data& operator*=(const boost::python::object& right);
1359 
1367  Data& operator/=(const Data& right);
1369  Data& operator/=(const boost::python::object& right);
1370 
1376  Data truedivD(const Data& right);
1377 
1383  Data truedivO(const boost::python::object& right);
1384 
1390  Data rtruedivO(const boost::python::object& left);
1391 
1397  boost::python::object __add__(const boost::python::object& right);
1398 
1399 
1405  boost::python::object __sub__(const boost::python::object& right);
1406 
1412  boost::python::object __rsub__(const boost::python::object& right);
1413 
1419  boost::python::object __mul__(const boost::python::object& right);
1420 
1426  boost::python::object __div__(const boost::python::object& right);
1427 
1433  boost::python::object __rdiv__(const boost::python::object& right);
1434 
1439  Data
1440  matrixInverse() const;
1441 
1447  bool
1448  probeInterpolation(const FunctionSpace& functionspace) const;
1449 
1466  Data
1467  getItem(const boost::python::object& key) const;
1468 
1481  void
1482  setItemD(const boost::python::object& key,
1483  const Data& value);
1484 
1486  void
1487  setItemO(const boost::python::object& key,
1488  const boost::python::object& value);
1489 
1490  // The following public methods should be treated as private.
1491 
1497  template <class UnaryFunction>
1499  inline
1500  void
1501  unaryOp2(UnaryFunction operation);
1502 
1511  Data
1512  getSlice(const DataTypes::RegionType& region) const;
1513 
1523  void
1524  setSlice(const Data& value,
1525  const DataTypes::RegionType& region);
1526 
1532  void
1533  print(void);
1534 
1542  int
1543  get_MPIRank(void) const;
1544 
1552  int
1553  get_MPISize(void) const;
1554 
1561  MPI_Comm
1562  get_MPIComm(void) const;
1563 
1570  DataAbstract*
1571  borrowData(void) const;
1572 
1574  DataAbstract_ptr
1575  borrowDataPtr(void) const;
1576 
1578  DataReady_ptr
1579  borrowReadyPtr(void) const;
1580 
1581 
1582 
1591  DataTypes::ValueType::reference
1592  getDataAtOffsetRW(DataTypes::ValueType::size_type i);
1593 
1594 
1596  DataTypes::ValueType::const_reference
1597  getDataAtOffsetRO(DataTypes::ValueType::size_type i) const;
1598 
1599 
1600  protected:
1601 
1602  private:
1603 
1604 template <class BinaryOp>
1605  double
1606 #ifdef ESYS_MPI
1607  lazyAlgWorker(double init, MPI_Op mpiop_type);
1608 #else
1609  lazyAlgWorker(double init);
1610 #endif
1611 
1612  double
1613  LsupWorker() const;
1614 
1615  double
1616  supWorker() const;
1617 
1618  double
1619  infWorker() const;
1620 
1621  boost::python::object
1622  integrateWorker() const;
1623 
1624  void
1625  calc_minGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1626 
1627  void
1628  calc_maxGlobalDataPoint(int& ProcNo, int& DataPointNo) const;
1629 
1630  // For internal use in Data.cpp only!
1631  // other uses should call the main entry points and allow laziness
1632  Data
1633  minval_nonlazy() const;
1634 
1635  // For internal use in Data.cpp only!
1636  Data
1637  maxval_nonlazy() const;
1638 
1639 
1646  inline
1647  void
1648  operandCheck(const Data& right) const
1649  {
1650  return m_data->operandCheck(*(right.m_data.get()));
1651  }
1652 
1658  template <class BinaryFunction>
1659  inline
1660  double
1661  algorithm(BinaryFunction operation,
1662  double initial_value) const;
1663 
1671  template <class BinaryFunction>
1672  inline
1673  Data
1674  dp_algorithm(BinaryFunction operation,
1675  double initial_value) const;
1676 
1685  template <class BinaryFunction>
1686  inline
1687  void
1688  binaryOp(const Data& right,
1689  BinaryFunction operation);
1690 
1696  void
1697  typeMatchLeft(Data& right) const;
1698 
1704  void
1705  typeMatchRight(const Data& right);
1706 
1712  void
1713  initialise(const DataTypes::ValueType& value,
1714  const DataTypes::ShapeType& shape,
1715  const FunctionSpace& what,
1716  bool expanded);
1717 
1718  void
1719  initialise(const WrappedArray& value,
1720  const FunctionSpace& what,
1721  bool expanded);
1722 
1723  void
1724  initialise(const double value,
1725  const DataTypes::ShapeType& shape,
1726  const FunctionSpace& what,
1727  bool expanded);
1728 
1729  //
1730  // flag to protect the data object against any update
1732  mutable bool m_shared;
1733  bool m_lazy;
1734 
1735  //
1736  // pointer to the actual data object
1737 // boost::shared_ptr<DataAbstract> m_data;
1738  DataAbstract_ptr m_data;
1739 
1740 // If possible please use getReadyPtr instead.
1741 // But see warning below.
1742  const DataReady*
1743  getReady() const;
1744 
1745  DataReady*
1746  getReady();
1747 
1748 
1749 // Be wary of using this for local operations since it (temporarily) increases reference count.
1750 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1751 // getReady() instead
1752  DataReady_ptr
1753  getReadyPtr();
1754 
1755  const_DataReady_ptr
1756  getReadyPtr() const;
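 // Sketch of the guideline above (annotation): prefer the borrowed pointer
 // for short-lived local access,
 //   const double* p = getReady()->getSampleDataRO(sampleNo);
 // and reserve the smart pointer for code that must keep the object alive:
 //   DataReady_ptr keep = getReadyPtr();  // bumps the reference count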
1757 
1758 
1764  void updateShareStatus(bool nowshared) const
1765  {
1766  m_shared=nowshared; // m_shared is mutable
1767  }
1768 
1769  // In the isShared() method below:
1770  // A problem would occur if m_data (the address pointed to) were being modified
1771  // while the call m_data->is_shared is being executed.
1772  //
1773  // Q: So why do I think this code can be thread safe/correct?
1774  // A: We need to make some assumptions.
1775  // 1. We assume it is acceptable to return true under some conditions when we aren't shared.
1776  // 2. We assume that no constructions or assignments which will share previously unshared
1777  // data will occur while this call is executing. This is consistent with the way Data:: and C are written.
1778  //
1779  // This means that the only transition we need to consider, is when a previously shared object is
1780  // not shared anymore. ie. the other objects have been destroyed or a deep copy has been made.
1781  // In those cases the m_shared flag changes to false after m_data has completed changing.
1782  // For any threads executing before the flag switches they will assume the object is still shared.
1783  bool isShared() const
1784  {
1785  return m_shared;
1786 /* if (m_shared) return true;
1787  if (m_data->isShared())
1788  {
1789  updateShareStatus(true);
1790  return true;
1791  }
1792  return false;*/
1793  }
1794 
1795  void forceResolve()
1796  {
1797  if (isLazy())
1798  {
1799  #ifdef _OPENMP
1800  if (omp_in_parallel())
1801  { // Yes this is throwing an exception out of an omp thread which is forbidden.
1802  throw DataException("Please do not call forceResolve() in a parallel region.");
1803  }
1804  #endif
1805  resolve();
1806  }
1807  }
1808 
1813  void exclusiveWrite()
1814  {
1815 #ifdef _OPENMP
1816  if (omp_in_parallel())
1817  {
1818 // *((int*)0)=17;
1819  throw DataException("Programming error. Please do not run exclusiveWrite() in multi-threaded sections.");
1820  }
1821 #endif
1822  forceResolve();
1823  if (isShared())
1824  {
1825  DataAbstract* t=m_data->deepCopy();
1826  set_m_data(DataAbstract_ptr(t));
1827  }
1828  }
1829 
1833  void checkExclusiveWrite()
1834  {
1835  if (isLazy() || isShared())
1836  {
1837  throw DataException("Programming error. ExclusiveWrite required - please call requireWrite()");
1838  }
1839  }
1840 
1847  void set_m_data(DataAbstract_ptr p);
1848 
1849  friend class DataAbstract; // To allow calls to updateShareStatus
1850  friend class TestDomain; // so its getX will work quickly
1851 #ifdef IKNOWWHATIMDOING
1852  friend ESCRIPT_DLL_API Data applyBinaryCFunction(boost::python::object cfunc, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1853 #endif
1854  friend ESCRIPT_DLL_API Data condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1855  friend ESCRIPT_DLL_API Data randomData(const boost::python::tuple& shape, const FunctionSpace& what, long seed);
1856 
1857 };
1858 
1859 
1860 #ifdef IKNOWWHATIMDOING
1861 ESCRIPT_DLL_API
1862 Data
1863 applyBinaryCFunction(boost::python::object func, boost::python::tuple shape, escript::Data& d, escript::Data& e);
1864 #endif
1865 
1866 
1867 ESCRIPT_DLL_API
1868 Data
1869 condEval(escript::Data& mask, escript::Data& trueval, escript::Data& falseval);
1870 
1871 
1872 
1876 ESCRIPT_DLL_API
1877 Data randomData(const boost::python::tuple& shape,
1878  const FunctionSpace& what,
1879  long seed);
1880 
1881 
1882 } // end namespace escript
1883 
1884 
1885 // No, this is not supposed to be at the top of the file
1886 // DataAbstract needs to be declared first, then DataReady needs to be fully declared
1887 // so that I can dynamic cast between them below.
1888 #include "DataReady.h"
1889 #include "DataLazy.h"
1890 
1891 namespace escript
1892 {
1893 
1894 inline
1895 const DataReady*
1896 Data::getReady() const
1897 {
1898  const DataReady* dr=dynamic_cast<const DataReady*>(m_data.get());
1899  EsysAssert((dr!=0), "Error - casting to DataReady.");
1900  return dr;
1901 }
1902 
1903 inline
1904 DataReady*
1905 Data::getReady()
1906 {
1907  DataReady* dr=dynamic_cast<DataReady*>(m_data.get());
1908  EsysAssert((dr!=0), "Error - casting to DataReady.");
1909  return dr;
1910 }
1911 
1912 // Be wary of using this for local operations since it (temporarily) increases reference count.
1913 // If you are just using this to call a method on DataReady instead of DataAbstract consider using
1914 // getReady() instead
1915 inline
1916 DataReady_ptr
1917 Data::getReadyPtr()
1918 {
1919  DataReady_ptr dr=boost::dynamic_pointer_cast<DataReady>(m_data);
1920  EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1921  return dr;
1922 }
1923 
1924 
1925 inline
1926 const_DataReady_ptr
1927 Data::getReadyPtr() const
1928 {
1929  const_DataReady_ptr dr=boost::dynamic_pointer_cast<const DataReady>(m_data);
1930  EsysAssert((dr.get()!=0), "Error - casting to DataReady.");
1931  return dr;
1932 }
1933 
1934 inline
1935 DataAbstract::ValueType::value_type*
1936 Data::getSampleDataRW(DataAbstract::ValueType::size_type sampleNo)
1937 {
1938  if (isLazy())
1939  {
1940  throw DataException("Error, attempt to acquire RW access to lazy data. Please call requireWrite() first.");
1941  }
1942  return getReady()->getSampleDataRW(sampleNo);
1943 }
1944 
1945 inline
1946 const DataAbstract::ValueType::value_type*
1947 Data::getSampleDataRO(DataAbstract::ValueType::size_type sampleNo) const
1948 {
1949  DataLazy* l=dynamic_cast<DataLazy*>(m_data.get());
1950  if (l!=0)
1951  {
1952  size_t offset=0;
1953  const DataTypes::ValueType* res=l->resolveSample(sampleNo,offset);
1954  return &((*res)[offset]);
1955  }
1956  return getReady()->getSampleDataRO(sampleNo);
1957 }
1958 
1959 
1960 
1964 char *Escript_MPI_appendRankToFileName(const char *, int, int);
1965 
1969 inline double rpow(double x,double y)
1970 {
1971  return pow(y,x); // arguments intentionally reversed: rpow(x,y) yields y**x (reflected pow)
1972 }
1973 
1979 ESCRIPT_DLL_API Data operator+(const Data& left, const Data& right);
1980 
1986 ESCRIPT_DLL_API Data operator-(const Data& left, const Data& right);
1987 
1993 ESCRIPT_DLL_API Data operator*(const Data& left, const Data& right);
1994 
2000 ESCRIPT_DLL_API Data operator/(const Data& left, const Data& right);
2001 
2008 ESCRIPT_DLL_API Data operator+(const Data& left, const boost::python::object& right);
2009 
2016 ESCRIPT_DLL_API Data operator-(const Data& left, const boost::python::object& right);
2017 
2024 ESCRIPT_DLL_API Data operator*(const Data& left, const boost::python::object& right);
2025 
2032 ESCRIPT_DLL_API Data operator/(const Data& left, const boost::python::object& right);
2033 
2040 ESCRIPT_DLL_API Data operator+(const boost::python::object& left, const Data& right);
2041 
2048 ESCRIPT_DLL_API Data operator-(const boost::python::object& left, const Data& right);
2049 
2056 ESCRIPT_DLL_API Data operator*(const boost::python::object& left, const Data& right);
2057 
2064 ESCRIPT_DLL_API Data operator/(const boost::python::object& left, const Data& right);
2065 
2066 
2067 
2072 ESCRIPT_DLL_API std::ostream& operator<<(std::ostream& o, const Data& data);
2073 
2082 ESCRIPT_DLL_API
2083 Data
2084 C_GeneralTensorProduct(Data& arg_0,
2085  Data& arg_1,
2086  int axis_offset=0,
2087  int transpose=0);
2088 
2094 inline
2095 Data
2096 Data::truedivD(const Data& right)
2097 {
2098  return *this / right;
2099 }
2100 
2106 inline
2107 Data
2108 Data::truedivO(const boost::python::object& right)
2109 {
2110  Data tmp(right, getFunctionSpace(), false);
2111  return truedivD(tmp);
2112 }
2113 
2119 inline
2120 Data
2121 Data::rtruedivO(const boost::python::object& left)
2122 {
2123  Data tmp(left, getFunctionSpace(), false);
2124  return tmp.truedivD(*this);
2125 }
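// Mapping annotation (assumption about the python bindings, which are not
// shown in this header): truedivD/truedivO back Data.__truediv__ (d / x),
// while rtruedivO backs Data.__rtruediv__ (x / d); hence the operand swap
// through tmp.truedivD(*this) above.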
2126 
2132 template <class BinaryFunction>
2133 inline
2134 void
2135 Data::binaryOp(const Data& right,
2136  BinaryFunction operation)
2137 {
2138  //
2139  // a rank-zero object cannot be updated in place with a higher-rank RHS
2140  if (getDataPointRank()==0 && right.getDataPointRank()!=0) {
2141  throw DataException("Error - attempt to update rank zero object with object with rank bigger than zero.");
2142  }
2143 
2144  if (isLazy() || right.isLazy())
2145  {
2146  throw DataException("Programmer error - attempt to call binaryOp with Lazy Data.");
2147  }
2148  //
2149  // initially make the temporary a shallow copy
2150  Data tempRight(right);
2151  FunctionSpace fsl=getFunctionSpace();
2152  FunctionSpace fsr=right.getFunctionSpace();
2153  if (fsl!=fsr) {
2154  signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2155  if (intres==0)
2156  {
2157  std::string msg="Error - attempt to combine incompatible FunctionSpaces.";
2158  msg+=fsl.toString();
2159  msg+=" ";
2160  msg+=fsr.toString();
2161  throw DataException(msg.c_str());
2162  }
2163  else if (intres==1)
2164  {
2165  // an interpolation is required so create a new Data
2166  tempRight=Data(right,fsl);
2167  }
2168  else // reverse interpolation preferred
2169  {
2170  // interpolate onto the RHS function space
2171  Data tempLeft(*this,fsr);
2172  set_m_data(tempLeft.m_data);
2173  }
2174  }
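  // Annotation (inferred from the branches above): preferredInterpolationOnDomain
  // returns 0 if the spaces cannot be combined, 1 if the RHS should be
  // interpolated onto the LHS space, and any other value if the LHS should
  // be moved to the RHS space instead.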
2175  operandCheck(tempRight);
2176  //
2177  // ensure this has the right type for the RHS
2178  typeMatchRight(tempRight);
2179  //
2180  // Need to cast to the concrete types so that the correct binaryOp
2181  // is called.
2182  if (isExpanded()) {
2183  //
2184  // Expanded data will be done in parallel, the right hand side can be
2185  // of any data type
2186  DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2187  EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2188  escript::binaryOp(*leftC,*(tempRight.getReady()),operation);
2189  } else if (isTagged()) {
2190  //
2191  // Tagged data is operated on serially, the right hand side can be
2192  // either DataConstant or DataTagged
2193  DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2194  EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2195  if (right.isTagged()) {
2196  DataTagged* rightC=dynamic_cast<DataTagged*>(tempRight.m_data.get());
2197  EsysAssert((rightC!=0), "Programming error - casting to DataTagged.");
2198  escript::binaryOp(*leftC,*rightC,operation);
2199  } else {
2200  DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2201  EsysAssert((rightC!=0), "Programming error - casting to DataConstant.");
2202  escript::binaryOp(*leftC,*rightC,operation);
2203  }
2204  } else if (isConstant()) {
2205  DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2206  DataConstant* rightC=dynamic_cast<DataConstant*>(tempRight.m_data.get());
2207  EsysAssert((leftC!=0 && rightC!=0), "Programming error - casting to DataConstant.");
2208  escript::binaryOp(*leftC,*rightC,operation);
2209  }
2210 }
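// Illustrative instantiation (annotation; the real call sites are the
// arithmetic operators implemented in Data.cpp, and <functional> is assumed):
//   lhs.binaryOp(rhs, std::plus<double>());  // elementwise lhs op= rhs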
2211 
2219 template <class BinaryFunction>
2220 inline
2221 double
2222 Data::algorithm(BinaryFunction operation, double initial_value) const
2223 {
2224  if (isExpanded()) {
2225  DataExpanded* leftC=dynamic_cast<DataExpanded*>(m_data.get());
2226  EsysAssert((leftC!=0), "Programming error - casting to DataExpanded.");
2227  return escript::algorithm(*leftC,operation,initial_value);
2228  } else if (isTagged()) {
2229  DataTagged* leftC=dynamic_cast<DataTagged*>(m_data.get());
2230  EsysAssert((leftC!=0), "Programming error - casting to DataTagged.");
2231  return escript::algorithm(*leftC,operation,initial_value);
2232  } else if (isConstant()) {
2233  DataConstant* leftC=dynamic_cast<DataConstant*>(m_data.get());
2234  EsysAssert((leftC!=0), "Programming error - casting to DataConstant.");
2235  return escript::algorithm(*leftC,operation,initial_value);
2236  } else if (isEmpty()) {
2237  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2238  } else if (isLazy()) {
2239  throw DataException("Error - Operations not permitted on instances of DataLazy.");
2240  } else {
2241  throw DataException("Error - Data encapsulates an unknown type.");
2242  }
2243 }
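// Illustrative instantiation (annotation): the whole-object reductions are
// expected to feed functors from DataAlgorithm.h through here, along the
// lines of
//   double m = algorithm(AbsMax<double>(), 0);  // Lsup-style reduction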
2244 
2253 template <class BinaryFunction>
2254 inline
2255 Data
2256 Data::dp_algorithm(BinaryFunction operation, double initial_value) const
2257 {
2258  if (isEmpty()) {
2259  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2260  }
2261  else if (isExpanded()) {
2262  Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2263  DataExpanded* dataE=dynamic_cast<DataExpanded*>(m_data.get());
2264  DataExpanded* resultE=dynamic_cast<DataExpanded*>(result.m_data.get());
2265  EsysAssert((dataE!=0), "Programming error - casting data to DataExpanded.");
2266  EsysAssert((resultE!=0), "Programming error - casting result to DataExpanded.");
2267  escript::dp_algorithm(*dataE,*resultE,operation,initial_value);
2268  return result;
2269  }
2270  else if (isTagged()) {
2271  DataTagged* dataT=dynamic_cast<DataTagged*>(m_data.get());
2272  EsysAssert((dataT!=0), "Programming error - casting data to DataTagged.");
2273  DataTypes::ValueType defval(1);
2274  defval[0]=0;
2275  DataTagged* resultT=new DataTagged(getFunctionSpace(), DataTypes::scalarShape, defval, dataT);
2276  escript::dp_algorithm(*dataT,*resultT,operation,initial_value);
2277  return Data(resultT); // note: the Data object now owns the resultT pointer
2278  }
2279  else if (isConstant()) {
2280  Data result(0,DataTypes::ShapeType(),getFunctionSpace(),isExpanded());
2281  DataConstant* dataC=dynamic_cast<DataConstant*>(m_data.get());
2282  DataConstant* resultC=dynamic_cast<DataConstant*>(result.m_data.get());
2283  EsysAssert((dataC!=0), "Programming error - casting data to DataConstant.");
2284  EsysAssert((resultC!=0), "Programming error - casting result to DataConstant.");
2285  escript::dp_algorithm(*dataC,*resultC,operation,initial_value);
2286  return result;
2287  } else if (isLazy()) {
2288  throw DataException("Error - Operations not permitted on instances of DataLazy.");
2289  } else {
2290  throw DataException("Error - Data encapsulates an unknown type.");
2291  }
2292 }
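// Illustrative instantiation (annotation): per-data-point reductions such
// as maxval() would be built on this, along the lines of
//   Data mx = dp_algorithm(FMax<double>(), -std::numeric_limits<double>::max());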
2293 
2301 template <typename BinaryFunction>
2302 inline
2303 Data
2304 C_TensorBinaryOperation(Data const &arg_0,
2305  Data const &arg_1,
2306  BinaryFunction operation)
2307 {
2308  if (arg_0.isEmpty() || arg_1.isEmpty())
2309  {
2310  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
2311  }
2312  if (arg_0.isLazy() || arg_1.isLazy())
2313  {
2314  throw DataException("Error - Operations not permitted on lazy data.");
2315  }
2316  // Interpolate if necessary and find an appropriate function space
2317  Data arg_0_Z, arg_1_Z;
2318  FunctionSpace fsl=arg_0.getFunctionSpace();
2319  FunctionSpace fsr=arg_1.getFunctionSpace();
2320  if (fsl!=fsr) {
2321  signed char intres=fsl.getDomain()->preferredInterpolationOnDomain(fsr.getTypeCode(), fsl.getTypeCode());
2322  if (intres==0)
2323  {
2324  std::string msg="Error - C_TensorBinaryOperation: arguments have incompatible function spaces.";
2325  msg+=fsl.toString();
2326  msg+=" ";
2327  msg+=fsr.toString();
2328  throw DataException(msg.c_str());
2329  }
2330  else if (intres==1)
2331  {
2332  arg_1_Z=arg_1.interpolate(arg_0.getFunctionSpace());
2333  arg_0_Z =Data(arg_0);
2334  }
2335  else // reverse interpolation preferred
2336  {
2337  arg_0_Z = arg_0.interpolate(arg_1.getFunctionSpace());
2338  arg_1_Z = Data(arg_1);
2339  }
2340  } else {
2341  arg_0_Z = Data(arg_0);
2342  arg_1_Z = Data(arg_1);
2343  }
2344  // Get rank and shape of inputs
2345  int rank0 = arg_0_Z.getDataPointRank();
2346  int rank1 = arg_1_Z.getDataPointRank();
2347  DataTypes::ShapeType shape0 = arg_0_Z.getDataPointShape();
2348  DataTypes::ShapeType shape1 = arg_1_Z.getDataPointShape();
2349  int size0 = arg_0_Z.getDataPointSize();
2350  int size1 = arg_1_Z.getDataPointSize();
2351  // Declare output Data object
2352  Data res;
2353 
2354  if (shape0 == shape1) {
2355  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2356  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2357  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2358  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2359  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2360 
2361  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2362  }
2363  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2364 
2365  // Prepare the DataConstant input
2366  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2367 
2368  // Borrow DataTagged input from Data object
2369  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2370 
2371  // Prepare a DataTagged output 2
2372  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2373  res.tag();
2374  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2375 
2376  // Prepare offset into DataConstant
2377  int offset_0 = tmp_0->getPointOffset(0,0);
2378  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2379 
2380  // Get the pointers to the actual data
2381  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2382  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2383 
2384  // Compute a result for the default
2385  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2386  // Compute a result for each tag
2387  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2388  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2389  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2390  tmp_2->addTag(i->first);
2391  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2392  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2393 
2394  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2395  }
2396 
2397  }
2398  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2399  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2400  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2401  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2402  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2403 
2404  int sampleNo_1,dataPointNo_1;
2405  int numSamples_1 = arg_1_Z.getNumSamples();
2406  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2407  int offset_0 = tmp_0->getPointOffset(0,0);
2408  res.requireWrite();
2409  #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2410  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2411  for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2412  int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2413  int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2414  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2415  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2416  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2417  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2418  }
2419  }
2420 
2421  }
2422  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2423  // Borrow DataTagged input from Data object
2424  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2425 
2426  // Prepare the DataConstant input
2427  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2428 
2429  // Prepare a DataTagged output 2
2430  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2431  res.tag();
2432  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2433 
2434  // Prepare offset into DataConstant
2435  int offset_1 = tmp_1->getPointOffset(0,0);
2436 
2437  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2438  // Get the pointers to the actual data
2439  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2440  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2441  // Compute a result for the default
2442  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2443  // Compute a result for each tag
2444  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2445  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2446  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2447  tmp_2->addTag(i->first);
2448  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2449  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2450  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2451  }
2452 
2453  }
2454  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2455  // Borrow DataTagged input from Data object
2456  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2457 
2458  // Borrow DataTagged input from Data object
2459  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2460 
2461  // Prepare a DataTagged output 2
2462  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2463  res.tag(); // DataTagged output
2464  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2465 
2466  // Get the pointers to the actual data
2467  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2468  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2469  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2470 
2471  // Compute a result for the default
2472  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2473  // Merge the tags
2474  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2475  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2476  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2477  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2478  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2479  }
2480  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2481  tmp_2->addTag(i->first);
2482  }
2483  // Compute a result for each tag
2484  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2485  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2486 
2487  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2488  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2489  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2490 
2491  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2492  }
2493 
2494  }
2495  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2496  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2497  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2498  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2499  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2500  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2501 
2502  int sampleNo_0,dataPointNo_0;
2503  int numSamples_0 = arg_0_Z.getNumSamples();
2504  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2505  res.requireWrite();
2506  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2507  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2508  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2509  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2510  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2511  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2512  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2513  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2514  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2515  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2516  }
2517  }
2518 
2519  }
2520  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2521  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2522  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2523  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2524  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2525 
2526  int sampleNo_0,dataPointNo_0;
2527  int numSamples_0 = arg_0_Z.getNumSamples();
2528  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2529  int offset_1 = tmp_1->getPointOffset(0,0);
2530  res.requireWrite();
2531  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2532  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2533  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2534  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2535  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2536 
2537  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2538  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2539  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2540 
2541 
2542  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2543  }
2544  }
2545 
2546  }
2547  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2548  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2549  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2550  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2551  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2552  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2553 
2554  int sampleNo_0,dataPointNo_0;
2555  int numSamples_0 = arg_0_Z.getNumSamples();
2556  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2557  res.requireWrite();
2558  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2559  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2560  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2561  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2562  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2563  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2564  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2565  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2566  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2567  tensor_binary_operation(size0, ptr_0, ptr_1, ptr_2, operation);
2568  }
2569  }
2570 
2571  }
2572  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2573  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2574  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2575  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2576  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2577  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2578 
2579  int sampleNo_0,dataPointNo_0;
2580  int numSamples_0 = arg_0_Z.getNumSamples();
2581  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2582  res.requireWrite();
2583  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2584  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2585  dataPointNo_0=0;
2586 // for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2587  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2588  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2589  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2590  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2591  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2592  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2593  tensor_binary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_1, ptr_2, operation);
2594 // }
2595  }
2596 
2597  }
2598  else {
2599  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2600  }
2601 
2602  } else if (0 == rank0) {
2603  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2604  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataConstant output
2605  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2606  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2607  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2608  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2609  }
2610  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2611 
2612  // Prepare the DataConstant input
2613  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2614 
2615  // Borrow DataTagged input from Data object
2616  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2617 
2618  // Prepare a DataTagged output 2
2619  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace()); // DataTagged output
2620  res.tag();
2621  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2622 
2623  // Prepare offset into DataConstant
2624  int offset_0 = tmp_0->getPointOffset(0,0);
2625  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2626 
2627  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2628  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2629 
2630  // Compute a result for the default
2631  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2632  // Compute a result for each tag
2633  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2634  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2635  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2636  tmp_2->addTag(i->first);
2637  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2638  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2639  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2640  }
2641 
2642  }
2643  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2644 
2645  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2646  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2647  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2648  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2649 
2650  int sampleNo_1;
2651  int numSamples_1 = arg_1_Z.getNumSamples();
2652  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2653  int offset_0 = tmp_0->getPointOffset(0,0);
2654  const double *ptr_src = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2655  double ptr_0 = ptr_src[0];
2656  int size = size1*numDataPointsPerSample_1;
2657  res.requireWrite();
2658  #pragma omp parallel for private(sampleNo_1) schedule(static)
2659  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2660 // for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2661  int offset_1 = tmp_1->getPointOffset(sampleNo_1,0);
2662  int offset_2 = tmp_2->getPointOffset(sampleNo_1,0);
2663 // const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2664  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2665  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2666  tensor_binary_operation(size, ptr_0, ptr_1, ptr_2, operation);
2667 
2668 // }
2669  }
2670 
2671  }
2672  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2673 
2674  // Borrow DataTagged input from Data object
2675  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2676 
2677  // Prepare the DataConstant input
2678  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2679 
2680  // Prepare a DataTagged output 2
2681  res = Data(0.0, shape1, arg_0_Z.getFunctionSpace()); // DataTagged output
2682  res.tag();
2683  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2684 
2685  // Prepare offset into DataConstant
2686  int offset_1 = tmp_1->getPointOffset(0,0);
2687  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2688 
2689  // Get the pointers to the actual data
2690  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2691  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2692 
2693 
2694  // Compute a result for the default
2695  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2696  // Compute a result for each tag
2697  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2698  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2699  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2700  tmp_2->addTag(i->first);
2701  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2702  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2703 
2704  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2705  }
2706 
2707  }
2708  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
2709 
2710  // Borrow DataTagged input from Data object
2711  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2712 
2713  // Borrow DataTagged input from Data object
2714  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2715 
2716  // Prepare a DataTagged output 2
2717  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace());
2718  res.tag(); // DataTagged output
2719  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2720 
2721  // Get the pointers to the actual data
2722  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2723  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2724  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2725 
2726  // Compute a result for the default
2727  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2728  // Merge the tags
2729  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2730  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2731  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2732  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2733  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2734  }
2735  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2736  tmp_2->addTag(i->first);
2737  }
2738  // Compute a result for each tag
2739  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2740  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2741  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2742  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2743  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2744 
2745  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2746  }
2747 
2748  }
2749  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2750 
2751  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2752  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2753  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2754  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2755  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2756 
2757  int sampleNo_0,dataPointNo_0;
2758  int numSamples_0 = arg_0_Z.getNumSamples();
2759  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2760  res.requireWrite();
2761  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2762  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2763  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // They're all the same, so just use #0
2764  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2765  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2766  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2767  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2768  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2769  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2770  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2771  }
2772  }
2773 
2774  }
2775  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
2776  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2777  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2778  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2779  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2780 
2781  int sampleNo_0,dataPointNo_0;
2782  int numSamples_0 = arg_0_Z.getNumSamples();
2783  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2784  int offset_1 = tmp_1->getPointOffset(0,0);
2785  res.requireWrite();
2786  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2787  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2788  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2789  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2790  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2791  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2792  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2793  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2794  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2795  }
2796  }
2797 
2798 
2799  }
2800  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
2801 
2802  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2803  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2804  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2805  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2806  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2807 
2808  int sampleNo_0,dataPointNo_0;
2809  int numSamples_0 = arg_0_Z.getNumSamples();
2810  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2811  res.requireWrite();
2812  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2813  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2814  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
2815  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2816  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2817  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2818  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2819  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2820  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2821  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2822  }
2823  }
2824 
2825  }
2826  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
2827 
2828  // After finding a common function space above the two inputs have the same numSamples and num DPPS
2829  res = Data(0.0, shape1, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2830  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
2831  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2832  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2833 
2834  int sampleNo_0,dataPointNo_0;
2835  int numSamples_0 = arg_0_Z.getNumSamples();
2836  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
2837  res.requireWrite();
2838  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
2839  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
2840  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
2841  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
2842  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
2843  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
2844  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2845  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2846  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2847  tensor_binary_operation(size1, ptr_0[0], ptr_1, ptr_2, operation);
2848  }
2849  }
2850 
2851  }
2852  else {
2853  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
2854  }
2855 
2856  } else if (0 == rank1) {
2857  if (arg_0_Z.isConstant() && arg_1_Z.isConstant()) {
2858  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataConstant output
2859  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
2860  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(0));
2861  double *ptr_2 = &(res.getDataAtOffsetRW(0));
2862  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2863  }
2864  else if (arg_0_Z.isConstant() && arg_1_Z.isTagged()) {
2865 
2866  // Prepare the DataConstant input
2867  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2868 
2869  // Borrow DataTagged input from Data object
2870  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2871 
2872  // Prepare a DataTagged output 2
2873  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace()); // DataTagged output
2874  res.tag();
2875  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2876 
2877  // Prepare offset into DataConstant
2878  int offset_0 = tmp_0->getPointOffset(0,0);
2879  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2880 
2881  //Get the pointers to the actual data
2882  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2883  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2884 
2885  // Compute a result for the default
2886  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2887  // Compute a result for each tag
2888  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2889  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2890  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2891  tmp_2->addTag(i->first);
2892  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2893  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2894  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2895  }
2896  }
2897  else if (arg_0_Z.isConstant() && arg_1_Z.isExpanded()) {
2898 
2899  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2900  DataConstant* tmp_0=dynamic_cast<DataConstant*>(arg_0_Z.borrowData());
2901  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
2902  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
2903 
2904  int sampleNo_1,dataPointNo_1;
2905  int numSamples_1 = arg_1_Z.getNumSamples();
2906  int numDataPointsPerSample_1 = arg_1_Z.getNumDataPointsPerSample();
2907  int offset_0 = tmp_0->getPointOffset(0,0);
2908  res.requireWrite();
2909  #pragma omp parallel for private(sampleNo_1,dataPointNo_1) schedule(static)
2910  for (sampleNo_1 = 0; sampleNo_1 < numSamples_1; sampleNo_1++) {
2911  for (dataPointNo_1 = 0; dataPointNo_1 < numDataPointsPerSample_1; dataPointNo_1++) {
2912  int offset_1 = tmp_1->getPointOffset(sampleNo_1,dataPointNo_1);
2913  int offset_2 = tmp_2->getPointOffset(sampleNo_1,dataPointNo_1);
2914  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
2915  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2916  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
2917  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2918  }
2919  }
2920 
2921  }
2922  else if (arg_0_Z.isTagged() && arg_1_Z.isConstant()) {
2923 
2924  // Borrow DataTagged input from Data object
2925  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2926 
2927  // Prepare the DataConstant input
2928  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
2929 
2930  // Prepare a DataTagged output (tmp_2)
2931  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
2932  res.tag();
2933  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2934 
2935  // Prepare offset into DataConstant
2936  int offset_1 = tmp_1->getPointOffset(0,0);
2937  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
2938  // Get the pointers to the actual data
2939  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2940  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2941  // Compute a result for the default
2942  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2943  // Compute a result for each tag
2944  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2945  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2946  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2947  tmp_2->addTag(i->first);
2948  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2949  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2950  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2951  }
2952 
2953  }
2954  else if (arg_0_Z.isTagged() && arg_1_Z.isTagged()) {
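 // Tagged x tagged: the output carries the union of both tag sets. For a
 // tag held by only one operand, getDataByTagRO on the other operand falls
 // back to that operand's default value.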
2955 
2956  // Borrow DataTagged input from Data object
2957  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2958 
2959  // Borrow DataTagged input from Data object
2960  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
2961 
2962  // Prepare a DataTagged output (tmp_2)
2963  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace());
2964  res.tag(); // DataTagged output
2965  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
2966 
2967  // Get the pointers to the actual data
2968  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
2969  const double *ptr_1 = &(tmp_1->getDefaultValueRO(0));
2970  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
2971 
2972  // Compute a result for the default
2973  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2974  // Merge the tags
2975  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
2976  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
2977  const DataTagged::DataMapType& lookup_1=tmp_1->getTagLookup();
2978  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
2979  tmp_2->addTag(i->first); // use tmp_2 to get correct shape
2980  }
2981  for (i=lookup_1.begin();i!=lookup_1.end();i++) {
2982  tmp_2->addTag(i->first);
2983  }
2984  // Compute a result for each tag
2985  const DataTagged::DataMapType& lookup_2=tmp_2->getTagLookup();
2986  for (i=lookup_2.begin();i!=lookup_2.end();i++) {
2987  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
2988  const double *ptr_1 = &(tmp_1->getDataByTagRO(i->first,0));
2989  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
2990  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
2991  }
2992 
2993  }
2994  else if (arg_0_Z.isTagged() && arg_1_Z.isExpanded()) {
2995 
2996  // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
2997  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
2998  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
2999  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3000  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3001 
3002  int sampleNo_0,dataPointNo_0;
3003  int numSamples_0 = arg_0_Z.getNumSamples();
3004  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3005  res.requireWrite();
3006  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3007  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3008  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0); // every data point of a tagged sample shares one value, so use point 0
3009  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3010  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3011  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3012  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3013  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3014  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3015  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3016  }
3017  }
3018 
3019  }
3020  else if (arg_0_Z.isExpanded() && arg_1_Z.isConstant()) {
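 // Expanded x constant scalar: the constant collapses to a single double,
 // so the usual inner data-point loop can be folded into one longer
 // operation per sample (see below).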
3021  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3022  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3023  DataConstant* tmp_1=dynamic_cast<DataConstant*>(arg_1_Z.borrowData());
3024  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3025 
3026  int sampleNo_0;
3027  int numSamples_0 = arg_0_Z.getNumSamples();
3028  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3029  int offset_1 = tmp_1->getPointOffset(0,0);
3030  const double *ptr_src = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3031  double scalar_1 = ptr_src[0]; // the rank-0 constant operand is a single value
3032  int size = size0 * numDataPointsPerSample_0;
3033  res.requireWrite();
3034  #pragma omp parallel for private(sampleNo_0) schedule(static)
3035  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3036  // No inner data-point loop: the whole sample is processed in one
3037  // call of length size.
3038  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3039  int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3040  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3041  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3042  tensor_binary_operation(size, ptr_0, scalar_1, ptr_2, operation);
3043 
3044  }
3045 
3046 
3047  }
3048  else if (arg_0_Z.isExpanded() && arg_1_Z.isTagged()) {
3049 
3050  // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
3051  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3052  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3053  DataTagged* tmp_1=dynamic_cast<DataTagged*>(arg_1_Z.borrowData());
3054  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3055 
3056  int sampleNo_0,dataPointNo_0;
3057  int numSamples_0 = arg_0_Z.getNumSamples();
3058  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3059  res.requireWrite();
3060  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3061  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3062  int offset_1 = tmp_1->getPointOffset(sampleNo_0,0);
3063  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3064  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3065  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3066  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3067  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3068  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3069  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3070  }
3071  }
3072 
3073  }
3074  else if (arg_0_Z.isExpanded() && arg_1_Z.isExpanded()) {
3075 
3076  // After interpolation to a common function space above, both inputs have the same number of samples and data points per sample
3077  res = Data(0.0, shape0, arg_1_Z.getFunctionSpace(),true); // DataExpanded output
3078  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3079  DataExpanded* tmp_1=dynamic_cast<DataExpanded*>(arg_1_Z.borrowData());
3080  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3081 
3082  int sampleNo_0,dataPointNo_0;
3083  int numSamples_0 = arg_0_Z.getNumSamples();
3084  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3085  res.requireWrite();
3086  #pragma omp parallel for private(sampleNo_0,dataPointNo_0) schedule(static)
3087  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3088  for (dataPointNo_0 = 0; dataPointNo_0 < numDataPointsPerSample_0; dataPointNo_0++) {
3089  int offset_0 = tmp_0->getPointOffset(sampleNo_0,dataPointNo_0);
3090  int offset_1 = tmp_1->getPointOffset(sampleNo_0,dataPointNo_0);
3091  int offset_2 = tmp_2->getPointOffset(sampleNo_0,dataPointNo_0);
3092  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3093  const double *ptr_1 = &(arg_1_Z.getDataAtOffsetRO(offset_1));
3094  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3095  tensor_binary_operation(size0, ptr_0, ptr_1[0], ptr_2, operation);
3096  }
3097  }
3098 
3099  }
3100  else {
3101  throw DataException("Error - C_TensorBinaryOperation: unknown combination of inputs");
3102  }
3103 
3104  } else {
3105  throw DataException("Error - C_TensorBinaryOperation: arguments have incompatible shapes");
3106  }
3107 
3108  return res;
3109 }
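// A minimal usage sketch (illustrative, not part of this header): any
// standard binary functor over doubles can serve as the operation, e.g.
// std::plus<double> from <functional>; "fs" stands for some valid
// FunctionSpace:
//
//   escript::Data a(1.0, escript::DataTypes::ShapeType(), fs, true); // expanded scalar
//   escript::Data b(2.0, escript::DataTypes::ShapeType(), fs, true);
//   escript::Data c = escript::C_TensorBinaryOperation(a, b, std::plus<double>());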
3110 
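/**
   \brief
   Apply the given unary operation to every data point of arg_0 and return
   the result as a new Data object of the same storage type (constant,
   tagged or expanded). Empty and lazy inputs are rejected.
*/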
3111 template <typename UnaryFunction>
3112 Data
3113 C_TensorUnaryOperation(Data const &arg_0,
3114  UnaryFunction operation)
3115 {
3116  if (arg_0.isEmpty()) // reject empty input before doing any work
3117  {
3118  throw DataException("Error - Operations not permitted on instances of DataEmpty.");
3119  }
3120  if (arg_0.isLazy())
3121  {
3122  throw DataException("Error - Operations not permitted on lazy data.");
3123  }
3124  // Only a single argument, so no interpolation is required; work on a shallow copy
3125  Data arg_0_Z = Data(arg_0);
3126 
3127  // Get rank and shape of inputs
3128  const DataTypes::ShapeType& shape0 = arg_0_Z.getDataPointShape();
3129  int size0 = arg_0_Z.getDataPointSize();
3130 
3131  // Declare output Data object
3132  Data res;
3133 
3134  if (arg_0_Z.isConstant()) {
3135  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataConstant output
3136  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(0));
3137  double *ptr_2 = &(res.getDataAtOffsetRW(0));
3138  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3139  }
3140  else if (arg_0_Z.isTagged()) {
3141 
3142  // Borrow DataTagged input from Data object
3143  DataTagged* tmp_0=dynamic_cast<DataTagged*>(arg_0_Z.borrowData());
3144 
3145  // Prepare a DataTagged output (tmp_2)
3146  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace()); // DataTagged output
3147  res.tag();
3148  DataTagged* tmp_2=dynamic_cast<DataTagged*>(res.borrowData());
3149 
3150  // Get the pointers to the actual data
3151  const double *ptr_0 = &(tmp_0->getDefaultValueRO(0));
3152  double *ptr_2 = &(tmp_2->getDefaultValueRW(0));
3153  // Compute a result for the default
3154  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3155  // Compute a result for each tag
3156  const DataTagged::DataMapType& lookup_0=tmp_0->getTagLookup();
3157  DataTagged::DataMapType::const_iterator i; // i->first is a tag, i->second is an offset into memory
3158  for (i=lookup_0.begin();i!=lookup_0.end();i++) {
3159  tmp_2->addTag(i->first);
3160  const double *ptr_0 = &(tmp_0->getDataByTagRO(i->first,0));
3161  double *ptr_2 = &(tmp_2->getDataByTagRW(i->first,0));
3162  tensor_unary_operation(size0, ptr_0, ptr_2, operation);
3163  }
3164 
3165  }
3166  else if (arg_0_Z.isExpanded()) {
3167 
3168  res = Data(0.0, shape0, arg_0_Z.getFunctionSpace(),true); // DataExpanded output
3169  DataExpanded* tmp_0=dynamic_cast<DataExpanded*>(arg_0_Z.borrowData());
3170  DataExpanded* tmp_2=dynamic_cast<DataExpanded*>(res.borrowData());
3171 
3172  int sampleNo_0;
3173  int numSamples_0 = arg_0_Z.getNumSamples();
3174  int numDataPointsPerSample_0 = arg_0_Z.getNumDataPointsPerSample();
3175  res.requireWrite(); // claim write access before the parallel section, as in the binary cases
3176  #pragma omp parallel for private(sampleNo_0) schedule(static)
3177  for (sampleNo_0 = 0; sampleNo_0 < numSamples_0; sampleNo_0++) {
3178  // Each sample is stored contiguously, so all of its data points are
3179  // processed in a single call starting at data point 0.
3180  int offset_0 = tmp_0->getPointOffset(sampleNo_0,0);
3181  int offset_2 = tmp_2->getPointOffset(sampleNo_0,0);
3182  const double *ptr_0 = &(arg_0_Z.getDataAtOffsetRO(offset_0));
3183  double *ptr_2 = &(res.getDataAtOffsetRW(offset_2));
3184  tensor_unary_operation(size0*numDataPointsPerSample_0, ptr_0, ptr_2, operation);
3185  }
3186  }
3187  else {
3188  throw DataException("Error - C_TensorUnaryOperation: unknown combination of inputs");
3189  }
3190 
3191  return res;
3192 }
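// A minimal usage sketch (illustrative, not part of this header): any
// callable mapping double to double can serve as the operation, e.g. the
// C library's sin, cast to the UnaryDFunPtr typedef to disambiguate the
// <cmath> overloads:
//
//   escript::Data r = escript::C_TensorUnaryOperation(a, (escript::Data::UnaryDFunPtr)::sin);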
3193 
3194 } // namespace escript
3195 #endif // DATA_H