diff -Nru normaliz-3.8.5+ds/CHANGELOG normaliz-3.8.9+ds/CHANGELOG --- normaliz-3.8.5+ds/CHANGELOG 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/CHANGELOG 2020-09-25 14:54:40.000000000 +0000 @@ -1,6 +1,28 @@ # ChangeLog -## [3.8.4] 2010-06-20 +## [3.8.9] 2030-09-25 + +-- extreme rays in floating point available +-- TriangulationGenerators replace Generators + +## [3.8.8] 2020-08-25 + +-- dual versions of face lattice, f-vector and incidence +-- rational lattices + +## [3.8.7] 2020-07-25 + +-- improvements in finding dependencies +-- Addition of IsEmptySemiopen and CoveringFace +-- source file structure changed +-- input in libnormaliz + +## [3.8.6] 2020-06-17 + +-- tests for installation in SageMath + + +## [3.8.5] 2020-06-20 -- several small improvements -- significant improvement in computation of integer hulls diff -Nru normaliz-3.8.5+ds/configure.ac normaliz-3.8.9+ds/configure.ac --- normaliz-3.8.5+ds/configure.ac 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/configure.ac 2020-09-25 14:54:40.000000000 +0000 @@ -8,7 +8,7 @@ dnl m4_define([normaliz_major_version], [3]) m4_define([normaliz_minor_version], [8]) -m4_define([normaliz_patch_version], [5]) +m4_define([normaliz_patch_version], [9]) m4_define([normaliz_version], [normaliz_major_version.normaliz_minor_version.normaliz_patch_version]) @@ -218,7 +218,7 @@ [check],[], [*],[ FLINT_CPPFLAGS="-I$with_flint/include" - FLINT_LDFLAGS="-L$with_flint/lib -Wl,-rpath,$with_flint/lib" + FLINT_LDFLAGS="-L$with_flint/lib" ] ) @@ -262,7 +262,7 @@ [check],[], [*],[ NAUTY_CPPFLAGS="-I$with_nauty/include" - NAUTY_LDFLAGS="-L$with_nauty/lib -Wl,-rpath,$with_nauty/lib" + NAUTY_LDFLAGS="-L$with_nauty/lib" ] ) @@ -304,15 +304,27 @@ [yes],[], [check],[], [*],[ - NAUTY_CPPFLAGS="-I$with_e_antic/include" - NAUTY_LDFLAGS="-L$with_e_antic/lib -Wl,-rpath,$with_e_antic/lib" + E_ANTIC_CPPFLAGS="-I$with_e_antic/include" + E_ANTIC_LDFLAGS="-L$with_e_antic/lib" ] ) AS_IF([test "x$with_e_antic" != xno], - [AC_MSG_CHECKING([whether e-antic headers and library are available]) - E_ANTIC_LIBS="-leanticxx -leantic -larb -lflint" + [ + dnl Checking for libarb is only necessary because e-antic + dnl does not have a pkgconfig file and we need to know what to + dnl link to when we compile statically. + dnl On Debian/Ubuntu, libarb is installed as libflint-arb. + AC_SEARCH_LIBS(arb_init, [arb flint-arb], []) + AS_IF([test x"$ac_cv_search_arb_init" != x"none required"], + [E_ANTIC_LIBS="-leanticxx -leantic $ac_cv_search_arb_init -lflint"], + [E_ANTIC_LIBS="-leanticxx -leantic -larb -lflint"]) + AC_MSG_CHECKING([whether e-antic headers and library are available]) + CPPFLAGS_SAVED="$CPPFLAGS" + LDFLAGS_SAVED="$LDFLAGS" LIBS_SAVED="$LIBS" + CPPFLAGS="$CPPFLAGS $E_ANTIC_CPPFLAGS" + LDFLAGS="$LDFLAGS $E_ANTIC_LDFLAGS" LIBS="$LIBS $E_ANTIC_LIBS" AC_LINK_IFELSE( [AC_LANG_PROGRAM([[#include @@ -325,6 +337,8 @@ AS_IF([test "x$with_e_antic" != xcheck], [AC_MSG_ERROR([e-antic is not available but was requested])])]) AC_MSG_RESULT($have_e_antic) + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" LIBS="$LIBS_SAVED" ]) AC_SUBST(E_ANTIC_LIBS) diff -Nru normaliz-3.8.5+ds/debian/changelog normaliz-3.8.9+ds/debian/changelog --- normaliz-3.8.5+ds/debian/changelog 2020-08-17 08:19:19.000000000 +0000 +++ normaliz-3.8.9+ds/debian/changelog 2020-10-12 10:17:34.000000000 +0000 @@ -1,8 +1,17 @@ -normaliz (3.8.5+ds-1build1) groovy; urgency=medium +normaliz (3.8.9+ds-0.1) unstable; urgency=medium - * No-change rebuild against libflint-2.6.3 + * Non-maintainer upload. 
+ * Fix team mailing list address (Closes: #970759). + * Add patch to fix numerical noise issues (Closes: #969794, #971188). + * Made deps on flint, flint-arb and e-antic unconditional. + * Bump dh compat to 13. + * New upstream version. + * Refresh patches, and drop the numerical noise one (now unneeded). + * Drop all lintian overrides: keeping warnings is harmless, hiding them + might turn harmful. + * Ship the PyNormaliz examples too. - -- Steve Langasek Mon, 17 Aug 2020 08:19:19 +0000 + -- Julien Puydt Mon, 12 Oct 2020 12:17:34 +0200 normaliz (3.8.5+ds-1) unstable; urgency=medium diff -Nru normaliz-3.8.5+ds/debian/control normaliz-3.8.9+ds/debian/control --- normaliz-3.8.5+ds/debian/control 2020-08-17 08:19:19.000000000 +0000 +++ normaliz-3.8.9+ds/debian/control 2020-10-12 10:17:34.000000000 +0000 @@ -1,18 +1,17 @@ Source: normaliz Section: math Priority: optional -Maintainer: Ubuntu Developers -XSBC-Original-Maintainer: Debian Science Maintainers +Maintainer: Debian Science Maintainers Uploaders: Jerome Benoit Rules-Requires-Root: no Build-Depends: - debhelper-compat (= 12), autoconf-archive, + debhelper-compat (= 13), autoconf-archive, help2man, libgmp-dev, libmpfr-dev, - libflint-dev [!m68k !sh4 !x32], - libflint-arb-dev [!m68k !sh4 !x32], - libeantic-dev [!m68k !sh4 !x32] + libflint-dev, + libflint-arb-dev, + libeantic-dev Build-Depends-Indep: texlive-latex-base, texlive-latex-recommended, texlive-latex-extra, texlive-pictures, texlive-fonts-recommended, texlive-fonts-extra @@ -54,9 +53,9 @@ Depends: libnormaliz3 (= ${binary:Version}), libnormaliz-dev-common (= ${source:Version}), - libeantic-dev [!m68k !sh4 !x32], - libflint-arb-dev [!m68k !sh4 !x32], - libflint-dev [!m68k !sh4 !x32], + libeantic-dev, + libflint-arb-dev, + libflint-dev, libgmp-dev, ${misc:Depends} Conflicts: libnormaliz-dev diff -Nru normaliz-3.8.5+ds/debian/libnormaliz3.lintian-overrides normaliz-3.8.9+ds/debian/libnormaliz3.lintian-overrides --- normaliz-3.8.5+ds/debian/libnormaliz3.lintian-overrides 2019-12-28 09:29:22.000000000 +0000 +++ normaliz-3.8.9+ds/debian/libnormaliz3.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -# the upstream source contains C++ code, and has no clearly defined and versionned ABI -libnormaliz3: no-symbols-control-file diff -Nru normaliz-3.8.5+ds/debian/normaliz-bin.lintian-overrides normaliz-3.8.9+ds/debian/normaliz-bin.lintian-overrides --- normaliz-3.8.5+ds/debian/normaliz-bin.lintian-overrides 2020-06-06 11:03:49.000000000 +0000 +++ normaliz-3.8.9+ds/debian/normaliz-bin.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -# CPPFLAGS is properly passed -normaliz-bin: hardening-no-fortify-functions diff -Nru normaliz-3.8.5+ds/debian/normaliz-doc.examples normaliz-3.8.9+ds/debian/normaliz-doc.examples --- normaliz-3.8.5+ds/debian/normaliz-doc.examples 2019-12-28 09:29:22.000000000 +0000 +++ normaliz-3.8.9+ds/debian/normaliz-doc.examples 2020-10-12 10:17:34.000000000 +0000 @@ -1 +1,2 @@ example/* +PyNormaliz/examples/* diff -Nru normaliz-3.8.5+ds/debian/patches/debianization.patch normaliz-3.8.9+ds/debian/patches/debianization.patch --- normaliz-3.8.5+ds/debian/patches/debianization.patch 2020-06-05 16:30:57.000000000 +0000 +++ normaliz-3.8.9+ds/debian/patches/debianization.patch 2020-10-12 10:17:34.000000000 +0000 @@ -7,8 +7,8 @@ Author: Jerome Benoit Last-Update: 2019-12-28 ---- a/configure.ac -+++ b/configure.ac +--- normaliz.orig/configure.ac ++++ normaliz/configure.ac @@ -39,9 +39,6 @@ dnl Check for working C++ compiler; ask for C++14, 
require C++x0 dnl @@ -19,12 +19,12 @@ AC_LANG(C++) -@@ -311,7 +308,7 @@ - - AS_IF([test "x$with_e_antic" != xno], - [AC_MSG_CHECKING([whether e-antic headers and library are available]) -- E_ANTIC_LIBS="-leanticxx -leantic -larb -lflint" -+ E_ANTIC_LIBS="-leanticxx -leantic -lflint-arb -lflint" - LIBS_SAVED="$LIBS" - LIBS="$LIBS $E_ANTIC_LIBS" - AC_LINK_IFELSE( +@@ -318,7 +315,7 @@ + AC_SEARCH_LIBS(arb_init, [arb flint-arb], []) + AS_IF([test x"$ac_cv_search_arb_init" != x"none required"], + [E_ANTIC_LIBS="-leanticxx -leantic $ac_cv_search_arb_init -lflint"], +- [E_ANTIC_LIBS="-leanticxx -leantic -larb -lflint"]) ++ [E_ANTIC_LIBS="-leanticxx -leantic -lflint-arb -lflint"]) + AC_MSG_CHECKING([whether e-antic headers and library are available]) + CPPFLAGS_SAVED="$CPPFLAGS" + LDFLAGS_SAVED="$LDFLAGS" diff -Nru normaliz-3.8.5+ds/debian/rules normaliz-3.8.9+ds/debian/rules --- normaliz-3.8.5+ds/debian/rules 2019-12-30 06:13:16.000000000 +0000 +++ normaliz-3.8.9+ds/debian/rules 2020-10-12 10:17:34.000000000 +0000 @@ -54,5 +54,9 @@ override_dh_auto_install-indep: $(MAKE) -C _build/source install-data-am DESTDIR=$(CURDIR)/debian/tmp +override_dh_auto_install: + dh_auto_install + find debian/tmp/ -name "*.la" -delete + override_dh_compress-indep: dh_compress -X.pdf -Xexamples diff -Nru normaliz-3.8.5+ds/debian/source/lintian-overrides normaliz-3.8.9+ds/debian/source/lintian-overrides --- normaliz-3.8.5+ds/debian/source/lintian-overrides 2019-12-28 09:29:22.000000000 +0000 +++ normaliz-3.8.9+ds/debian/source/lintian-overrides 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# upstream source tarball is not (yet) signed: request to the upstream team -# was sent -- Jerome Benoit -debian-watch-does-not-check-gpg-signature diff -Nru normaliz-3.8.5+ds/doc/Normaliz.tex normaliz-3.8.9+ds/doc/Normaliz.tex --- normaliz-3.8.5+ds/doc/Normaliz.tex 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/doc/Normaliz.tex 2020-09-25 14:54:40.000000000 +0000 @@ -123,7 +123,7 @@ \def\ttt{\texttt} -\def\version{3.8.5} +\def\version{3.8.9} \def\NmzDir{normaliz-\version} @@ -359,6 +359,31 @@ \item Refined triangulations added. \end{arab} +3.8.6 is a technical prerelease. + +In 3.8.7: + +\begin{arab} + \item Addition of computation goals IsEmptySemiopen and CoveringFace + \item Source file structure changed + \item Improvement in finding preexisting dependencies +\end{arab} + +In 3.8.8: + +\begin{arab} + \item Dual versions of face lattice, f-vector and incidence + \item Rational lattices +\end{arab} + +In 3.8.9: + +\begin{arab} + \item \verb|ExtremeRaysFloat| introduced + \item \verb|TriangulationGenerators| replace \verb|Generators| + \item improved stability for interactive use +\end{arab} + See the file \verb|CHANGELOG| in the basic package for more information on the history of Normaliz. @@ -1814,6 +1839,8 @@ It is justified to ask why we don't use \verb|strict_inequalities| instead of \verb|excluded_faces|. It does of course give the same Hilbert series. However, Normaliz cannot (yet) apply symmetrization in inhomogeneous computations. Moreover, the algorithmic approach is different, and according to our experience \verb|excluded_faces| is more efficient, independently of symmetrization. +See Section \ref{semi_open} for more information on \verb|exycluded_faces|. + \subsubsection{At least one vote for every preference order}\label{strict_signs_ex} Suppose we are only interested in elections in which every preference order is chosen by at least one voter. 
This can be modeled as follows (\verb|Condorcet_one.in|): @@ -2136,7 +2163,7 @@ -3.41637 -0.946868 0.435796 ... -1.0521 0.632677 1 ... \end{Verbatim} -Notre that they can only be printed if a polyhedron is defined. This is always the case in inhomogeneous computations, but in the homogeneous case a grading is necessary. +Notre that they can only be printed if a polyhedron is defined. This is always the case in inhomogeneous computations, but in the homogeneous case a grading is necessary. There is also a variant \verb|ExtremRaysFloat|. Similarly we can get the support hyperplanes in floating point format (they are only defined up to a positive scalar multiple) by \begin{itemize} @@ -2556,7 +2583,7 @@ \begin{Verbatim} amb_space \end{Verbatim} -where \ttt{} stands for the dimension $d$ of the ambient vector space $\RR^d$ in which the geometric objects live. The \emph{ambient lattice} $\AA$ is set to $\ZZ^d$. +where \ttt{} stands for the dimension $d$ of the ambient vector space $\RR^d$ in which the geometric objects live. The \emph{ambient lattice} $\AA$ is set to $\ZZ^d$. Alternatively one can define the ambient space implicitly by \begin{Verbatim} @@ -2834,11 +2861,11 @@ \ttt{dehomogenization} \end{center} - \item Special restrictions apply for the input types \verb|lattice_odeal| and \verb|open_facets|; see Sections \ref{relations} and \ref{open_facets}. + \item Special restrictions apply for the input types \verb|lattice_ideal| and \verb|open_facets|; see Sections \ref{relations} and \ref{open_facets}. \item Special rules apply if precomputed data are used. See Section \ref{precomputed_data}. - \item For restrictions that apply to algebraic polyhedra see Section \ref{Algebraic}. + \item For restrictions that apply to algebraic polyhedra see Section \ref{Algebraic}. Similar restrictions apply if the input types \ttt{rational\_lattice} and \ttt{rational\_offset} are used (see Sction \ref{ratlat}). \end{arab} A non-restriction: the same type can appear several times. This is useful if one wants to combine different formats, for example @@ -2916,10 +2943,12 @@ \subsubsection{Lattices} -There are $4$ types: +There are $5$ types. With the exception of ttt{rational\_lattice} and \ttt{saturation} their entries are integers. \begin{itemize} \itemtt[lattice] is a matrix with $d$ columns. Every row represents a vector, and they define the lattice generated by them. Section \ref{latt_ex}, \verb|3x3magiceven_lat.in| + + \itemtt[rational\_lattice] is a matrix with $d$ columns. Its entries can be fractions. Every row represents a vector, and they define the sublattice of $\QQ^d$ generated by them. See \ref{ratlat}, \verb|ratlat_2.in|. \itemtt[saturation] is a matrix with $d$ columns. Every row represents a vector, and they define the lattice $U\cap \ZZ^d$ where $U$ is the subspace generated by them. Section \ref{latt_ex}, \verb|3x3magic_sat.in|. (If the vectors are integral, then $U\cap \ZZ^d$ is the saturation of the lattice generated by them.) @@ -2951,8 +2980,8 @@ $$ \xi_1x_1+\dots+\xi_dx_d> 0 $$ - for the vectors $(x_1,\dots,x_d)\in\RR^d$. It is considered as a homogeneous input type though it defines inhomogeneous inequalities. The faces of the cone excluded by the inequalities are excluded from the Hilbert series computation, but \verb|excluded_faces| behaves like \verb|inequalities| in every other respect . - Section \ref{excluded_ex}, \verb|CondorcetSemi.in|. + for the vectors $(x_1,\dots,x_d)\in\RR^d$. It is considered as a homogeneous input type though it defines inhomogeneous inequalities. 
The faces of the cone excluded by the inequalities are excluded from the Hilbert series computation, but \verb|excluded_faces| behave like \verb|inequalities| in almost every other respect . + Section \ref{excluded_ex}, \verb|CondorcetSemi.in|. Also see Section \ref{semi_open}. \itemtt[support\_hyperplanes] is a matrix with $d$ columns. See Section \ref{precomputed_data}. \end{itemize} @@ -2996,6 +3025,9 @@ \begin{itemize} \itemtt[offset] is a vector with $d$ integer entries. It defines the origin of the affine lattice. Section \ref{offset_ex}, \verb|InhomCongLat.in|. + + \itemtt[rational\_offset] is a vector with $d$ rational entries. It defines the origin of the rational affine lattice. +Section \ref{ratlat}, \verb|ratlat_2.in|. \end{itemize} \textbf{Note:}\enspace \verb|offset| and \verb|lattice| (or \verb|saturation|) together define an affine lattice. If \verb|offset| is present in the input, then the default choice for \verb|lattice| is the empty matrix. @@ -3030,6 +3062,12 @@ Section \ref{strict_ex}, \verb|2cone_int.in|. \itemtt[strict\_signs] is a vector with $d$ components in $\{-1,0,1\}$. It is the ''strict'' counterpart to \verb|signs|. An entry $1$ in component $i$ represents the inequality $x_i>0$, an entry $-1$ the opposite inequality, whereas $0$ imposes no condition on $x_i$. \ref{strict_signs_ex}, \verb|Condorcet_one.in| + + \itemtt[inhom\_excluded\_faces] is a matrix with $d+1$ columns. Every row $(\xi_1,\dots,\xi_d,-\eta)$ represents an inequality + $$ + \xi_1x_1+\dots+\xi_dx_d> \eta + $$ + for the vectors $(x_1,\dots,x_d)\in\RR^d$. The faces of the polyhedron excluded by the inequalities are excluded from the Hilbert and Ehrhart series series computation, but \verb|inhom_excluded_faces| behave like \verb|inhom_inequalities| in almost every other respect. Ssee Section \ref{semi_open}. \end{itemize} \subsubsection{Affine lattices} @@ -3065,7 +3103,7 @@ $$ is satisfied, where \verb|rel| can take the values $=,\leq,\geq,<,>$ with the represented by input strings \verb|=,<=,>=,<,>|, respectively. - Constraints with the relations $>$ and $<$ are converted to \verb|strict_inequalities|. + Constraints with the relations $>$ or $<$ are replaced by inequalities with relation \verb|>=| or \verb|<=| after adding $1$ or $-1$, respectively, to $ \eta$. (Tabular constraints cannot be used for \verb|excluded_faces| or \verb|inhom_excluded_faces|.) The input string \verb|~| represents a congruence $\equiv$ and requires the additional input of a modulus. It represents the congruence $$ @@ -3265,7 +3303,6 @@ Since version 3.1 Normaliz can also compute nonpointed cones and polyhedra without vertices. \subsection{The zero cone}\label{zero} - The zero cone with an empty Hilbert basis is a legitimate object for Normaliz. Nevertheless a warning message is issued if the zero cone is encountered. @@ -3441,6 +3478,7 @@ \begin{itemize} \itemtt[VerticesFloat] converts the format of the vertices to floating point. It implies \texttt{SupportHyperplanes}. \itemtt[SuppHypsFloat] converts the format of the support hyperplanes to floating point. It implies \texttt{SupportHyperplanes}. + \itemtt[ExtremeRaysFloat] does the same for the extreme rays. \end{itemize} Note that \texttt{VerticesFloat} and \texttt{SuppHypsFloat} are not pure output options. They are computation goals, and therefore break implicit \texttt{DefaultMode}. @@ -3559,7 +3597,22 @@ \begin{itemize} \itemtt[Incidence] \end{itemize} -Section \ref{FaceLattice} as well. +Section \ref{FaceLattice} as well. 
See it also for the dual versions +\begin{itemize} + \itemtt[DualFVector] + \itemtt[DualFaceLattice] + \itemtt[DualIncidence] +\end{itemize} + + +\subsubsection{Semiopen polyhedra} + +\begin{itemize} + \itemtt[IsEmptySemiopen] +\end{itemize} + +asks for the emptyness of a semiopen polyhedron. See Section \ref{semi_open}. + \subsubsection{Automorphism groups} @@ -3649,7 +3702,7 @@ \begin{itemize} - \itemtt[Generators] controls the generators of the efficient cone. + \itemtt[TriangulationGenerators] controls the generators of the last computed triangulation. \itemtt[OriginalMonoidGenerators] controls the generators of the original monoid. @@ -3684,7 +3737,9 @@ \itemtt[EuclideanVolume]controls the Euclidean volume. \itemtt [GeneratorOfInterior] controls the generator of the interior if the monoid is Gorenstein. - + + \itemtt [CoveringFace] aks for an excluded face making the semiopen polyhedron empty. + \itemtt[Equations] controls the equations. \itemtt[Congruences] controls the congruences. \itemtt[ExternalIndex] controls the external index. @@ -4458,8 +4513,8 @@ $\ell$ is the least common multiple of the degrees of the extreme integral generators of $M$. See \cite{BS} for an elementary account, references and the algorithm used by Normaliz. - -At present, weighted Ehrhart series can only be computed with homogeneous data. Note that \verb|excluded_faces| is a homogeneous input type. For them the monoid $M$ is replaced by the set + +Note that \verb|excluded_faces| is a homogeneous input type. For them the monoid $M$ is replaced by the set $$ M'=C'\cap L $$ @@ -4576,11 +4631,15 @@ Virtual multiplicity (float) = 0.209594726562 \end{Verbatim} +Weeighted Ehrhart series can be computed for polytopes defined by homogeneous or inhomogeneous input. Weighted Ehrhart series as a weighted variant of Hilbert series for unbounded polyhedra are not defined in Normaliz. + \subsubsection{Virtual multiplicity} Instead of the option \verb|-E| (or (\verb|--WeightedEhrhartSeries|) we use \verb|-L| or (\verb|--VirtualMultiplicity|). Then we can extract the virtual multiplicity from the output file. +The scope of computations is the same as for Weighted Ehrhart series. + \subsubsection{An integral} In their paper \emph{Multiplicities of classical varieties} (Proc. Lond. Math. Soc. (3) 110 (2015), 1033--105) J. Jeffries, J. Monta\~no and M. Varbaro ask for the computation of the integral @@ -4623,7 +4682,7 @@ integral (float) = 9.40973210888e-10 \end{Verbatim} -As pointed out above, Normaliz integrates with respect to the measure in which the basic lattice mesh has volume $1$. (this is $r!$ times the lattice normalized measure, $r=\dim P$.) In the full.dimensional case that is just the standard Lebesgue measure. But in lower dimensional cases this often not the case, and therefore Normaliz also computes the integral with respect to this \emph{Euclidean} measure: +As pointed out above, \emph{Normaliz integrates with respect to the measure in which the basic lattice mesh has volume $1$}. (this is $r!$ times the lattice normalized measure, $r=\dim P$.) In the full dimensional case that is just the standard Lebesgue measure. But in lower dimensional cases this often not the case, and therefore Normaliz also computes the integral with respect to this \emph{Euclidean} measure: \begin{Verbatim} integral (euclidean) = 1.88194642178e-09 \end{Verbatim} @@ -5145,6 +5204,8 @@ In addition to the files \verb|.tgn| and \verb|.tri|, also the file \verb|.inv| is written. 
It contains the data of the file \verb|.out| above the line of stars in a human and machine readable format. +\textbf{Note:}\enspace Normaliz (now) allows the computation of triangulations for all input. In the homogeneous case it computes a triangulation of the (pointed quotient of the) cone $C$ defined by the input. It can then be interpreted as a triangulation of a cross-section polytope if a grading is given. In the inhomogeneous case for which the input defines a polyhedron $P$, $C$ is the cone over $P$. If $P$ is a polytope, then a triangulation of $C$ can again be identified with a triangulation of $P$. However, if $P$ is unbounded, the the triangulation of $C$ only induces a polyhedral decomposition of $P$ into subpolyhedra whose compact faces are simplices. + \subsubsection{Nested triangulations}\label{nested} If Normaliz has subdivided a simplicial cone of a triangulation of the cone $C$, the resulting decomposition of $C$ may no longer be a triangulation in the strict sense. It is rather a \emph{nested triangulation}, namely a map from a rooted tree to the set of full-dimensional subcones of $C$ with the following properties: @@ -5295,6 +5356,8 @@ Normaliz sorts the generators lexicographically by default so that $(2,1,1)$ is inserted into cone building before $(2,2,1)$. If you akk \}ttt{KeepOrder} to the input, the basic triangulation will have only $2$ triangles: the square is subdivided along its diagonal. +\textbf{Note:}\enspace The remark in Section \ref{Triang} about the interpretation of general triangulations applies to the refined trinagulations as well. The refined triangulations are computed for the cone over the polyhedron if the input is inhomogeneous. \ttt{LatticePointTriangulation} is only allowed if the input defines a polytope. + \subsubsection{All generetors triangulations} The otion \begin{itemize} @@ -5501,9 +5564,12 @@ 101 2 011 2 111 3 +primal \end{Verbatim} The first line contains the number of faces, and the second the number of facets. The other lines list the faces $F$, encoded by a a $0$-$1$-vector and an integer. The integer is the codimension of $F$. The $0$-$1$-vector lists the facets containing $F$: the entry $1$ at the $i$-the coordinate indicates that the $i$-th facet contains $F$. +The attribute \verb|primal| indicate sthat we have computed the face lattice on the primal side. Dual face lattices will be introduced below. + The facets are counted as in the main output file \verb|.out|. (If you want them in a separate file, activate the output file \verb|.cst|.) In our case the support hyperplanes are: \begin{Verbatim} -8 2 3 @@ -5524,6 +5590,7 @@ 101 110 011 +primal \end{Verbatim} The first line contains the number of support hyperplanes, the second the number of vertices of the polyhedron ($0$ for homogeneous input), and the third the number of extreme rays of the (recession) cone. The following lines list the incidence vectors of the facets. They are ordered in the same way as the support hyperplanes in the main output file. The incidence vector has entry $1$ for an extreme ray (or) vertex) contained in the facet, and $0$ otherwise. The extreme rays are ordered as in the main output file. @@ -5536,8 +5603,63 @@ 01 1 10 1 11 0 +primal \end{Verbatim} with its $2$ vertices and $1$ extreme ray of the recession cone. + +\subsubsection{Dual cace lattice, f-vector and incidence matrix} + +Normaliz can also compute the face lattice of the dual cone. 
On the primal side this means that the face lattice is built bottom up and each face is represented by the extreme rays it contains. Since this is not possible for unbounded polyhedra, the dual versions are restricted to homogeneous input or inhomogeneous input defining polytopes. One application of the dual version is the computation of faces of low dimension which may be difficult to reach from the top if there are many facets. The numerical \verb|face_codim_bound| now refers to tze face codimension on the dual side. For example, if one wants to compute the edges of a polytope from the vertices, \verb|face_codim_bound| must be set to $2$ since the edges define codimension $2$ faces of the dual polytope. + +An example (\verb|cube_3_dual_fac.in|): +\begin{Verbatim} +amb_space 3 +constraints 6 symbolic +x[1] >= 0; +x[2] >= 0; +x[3] >= 0; +x[1] <= 1; +x[2] <= 1; +x[3] <= 1; +DualFaceLattice +DualIncidence +face_codim_bound 2 +\end{Verbatim} + +In the oitput file we see +\begin{Verbatim} +dual f-vector (possibly truncated): +12 8 1 +\end{Verbatim} +which is the f-vector of the dual polytope (or cone) starting from codimension $2$ and going up to codimension $0$. + +The dual face lattice up to codimension $2$ is given by is given by +\begin{Verbatim} +21 +8 + +00000000 0 +10000000 1 +... +00000011 2 +dual +\end{Verbatim} +Indeed, we have $21$ faces in that range, and each face is specified by the vertices (or extreme rays) it contains. The attribute \verb|dual| helps to recognize the dual situation. + +The dual incidence matrix lusts the support hyperplanes containing the vertices (or extremne rays): +\begin{Verbatim} +8 +0 +6 + +000111 +... +111000 +dual +\end{Verbatim} +For the cube defined by inhomogeneous input we have $8$ vertices of the polyhedron, $0$ extreme rays of the recession cone and $6$ facets. + +Primal and dual versions of face lattice and incidence, respectively, are printed to the same file. Therefore only one of them is allowed. \subsection{Module generators over the original monoid}\label{MinMod} @@ -5753,6 +5875,182 @@ Note that the lattice points are listed with the homogenizing coordinate $1$. In fact, both \verb|vertices| and \verb|open_facets| make the computation inhomogeneous. If both are missing, then the lattice points are listed without the homogenizing coordinate. If you want a uniform format for the output, you can use the zero vector for \verb|open_facets| or the origin as the vertex. Both options change the result only to the extent that the homogenizing coordinate is added. +\subsection{Semiopen polyhedra}\label{semi_open} + +A \emph{semiopen polyhedron} $P$ is a subset of $\RR^d$ defined by system of inequalities $\lambda_i(x)\ge 0$, $i=1,\dots,u$, and $\lambda_i(x)> 0$, $i=u+1,\dots,v,$, where $\lambda_1,\dots,\lambda_v$ are affine linear forms. Normaliz can check whether $P$ is empty and compute Hilbert/Ehrhart series if $P$ is a semiopen polytope. + +The inequalities $\lambda_i(x)> 0$, $i=u+1,\dots,v,$ must be defined by \verb|excluded_faces| in the homogeneous case and \verb|inhom_excluded_faces| in the inhomogeneous case. (Don't use \verb|strict_inequalities|; they have a different effect.) These input types can be combined with generators and other constraints. + +Let $\overline P$ be the closed polyhedron defined by the inequalities $\lambda_i(x)\ge 0$, $i=1,\dots,u$ and the ``weak'' inequalities $\lambda_i(x)\ge 0$, $i=u+1,\dots,v$ . Then $\overline P$ is the topological closure of $P$, provided $P\neq\emptyset$. 
The main object for Normaliz is $\overline P$, but the computation is restricted to $P$ for the following goals if \verb|excluded_faces| or \verb|inhom_excluded_faces| are present in the input: +\begin{center} +\texttt{HilbertSeries\quad EhrhartSeries\quad WeightedEhrhartSeries\\ StanleyDecomposition \quad IsEmptySemiOpen} +\end{center} +See Section \ref{excluded_ex} for a typical example of \verb|HilbertSeries|. For all other computation goals \verb|excluded_faces| and \verb|inhom_excluded_faces| are simply ignored. Note that for lattice points in $P$ the inequalities $\lambda_i(x)> 0$, $i=u+1,\dots,v,$, can be replaced by $\lambda_i(x)\ge 1$ (if the $\lambda_i$ have integral coefficients). Therefore lattice points in semiopen polyhedra can be computed as well. But they require a different input. + +Note that Normaliz throws a \verb|BadInputException| if you try to compute one the first four goals above for the empty set. + +Let us have a look at two examples. In the first $P$ is empty, in the second $P$ is nonempty. +\begin{Verbatim} +IsEmpty.in IsNonEmpty.in + +amb_space 1 amb_space 1 +inequalities 1 inequalities 1 +1 1 +inhom_excluded_faces 1 inhom_excluded_faces 1 +-1 0 -1 1 +IsEmptySemiOpen EhrhartSeries + IsEmptySemiOpen +\end{Verbatim} + +The empty semiopen polytope is defined by the inequalities $\lambda_1(x) \ge 0$ and $\lambda_2(x) < 0$. In the second example the second inequality is replaced by $\lambda_2(x) < 1$. + +The first output file: +\begin{Verbatim} +1 vertices of polyhedron +0 extreme rays of recession cone +1 support hyperplanes of polyhedron (homogenized) + +1 excluded faces + +embedding dimension = 2 +affine dimension of the polyhedron = 0 +rank of recession monoid = 0 (polyhedron is polytope) + +dehomogenization: +0 1 + +Semiopen polyhedron is empty +Covering face: +-1 0 +... +\end{Verbatim} +We are informed that the semiopen polyhedron $P$ is empty. Moreover, we see an excluded face that covers $\overline P$ and forces $P$ to be empty. All other data refer to $\overline P=\{0\}$. + +Now the output for the nonempty semiopen polytope: +\begin{Verbatim} +2 vertices of polyhedron +0 extreme rays of recession cone +2 support hyperplanes of polyhedron (homogenized) + +1 excluded faces + +embedding dimension = 2 +affine dimension of the polyhedron = 1 (maximal) +rank of recession monoid = 0 (polyhedron is polytope) + +dehomogenization: +0 1 + +Ehrhart series: +1 +denominator with 2 factors: +1: 2 + +shift = 1 + +degree of Ehrhart Series as rational function = -1 + +The numerator of the Ehrhart series is symmetric. + +Ehrhart polynomial: +0 1 +with common denominator = 1 + +Semiopen polyhedron is nonempty +\end{Verbatim} +Note that the Ehrhart series is computed for the interval $[0,1)$. All other data are computed for $[0,1]$. + +\subsection{Rational lattices}\label{ratlat} + +It is sometimes desirable to work in a sublattice of $\qquad^d$ that is not contained in $\ZZ$. Such lattices can be defined by the input type \verb|rational_lattice|. In the inhomogeeous case the origin can be moved by \verb|rational_offset|. Note that a finitely generated $\ZZ$-submodule of $\QQ^d$ is automatically discrete. An example input file (\verb|ratlat_2.in|): +\begin{Verbatim} +amb_space 2 +vertices 3 +0 0 1 +0 1 1 +1 0 1 +rational_lattice 2 +1/2 -1/3 +1 1/2 +rational_offset +1 0 +EhrhartSeries +HSOP +\end{Verbatim} +Though the origin is shifted by an integral vector, \verb|rational _offset| has to be used. 
Conversely, if \verb|rational_offset| is in the input, the lattice can only be defined by \verb|rational_lattice|. + +Normaliz must return the results by integer vectors. Therefore it scales the coordinate axes of $\QQ^d$ in such a way that the vectors given in \verb|rational_lattice| and \verb|+#rational_offset| become integral with respect to the scaled coordinate axes. +The output: +\begin{Verbatim} +3 lattice points in polytope (module generators) +0 Hilbert basis elements of recession monoid +3 vertices of polyhedron +0 extreme rays of recession cone +3 support hyperplanes of polyhedron (homogenized) + +embedding dimension = 3 +affine dimension of the polyhedron = 2 (maximal) +rank of recession monoid = 0 (polyhedron is polytope) + +scaling of axes +2 6 + +dehomogenization: +0 0 1 + + +module rank = 3 + +Ehrhart series (HSOP): +1 2 3 4 8 8 10 10 10 9 8 4 4 2 1 +denominator with 3 factors: +1: 1 7: 2 + +degree of Ehrhart Series as rational function = -1 + +...1 + +Ehrhart quasi-polynomial of period 7: +0: 7 5 6 +... +with common denominator = 7 + +*********************************************************************** + +3 lattice points in polytope (module generators): +0 4 1 +1 2 1 +2 0 1 + +0 Hilbert basis elements of recession monoid: + +3 vertices of polyhedron: +0 0 7 +0 42 7 +2 0 1 + +0 extreme rays of recession cone: + +3 support hyperplanes of polyhedron (homogenized): +-3 -1 6 +0 1 0 +1 0 0 + +1 congruences: +3 5 1 7 + +3 basis elements of generated lattice: +1 0 -3 +0 1 2 +0 0 7 +\end{Verbatim} + +The vector following \verb|scaling of axes| contains the inverses of the scaling factors of the basis elements of $\QQ^d$. In he example above the first basis vector is divided by $2$ and the second by $6$. Thus the ambient lattice has changed from $\ZZ$ to $A=\ZZ(1/2,0)+\ZZ(0,1/6)$. We can see from the appearance of ba congruence that the lattice $L=\ZZ(1/2,-1/3) +\ZZ(1,12)$ is strictly contained in $A$. If the rank were smaller than $2$, equations would appear. + +The $3$ lattice oints, in original coordinates, are $(0,2/3)$, $(1/2,1/3) $ and $(1,0)$. The last is our origin. + +Since certain input types do not allow division of coordinates they are excluded by \verb|rational_lattice| and \verb|rational_pffset|. See Section \ref{alg_inp} for a list (with the inevitable changes). + \subsection{Automorphism groups}\label{Automorphisms} \def\Aut{\operatorname{Aut}} @@ -6157,7 +6455,7 @@ \end{arab} -\subsection{Ptrecomputed Hilbert basis of the recession cone}\label{HB_rec_cone} +\subsection{Precomputed Hilbert basis of the recession cone}\label{HB_rec_cone} In applications one may want to compute several polyhedra with the same recession cone. In these cases it is useful to add the Hilbert basis of the recession cone to the input. An example is \verb|small_inhom_hbrc.in|: \begin{Verbatim} @@ -6310,14 +6608,19 @@ \end{tikzpicture} \end{minipage} -The second line specifies the extension $\QQ[\sqrt 5]$ of $\QQ$ over which we want to define the icosahedron. In addition to the minimal polynomial (\verb|min_poly| or \verb|minpoly|)we have to give an interval from which the zero of the polynomial is to be picked. The square brackets are mandatory. There must be a \emph{single} zero in that interval. The name of the root is fixed to be \verb|a|. The number field specification must follow \verb|amb_space|. Otherwise Normaliz believes that you want to work over $\ZZ$. +The second line specifies the extension $\QQ[\sqrt 5]$ of $\QQ$ over which we want to define the icosahedron. 
In addition to the minimal polynomial (\verb|min_poly| or \verb|minpoly|)we have to give an interval from which the zero of the polynomial is to be picked. The square brackets are mandatory. There must be a \emph{single} zero in that interval. The name of the root can be any single letter except \verb|x| or \verb|e|. The number field specification must follow \verb|amb_space|. Otherwise Normaliz believes that you want to work over $\ZZ$. Note that the entries of the input file that contain \verb|a| must be enclosed in round brackets. You can enter any $\QQ$-linear combination of powers of \verb|a|. We allow \verb|*| between the coefficient and the power of \verb|a|, but it need not appear. The character \verb|^| indicates the exponent. It is mandatory. So \begin{Verbatim} (a^3-2*a^2 + 4a-1/2) (a+a-2a-10 + 10*a^0) \end{Verbatim} -are legal numbers in the input. +are legal numbers in the input. Instead of the delimiters \verb|(...)| one can also use \verb|"| and \verb|'| on both sides so that +\begin{Verbatim} +"a^3-2*a^2 + 4a-1/2" +'a+a-2a-10 + 10*a^0' +\end{Verbatim} +are also legal in matrices. However, in order to stick to standard conventions in mathematical notation, one must use \verb|(...)| in symbolic constraints. The result of the computation by \verb|normaliz -c ../example/icosahedron-v| starts \begin{Verbatim} @@ -6388,7 +6691,8 @@ \begin{tabular}{llll} lattice &strict\_inequalities&strict\_signs&open\_facets\\ cone\_and\_lattice& inhom\_congruences& lattice\_ideal&offset\\ -congruences& hilbert\_basis\_rec\_cone& excluded\_faces +congruences& hilbert\_basis\_rec\_cone &rees\_algebra & rational\_lattice\\ +rational\_offest \end{tabular} } \end{center} @@ -6410,7 +6714,7 @@ BottomDecomposition& SuppHypsFloat & NoBottomDec & TriangulationDetSum\\ GradingIsPositive&DefaultMode& IsPointed& EuclideanAutomorphisms\\ FVector & FaceLattice & Automorphisms & CombinatorialAutomorphisms\\ -Incidence & Deg1Elements& &AllGeneratorsTriangulation +Incidence & Deg1Elements& IsEmptySemiOpen &AllGeneratorsTriangulation \end{tabular} } \end{center} @@ -6877,6 +7181,8 @@ \item To run the test suite, go to \ttt{build} and run \ttt{make check}. For more information on the test suite see \verb|INSTALL|. \end{arab} +\textbf{Warning:}\enspace You must not change the path leading from root to the directory \ttt{local} in the Normaliz directory by moving or renaming a directory along the way. Otherwise the dynamic libraries cannot be found anymore. If you have chosen another \verb|NMZ_PREFIX|, this warning applies analogously. + \subsection{Packages for rational polyhedra} \subsubsection{CoCoALib} @@ -7298,6 +7604,8 @@ \section{Annotated console output}\label{Console} +Somewhat outdated, but not much has changed in the shown computations since 3.2.0. + \subsection{Primal mode} With @@ -7594,12 +7902,14 @@ cone, cone_and_lattice, lattice, +rational_lattice, saturation, // // inhomogeneous generators // vertices, offset, +rational_offset, // // homogeneous constraints // @@ -7607,6 +7917,7 @@ signs, equations, congruences, +excluded_faces, // // inhomogeneous constraints // @@ -7615,6 +7926,7 @@ strict_inequalities, strict_signs, inhom_congruences, +inhom_excluded_faces, // // linearforms // @@ -7624,13 +7936,12 @@ // special open_facets, projection_coordinates, -excluded_faces, -lattice_ideal, +lattice_ideal, // // precomputed data // support_hyperplanes, -extreme_rays, +extreme_rays, maximal_subspace, generated_lattice, hilbert_basis_rec_cone, @@ -7697,6 +8008,8 @@ \end{Verbatim} etc. 
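To illustrate the new input types in the C++ interface, the following minimal sketch (not contained in the distribution) rebuilds the example \verb|ratlat_2.in| of Section \ref{ratlat} via an input map. It is only a sketch: it assumes that the map based constructor of \verb|Cone| accepts \verb|mpq_class| matrices for these input types (as indicated by the note below), that a two-goal overload of \verb|compute| is available, and that the relevant declarations are provided by \verb|libnormaliz/cone.h|.
\begin{Verbatim}
#include <map>
#include <vector>
#include <gmpxx.h>
#include "libnormaliz/cone.h"
using namespace libnormaliz;
using namespace std;

int main() {
    // input data of ratlat_2.in, given over mpq_class
    map<Type::InputType, vector<vector<mpq_class> > > input;
    input[Type::vertices]         = {{0, 0, 1}, {0, 1, 1}, {1, 0, 1}};
    input[Type::rational_lattice] = {{mpq_class(1, 2), mpq_class(-1, 3)},
                                     {1, mpq_class(1, 2)}};
    input[Type::rational_offset]  = {{1, 0}};
    Cone<mpz_class> MyCone(input);     // the arithmetic is done over mpz_class
    MyCone.compute(ConeProperty::EhrhartSeries, ConeProperty::HSOP);
    auto scaling = MyCone.getAxesScaling();  // inverses of the axis scaling factors,
                                             // see "Scaling of axes" below
    return 0;
}
\end{Verbatim}
As explained in Section \ref{ratlat}, the results returned for such input refer to the scaled coordinates.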
+Note that \verb|rational_lattice| and \verb|rational_offset| can only be used if the input data are given in class \verb|mpq_class| or \verb|nmz_float|. + For convenience we provide the function \begin{Verbatim} vector > to_matrix(vector v) @@ -7817,7 +8130,7 @@ enum Enum { // matrix valued START_ENUM_RANGE(FIRST_MATRIX), -Generators, +TriangulationGenerators, ExtremeRays, VerticesOfPolyhedron, SupportHyperplanes, @@ -7836,6 +8149,7 @@ START_ENUM_RANGE(FIRST_MATRIX_FLOAT), SuppHypsFloat, VerticesFloat, +ExtremeRaysFloat, END_ENUM_RANGE(LAST_MATRIX_FLOAT), // vector valued @@ -7844,6 +8158,8 @@ Dehomogenization, WitnessNotIntegrallyClosed, GeneratorOfInterior, +CoveringFace, +AxesScaling, END_ENUM_RANGE(LAST_VECTOR), // integer valued @@ -7898,6 +8214,7 @@ IsReesPrimary, IsInhomogeneous, IsGorenstein, +IsEmptySemiOpen, // // checking properties of already computed data // (cannot be used as a computation goal) @@ -7930,9 +8247,14 @@ EhrhartQuasiPolynomial, WeightedEhrhartSeries, WeightedEhrhartQuasiPolynomial, +// FaceLattice, FVector, Incidence, +DualFVector, +DualIncidence, +DualSublattice, +// Sublattice, // ClassGroup, @@ -8115,9 +8437,10 @@ In the inhomogeneous case the first function returns the extreme rays of the recession cone, and the second the vertices of the polyhedron. (Together they form the extreme rays of the homogenized cone.) -Vertices can be returned in floating point format: +Vertices and extreme rays can be returned in floating point format: \begin{Verbatim} const vector< vector >& Cone::getVerticesFloat() +const vector< vector >& Cone::getExtremeRaysFloat() size_t Cone::getNrVerticesFloat() \end{Verbatim} @@ -8312,7 +8635,11 @@ size_t Cone::getTriangulationSize() Integer Cone::getTriangulationDetSum() \end{Verbatim} -See Section \ref{Triang} for the interpretation of these data. The first component of the pair is the vector of indices of the simplicial cones in the triangulation. Note that the indices are here counted from $0$ (whereas they start from $1$ in the \verb|tri| file). The second component is the determinant. +See Section \ref{Triang} for the interpretation of these data. The first component of the pair is the vector of indices of the simplicial cones in the triangulation. The indices refer to the vectors in +\begin{Verbatim} +const vector >& Cone::getTriangulationGenerators() +\end{Verbatim} +Note that the indices are here counted from $0$ (whereas they start from $1$ in the \verb|tri| file). The second component is the determinant. The type of triangulation can be retrieved by \begin{Verbatim} @@ -8332,7 +8659,13 @@ \begin{Verbatim} const vector< pair,Integer> >& Cone::getTriangulation() \end{Verbatim} -Note t5hat \verb|:getTriangulationSize()| and \verb|getTriangulationDetSum() | refer to the last basic triangulation from which the refined triangulation has been computed by stellar subdivision. +The corresponding vectors are returned by +\begin{Verbatim} +const vector >& Cone::getTriangulationGenerators() +\end{Verbatim} +AFTER the triangulation has been computed. + +Note that \verb|:getTriangulationSize()| and \verb|getTriangulationDetSum() | refer to the basic triangulation from which the refined triangulation has been computed by stellar subdivision. Additionally we have \begin{Verbatim} @@ -8361,6 +8694,14 @@ \end{Verbatim} The entries are accessed in the same way as those of \verb|vector >|. 
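As a further illustration, the following hedged sketch (not part of libnormaliz) shows how the triangulation data described in this section can be read back: the keys returned by \verb|getTriangulation| are row indices into the matrix returned by \verb|getTriangulationGenerators|, counted from $0$. The function name \verb|print_triangulation|, the header name and the choice of \verb|mpz_class| are only for the example; a cone \verb|MyCone| is assumed to have been constructed as before.
\begin{Verbatim}
#include <iostream>
#include <gmpxx.h>
#include "libnormaliz/cone.h"
using namespace libnormaliz;
using namespace std;

// print every simplicial cone of the triangulation together with its determinant
void print_triangulation(Cone<mpz_class>& MyCone) {
    MyCone.compute(ConeProperty::Triangulation);
    const auto& Tri  = MyCone.getTriangulation();            // pairs (key vector, determinant)
    const auto& Gens = MyCone.getTriangulationGenerators();  // rows indexed by the keys
    for (const auto& simplex : Tri) {
        for (const auto& key : simplex.first) {   // generator indices, counted from 0
            for (const auto& coord : Gens[key])
                cout << coord << " ";
            cout << endl;
        }
        cout << "determinant = " << simplex.second << endl;
    }
}
\end{Verbatim}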
+\subsubsection{Scaling of axes} + +If \verb|rational_lattice| or \verb|rational_offswet| are in the input for the cone, then the vector giving scaling of axes can be retieved by +\begin{Verbatim} +vector Cone::getAxesScaling() +\end{Verbatim} +The cone property \verb|AxesScaling| cannot be used as a computation goal, but one can ask for its computation as usual.| + \subsubsection{Coordinate transformation}\label{coord} The coordinate transformation from the ambient lattice to the sublattice generated by the Hilbert basis (whether it has been computed or not) can be returned as follows: @@ -8504,6 +8845,14 @@ const vector& Cone::getIncidence() \end{Verbatim} +These functions have dual versions: + +\begin{Verbatim} +vector Cone::getDualFVector() +const map& Cone::getDualFaceLattice() +const vector& Cone::getDualIncidence() +\end{Verbatim} + \subsubsection{Integer hull} For the computation of the integer hull an auxiliary cone is constructed. A reference to it is returned by @@ -8538,6 +8887,15 @@ \end{Verbatim} The first component of each pair contains the indices of the generators (counted from 0) that lie in the face and the second component is the weight. +The emptyness of semiopen polyhedra can be tested by +\begin{Verbatim} +bool Cone::isEmptySemiOpen() +\end{Verbatim} +AIf the answer is positive, an excluded face making the semiopen polyhedron empty is returned by +\begin{Verbatim} +vector Cone::getCoveringFace() +\end{Verbatim} + \subsubsection{Boolean valued results} All the ``questions'' to the cone that can be asked by the boolean valued functions in this section start a computation if the answer is not yet known. @@ -8743,7 +9101,7 @@ Let us have a look at \verb|source/maxsimplex/maxsimplex.cpp|. First the more or less standard preamble: \begin{Verbatim} -#include +#include #include #include #include diff -Nru normaliz-3.8.5+ds/example/cube_3_dual_fac.in normaliz-3.8.9+ds/example/cube_3_dual_fac.in --- normaliz-3.8.5+ds/example/cube_3_dual_fac.in 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/example/cube_3_dual_fac.in 2020-08-29 07:43:26.000000000 +0000 @@ -0,0 +1,11 @@ +amb_space 3 +constraints 6 symbolic +x[1] >= 0; +x[2] >= 0; +x[3] >= 0; +x[1] <= 1; +x[2] <= 1; +x[3] <= 1; +DualFaceLattice +DualIncidence +face_codim_bound 2 diff -Nru normaliz-3.8.5+ds/example/IsEmpty.in normaliz-3.8.9+ds/example/IsEmpty.in --- normaliz-3.8.5+ds/example/IsEmpty.in 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/example/IsEmpty.in 2020-07-21 15:37:45.000000000 +0000 @@ -0,0 +1,6 @@ +amb_space 1 +inequalities 1 +1 +inhom_excluded_faces 1 +-1 0 +IsEmptySemiOpen diff -Nru normaliz-3.8.5+ds/example/IsNonEmpty.in normaliz-3.8.9+ds/example/IsNonEmpty.in --- normaliz-3.8.5+ds/example/IsNonEmpty.in 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/example/IsNonEmpty.in 2020-07-21 15:37:45.000000000 +0000 @@ -0,0 +1,7 @@ +amb_space 1 +inequalities 1 +1 +inhom_excluded_faces 1 +-1 1 +EhrhartSeries +IsEmptySemiOpen diff -Nru normaliz-3.8.5+ds/example/Makefile.am normaliz-3.8.9+ds/example/Makefile.am --- normaliz-3.8.5+ds/example/Makefile.am 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/example/Makefile.am 2020-08-29 10:31:40.000000000 +0000 @@ -34,6 +34,7 @@ CondorcetSemi.in \ Condorcet_one.in \ cube_3.in \ + cube_3_dual_fac.in \ FortuneCookie.in \ InhomCong.in \ InhomCongLat.in \ @@ -47,11 +48,13 @@ InhomIneq_prec.in \ IntClMonId.in \ IntClMonIdSupp.in \ + IsEmpty.in \ + IsNonEmpty.in \ MonIdeal.in \ NonCMDiv.in \ NumSemi.in \ PluralityVsCutoff.in \ - square2.in 
\ + square2.in \ SquareMinusVertex.in \ SquareMinusVertexInhom.in \ SquareMinusVertexPolyh.in \ @@ -91,6 +94,7 @@ rationalFL.in \ rationalIH.in \ rational_inhom.in \ + ratlat_2.in \ rees.in \ semigraphoid5.in \ small.in \ diff -Nru normaliz-3.8.5+ds/example/ratlat_2.in normaliz-3.8.9+ds/example/ratlat_2.in --- normaliz-3.8.5+ds/example/ratlat_2.in 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/example/ratlat_2.in 2020-08-29 07:43:26.000000000 +0000 @@ -0,0 +1,12 @@ +amb_space 2 +vertices 3 +0 0 1 +0 1 1 +1 0 1 +rational_lattice 2 +1/2 -1/3 +1 1/2 +rational_offset +1 0 +EhrhartSeries +HSOP diff -Nru normaliz-3.8.5+ds/INSTALL normaliz-3.8.9+ds/INSTALL --- normaliz-3.8.5+ds/INSTALL 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/INSTALL 2020-09-25 14:54:40.000000000 +0000 @@ -9,17 +9,17 @@ For rational polyhedra: CoCoALib, http://cocoa.dima.unige.it/cocoalib/, version 0.99710 -MPFR, http://www.mpfr.org/mpfr, version 4.0.2 -Flint, http://www.flintlib.org/flint, version 2.5.2 -nauty, http://users.cecs.anu.edu.au/~bdm/nauty, version 27rc5 +MPFR, http://www.mpfr.org/mpfr, version 4.1.0 +Flint, http://www.flintlib.org/flint, version 2.6.3 +nauty, http://users.cecs.anu.edu.au/~bdm/nauty, version 27r1 nauty should be configured with --enable-tls. For algebraic polyhedra additionally: MPFR and Flint as above -arb, https://github.com/fredrik-johansson/arb/archive/, version 2.17.0 -e-antic, http://www.labri.fr/perso/vdelecro/e-antic/, version 0.1.5 +arb, https://github.com/fredrik-johansson/arb/archive/, version 2.18.0 +e-antic, http://www.labri.fr/perso/vdelecro/e-antic/, version 0.1.8 ********************* @@ -55,7 +55,7 @@ **** THE TEST SUITE **** ************************ -Normaliz has a large test suite contained in test and its subdirectories. If Normaliz has been built in b uild_shared, one can simply navigate to this directory and type +Normaliz has a large test suite contained in test and its subdirectories. If Normaliz has been built in build, one can simply navigate to this directory and type make check @@ -71,7 +71,17 @@ The general method is to create an output file and to compare it to a reference file. Certain tests create two output files per input file, for example integer hull computations or automorphism groups. -Adding OUTPUTCHECK=yes disables the comparisons to the reference files. This makes it easier to spot unwanted debugging output to the terminal. +Strict and non-strict tests +--------------------------- + +With STRICT=yes, the output files are comared to the reference files without any exception. + +Otherwise some data will be skipped in the comparison, especially the floating point approximations to algebraic numbers. + +Check of terminhal output +------------------------- + +With OUTPUTCHECK=yes, no comparisons are made and this helps to detect any unwanted terminal output. 
Basic tests ----------- diff -Nru normaliz-3.8.5+ds/Makefile.am normaliz-3.8.9+ds/Makefile.am --- normaliz-3.8.5+ds/Makefile.am 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/Makefile.am 2020-08-29 07:43:26.000000000 +0000 @@ -21,6 +21,7 @@ EXTRA_DIST += install_scripts_opt/common.sh EXTRA_DIST += install_scripts_opt/install_nmz_cocoa.sh +EXTRA_DIST += install_scripts_opt/install_nmz_cocoa_old.sh EXTRA_DIST += install_scripts_opt/install_nmz_flint.sh EXTRA_DIST += install_scripts_opt/install_nmz_mpfr.sh EXTRA_DIST += install_scripts_opt/install_nmz_arb.sh diff -Nru normaliz-3.8.5+ds/source/dynamic/dynamic.cpp normaliz-3.8.9+ds/source/dynamic/dynamic.cpp --- normaliz-3.8.5+ds/source/dynamic/dynamic.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/dynamic/dynamic.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -1,4 +1,4 @@ -#include +#include #include #include #ifdef _OPENMP diff -Nru normaliz-3.8.5+ds/source/input.cpp normaliz-3.8.9+ds/source/input.cpp --- normaliz-3.8.5+ds/source/input.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/input.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,987 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#include -#include // std::isdigit -#include // numeric_limits - -#include "options.h" -#include "libnormaliz/input_type.h" -#include "libnormaliz/map_operations.h" -#include "libnormaliz/cone_property.h" - -// eats up a comment, stream must start with "/*", eats everything until "*/" -void skip_comment(istream& in) { - int i = in.get(); - int j = in.get(); - if (i != '/' || j != '*') { - throw BadInputException("Bad comment start!"); - } - while (in.good()) { - in.ignore(numeric_limits::max(), '*'); // ignore everything until next '*' - i = in.get(); - if (in.good() && i == '/') - return; // successfully skipped comment - } - throw BadInputException("Incomplete comment!"); -} - -template -void save_matrix(map > >& input_map, - InputType input_type, - const vector >& M) { - // check if this type already exists - if (contains(input_map, input_type)) { - /*throw BadInputException("Multiple inputs of type \"" + type_string - + "\" are not allowed!");*/ - input_map[input_type].insert(input_map[input_type].end(), M.begin(), M.end()); - return; - } - input_map[input_type] = M; -} - -template -void save_empty_matrix(map > >& input_map, InputType input_type) { - vector > M; - save_matrix(input_map, input_type, M); -} - -template -vector > transpose_mat(const vector >& mat) { - if (mat.size() == 0 || mat[0].size() == 0) - return vector >(0); - size_t m = mat[0].size(); - size_t n = mat.size(); - vector > transpose(m, vector(n, 0)); - for (size_t i = 0; i < m; ++i) - for (size_t j = 0; j < n; ++j) - transpose[i][j] = mat[j][i]; - return transpose; -} - -template -void append_row(const vector row, map > >& input_map, Type::InputType input_type) { - vector > one_row(1, row); - save_matrix(input_map, input_type, one_row); -} - -template -void process_constraint(const string& rel, - const vector& left, - Number right, - const Number modulus, - map > >& input_map, - bool forced_hom) { - vector row = left; - bool inhomogeneous = false; - if (right != 0 || rel == "<" || rel == ">") - inhomogeneous = true; - string modified_rel = rel; - bool strict_inequality = false; - if (rel == "<") { - strict_inequality = true; - right -= 1; - modified_rel = "<="; - } - if (rel == ">") { - strict_inequality = true; - right += 1; - modified_rel = ">="; - } - if (strict_inequality && forced_hom) { - throw BadInputException("Strict inequality not allowed in hom_constraints!"); - } - if (inhomogeneous || forced_hom) - row.push_back(-right); // rhs --> lhs - if (modified_rel == "<=") { // convert <= to >= - for (size_t j = 0; j < row.size(); ++j) - row[j] = -row[j]; - modified_rel = ">="; - } - if (rel == "~") - row.push_back(modulus); - - if (inhomogeneous && !forced_hom) { - if (modified_rel == "=") { - append_row(row, input_map, Type::inhom_equations); - return; - } - if (modified_rel == ">=") { - append_row(row, input_map, Type::inhom_inequalities); - return; - } - if (modified_rel == "~") { - append_row(row, input_map, Type::inhom_congruences); - return; - } - } - else { - if (modified_rel == "=") { - append_row(row, input_map, Type::equations); - return; - } - if (modified_rel == ">=") { - append_row(row, input_map, Type::inequalities); - return; - } - if (modified_rel == "~") { - append_row(row, input_map, Type::congruences); - return; - } - } - throw BadInputException("Illegal constrint type " + rel + " !"); -} - -template -bool read_modulus(istream& in, Number& modulus) { - in >> std::ws; // gobble any leading white space - char dummy; - in >> dummy; - if (dummy != '(') - return false; - in >> modulus; - if 
(in.fail() || modulus == 0) - return false; - in >> std::ws; // gobble any white space before closing - in >> dummy; - if (dummy != ')') - return false; - return true; -} - -template -void read_symbolic_constraint(istream& in, string& rel, vector& left, Number& right, Number& modulus, bool forced_hom) { - string constraint; - - while (in.good()) { - char c; - c = in.get(); - if (in.fail()) - throw BadInputException("Symbolic constraint does not end with semicolon"); - if (c == ';') - break; - constraint += c; - } - - // remove white space - // we must take care that the removal of white space does not - // shadow syntax errors - string without_spaces; - bool digit_then_spaces = false; - bool has_content = false; - for (size_t j = 0; j < constraint.size(); ++j) { - char test = constraint[j]; - if (!isspace(test)) - has_content = true; - if (isspace(test)) - continue; - if (test == '.') { - if (j == constraint.size() - 1 || isspace(constraint[j + 1])) - throw BadInputException("Incomplete number"); - } - if (test == 'e') { - if (j == constraint.size() - 1 || isspace(constraint[j + 1])) - throw BadInputException("Incomplete number"); - if (j <= constraint.size() - 3 && (constraint[j + 1] == '+' || constraint[j + 1] == '-') && - isspace(constraint[j + 2])) - throw BadInputException("Incomplete number"); - } - if (!isdigit(test)) - digit_then_spaces = false; - else { - if (digit_then_spaces) - throw BadInputException("Incomplete number"); - // cout << "jjjj " << j << " |" << constraint[j+1] << "|" << endl; - if (j < constraint.size() - 1 && isspace(constraint[j + 1])) { - digit_then_spaces = true; - // cout << "Drin" << endl; - } - } - without_spaces += test; - } - if (!has_content) - throw BadInputException("Empty symbolic constraint"); - - // split into terms - // we separate by + and - - // except: first on lhs or rhs, between ( and ) and following e. 
- bool first_sign = true; - bool in_brackets = false; - bool relation_read = false; - size_t RHS_start = 0; - vector terms; - string current_term; - for (size_t j = 0; j < without_spaces.size(); ++j) { - char test = without_spaces[j]; - if (test == '(') - in_brackets = true; - if (test == ')') { - if (!in_brackets) - throw BadInputException("Closing bracket without opening bracket"); - in_brackets = false; - } - if (test == '+' || test == '-') { - if (!first_sign && !in_brackets) { - terms.push_back(current_term); - current_term.clear(); - } - } - first_sign = false; - - if (test == 'e') { - current_term += test; - if (j == without_spaces.size() - 1) - throw BadInputException("Incomplete number"); - if (without_spaces[j + 1] == '+' || without_spaces[j + 1] == '-') { - current_term += without_spaces[j + 1]; - j++; - } - continue; - } - - if (test == '=' || test == '<' || test == '>' || test == '~') { - terms.push_back(current_term); - current_term.clear(); - rel += test; - RHS_start = terms.size(); - if (relation_read) - throw BadInputException("Double relation in constraint"); - relation_read = true; - if (j == without_spaces.size() - 1) - throw BadInputException("Relation last character in constraint"); - if (without_spaces[j + 1] == '=') { - rel += without_spaces[j + 1]; - j++; - } - first_sign = true; - continue; - } - - current_term += test; - } - terms.push_back(current_term); - if (!relation_read) - throw BadInputException("No relation in constraint"); - - // for(size_t i=0;i (long)left.size()) - throw BadInputException("Index " + expo_string + " in symbolic constraint out of bounds"); - index--; - left[index] += side * sign * coeff; - } - else { // absolute term - right -= side * sign * coeff; - } - } - - // cout << "constraint " << left << rel << " " << right << endl; -} - -template -void read_constraints(istream& in, long dim, map > >& input_map, bool forced_hom) { - long nr_constraints; - in >> nr_constraints; - - if (in.fail() || nr_constraints < 0) { - throw BadInputException("Cannot read " + toString(nr_constraints) + " constraints!"); - } - - if (nr_constraints == 0) - return; - - bool symbolic = false; - - in >> std::ws; - int c = in.peek(); - if (c == 's') { - string dummy; - in >> dummy; - if (dummy != "symbolic") - throw BadInputException("Illegal keyword " + dummy + " in input!"); - symbolic = true; - } - - long hom_correction = 0; - if (forced_hom) - hom_correction = 1; - - for (long i = 0; i < nr_constraints; ++i) { - vector left(dim - hom_correction); - string rel, modulus_str; - Number right, modulus = 0; - - if (symbolic) { - read_symbolic_constraint(in, rel, left, right, modulus, forced_hom); - } - else { // ordinary constraint read here - for (long j = 0; j < dim - hom_correction; ++j) { - read_number(in, left[j]); - } - in >> rel; - read_number(in, right); - if (rel == "~") { - if (!read_modulus(in, modulus)) - // throw BadInputException("Congruence not allowed with field coefficients!"); - throw BadInputException("Error while reading modulus!"); - } - if (in.fail()) { - throw BadInputException("Error while reading constraint!"); - } - } - process_constraint(rel, left, right, modulus, input_map, forced_hom); - } -} - -template -bool read_sparse_vector(istream& in, vector& input_vec, long length) { - input_vec = vector(length, 0); - char dummy; - - while (in.good()) { - in >> std::ws; - int c = in.peek(); - if (c == ';') { - in >> dummy; // swallow ; - return true; - } - long pos; - in >> pos; - if (in.fail()) - return false; - pos--; - if (pos < 0 || pos >= 
length) - return false; - in >> std::ws; - c = in.peek(); - if (c != ':') - return false; - in >> dummy; // skip : - Number value; - read_number(in, value); - if (in.fail()) - return false; - input_vec[pos] = value; - } - - return false; -} - -template -bool read_formatted_vector(istream& in, vector& input_vec) { - input_vec.clear(); - in >> std::ws; - char dummy; - in >> dummy; // read first proper character - if (dummy != '[') - return false; - bool one_more_entry_required = false; - while (in.good()) { - in >> std::ws; - if (!one_more_entry_required && in.peek() == ']') { - in >> dummy; - return true; - } - Number number; - read_number(in, number); - if (in.fail()) - return false; - input_vec.push_back(number); - in >> std::ws; - one_more_entry_required = false; - if (in.peek() == ',' || in.peek() == ';') { // skip potential separator - in >> dummy; - one_more_entry_required = true; - } - } - return false; -} - -void read_polynomial(istream& in, string& polynomial) { - char c; - while (in.good()) { - in >> c; - if (in.fail()) - throw BadInputException("Error while reading polynomial!"); - if (c == ';') { - if (polynomial.size() == 0) - throw BadInputException("Error while reading polynomial!"); - return; - } - polynomial += c; - } -} - -template -bool read_formatted_matrix(istream& in, vector >& input_mat, bool transpose) { - input_mat.clear(); - in >> std::ws; - char dummy; - in >> dummy; // read first proper character - if (dummy != '[') - return false; - bool one_more_entry_required = false; - while (in.good()) { - in >> std::ws; - if (!one_more_entry_required && in.peek() == ']') { // closing ] found - in >> dummy; - if (transpose) - input_mat = transpose_mat(input_mat); - return true; - } - vector input_vec; - if (!read_formatted_vector(in, input_vec)) { - throw BadInputException("Error in reading input vector!"); - } - if (input_mat.size() > 0 && input_vec.size() != input_mat[0].size()) { - throw BadInputException("Rows of input matrix have unequal lengths!"); - } - input_mat.push_back(input_vec); - in >> std::ws; - one_more_entry_required = false; - if (in.peek() == ',' || in.peek() == ';') { // skip potential separator - in >> dummy; - one_more_entry_required = true; - } - } - - return false; -} - -template -void read_number_field(istream& in, renf_class& number_field) { - throw NumberFieldInputException(); -} - -#ifdef ENFNORMALIZ -template <> -void read_number_field(istream& in, renf_class& renf) { - char c; - string s; - in >> s; - if (s != "min_poly" && s != "minpoly") - throw BadInputException("Error in reading number field: expected keyword min_poly or minpoly"); - in >> ws; - c = in.peek(); - if (c != '(') - throw BadInputException("Error in reading number field: min_poly does not start with ("); - in >> c; - - string mp_string; - while (in.good()) { - c = in.peek(); - if (c == ')') { - in.get(c); - break; - } - in.get(c); - if (in.fail()) - throw BadInputException("Error in reading number field: min_poly not terminated by )"); - mp_string += c; - } - // omp_set_num_threads(1); - - in >> s; - if (s != "embedding") - throw BadInputException("Error in reading number field: expected keyword embedding"); - in >> ws; - string emb_string; - c = in.peek(); - if (c == '[') { - in >> c; - while (in.good()) { - in >> c; - if (c == ']') - break; - emb_string += c; - } - } - else - throw BadInputException("Error in reading number field: definition of embedding does not start with ["); - - if (c != ']') - throw BadInputException("Error in reading number field: definition of embedding 
does not end with ]"); - - if (in.fail()) - throw BadInputException("Could not read number field!"); - - renf = renf_class(mp_string, "a", emb_string); - // in >> set_renf(renf); - renf.set_istream(in); -} -#endif - -void read_num_param(istream& in, map& num_param_input, NumParam::Param numpar, const string& type_string) { - long value; - in >> value; - if (in.fail()) - throw BadInputException("Error in reading " + type_string); - num_param_input[numpar] = value; -} - -template -map > > readNormalizInput(istream& in, - OptionsHandler& options, - map& num_param_input, - string& polynomial, - renf_class& number_field) { - string type_string; - long i, j; - long nr_rows, nr_columns, nr_rows_or_columns; - InputType input_type; - Number number; - ConeProperty::Enum cp; - NumParam::Param numpar; - set num_par_already_set; - bool we_have_a_polynomial = false; - - map > > input_map; - - in >> std::ws; // eat up any leading white spaces - int c = in.peek(); - if (c == EOF) { - throw BadInputException("Empty input file!"); - } - bool new_input_syntax = !std::isdigit(c); - - if (new_input_syntax) { - long dim; - while (in.peek() == '/') { - skip_comment(in); - in >> std::ws; - } - in >> type_string; - if (!in.good() || type_string != "amb_space") { - throw BadInputException("First entry must be \"amb_space\"!"); - } - bool dim_known = false; - in >> std::ws; - c = in.peek(); - if (c == 'a') { - string dummy; - in >> dummy; - if (dummy != "auto") { - throw BadInputException("Bad amb_space value!"); - } - } - else { - in >> dim; - if (!in.good() || dim < 0) { - throw BadInputException("Bad amb_space value!"); - } - dim_known = true; - } - while (in.good()) { // main loop - - bool transpose = false; - in >> std::ws; // eat up any leading white spaces - c = in.peek(); - if (c == EOF) - break; - if (c == '/') { - skip_comment(in); - } - else { - in >> type_string; - if (in.fail()) { - throw BadInputException("Could not read type string!"); - } - if (std::isdigit(c)) { - throw BadInputException("Unexpected number " + type_string + " when expecting a type!"); - } - if (isConeProperty(cp, type_string)) { - options.activateInputFileConeProperty(cp); - continue; - } - if (isNumParam(numpar, type_string)) { - auto ns = num_par_already_set.find(numpar); - if (ns != num_par_already_set.end()) - throw BadInputException("Numerical parameter " + type_string + " set twice"); - read_num_param(in, num_param_input, numpar, type_string); - num_par_already_set.insert(numpar); - continue; - } - if (type_string == "LongLong") { - options.activateInputFileLongLong(); - continue; - } - if (type_string == "NoExtRaysOutput") { - options.activateNoExtRaysOutput(); - continue; - } - if (type_string == "NoMatricesOutput") { - options.activateNoMatricesOutput(); - continue; - } - if (type_string == "NoSuppHypsOutput") { - options.activateNoSuppHypsOutput(); - continue; - } - if (type_string == "number_field") { -#ifndef ENFNORMALIZ - throw BadInputException("number_field only allowed for Normaliz with e-antic"); -#else - read_number_field(in, number_field); -#endif - continue; - } - if (type_string == "total_degree") { - if (!dim_known) { - throw BadInputException("Ambient space must be known for " + type_string + "!"); - } - input_type = Type::grading; - save_matrix(input_map, input_type, - vector >(1, vector(dim + type_nr_columns_correction(input_type), 1))); - continue; - } - if (type_string == "nonnegative") { - if (!dim_known) { - throw BadInputException("Ambient space must be known for " + type_string + "!"); - } - input_type 
= Type::signs; - save_matrix(input_map, input_type, - vector >(1, vector(dim + type_nr_columns_correction(input_type), 1))); - continue; - } - if (type_string == "constraints") { - if (!dim_known) { - throw BadInputException("Ambient space must be known for " + type_string + "!"); - } - read_constraints(in, dim, input_map, false); - continue; - } - if (type_string == "hom_constraints") { - if (!dim_known) { - throw BadInputException("Ambient space must be known for " + type_string + "!"); - } - read_constraints(in, dim, input_map, true); - continue; - } - if (type_string == "polynomial") { - if (we_have_a_polynomial) - throw BadInputException("Only one polynomial allowed"); - read_polynomial(in, polynomial); - we_have_a_polynomial = true; - continue; - } - - input_type = to_type(type_string); - if (dim_known) - nr_columns = dim + type_nr_columns_correction(input_type); - - if (type_is_vector(input_type)) { - nr_rows_or_columns = nr_rows = 1; - in >> std::ws; // eat up any leading white spaces - c = in.peek(); - if (c == 'u') { // must be unit vector - string vec_kind; - in >> vec_kind; - if (vec_kind != "unit_vector") { - throw BadInputException("Error while reading " + type_string + ": unit_vector expected!"); - } - - long pos = 0; - in >> pos; - if (in.fail()) { - throw BadInputException("Error while reading " + type_string + " as a unit_vector!"); - } - - if (!dim_known) { - throw BadInputException("Ambient space must be known for unit vector " + type_string + "!"); - } - - vector > e_i = vector >(1, vector(nr_columns, 0)); - if (pos < 1 || pos > static_cast(e_i[0].size())) { - throw BadInputException("Error while reading " + type_string + " as a unit_vector " + toString(pos) + - "!"); - } - pos--; // in input file counting starts from 1 - e_i[0].at(pos) = 1; - save_matrix(input_map, input_type, e_i); - continue; - } // end unit vector - - if (c == 's') { // must be "sparse" - string vec_kind; - in >> vec_kind; - if (vec_kind != "sparse") { - throw BadInputException("Error while reading " + type_string + ": sparse vector expected!"); - } - - if (!dim_known) { - throw BadInputException("Ambient space must be known for sparse vector " + type_string + "!"); - } - - vector sparse_vec; - nr_columns = dim + type_nr_columns_correction(input_type); - bool success = read_sparse_vector(in, sparse_vec, nr_columns); - if (!success) { - throw BadInputException("Error while reading " + type_string + " as a sparse vector!"); - } - save_matrix(input_map, input_type, vector >(1, sparse_vec)); - continue; - } - - if (c == '[') { // must be formatted vector - vector formatted_vec; - bool success = read_formatted_vector(in, formatted_vec); - if (!dim_known) { - dim = formatted_vec.size() - type_nr_columns_correction(input_type); - dim_known = true; - nr_columns = dim + type_nr_columns_correction(input_type); - } - if (!success || (long)formatted_vec.size() != nr_columns) { - throw BadInputException("Error while reading " + type_string + " as a formatted vector!"); - } - save_matrix(input_map, input_type, vector >(1, formatted_vec)); - continue; - } // end formatted vector - } - else { // end vector, it is a matrix. 
Plain vector read as a one row matrix later on - in >> std::ws; - c = in.peek(); - - if (c != '[' && !std::isdigit(c)) { // must be transpose - string transpose_str; - in >> transpose_str; - if (transpose_str != "transpose") { - throw BadInputException("Illegal keyword " + transpose_str + " following matrix type!"); - } - transpose = true; - in >> std::ws; - c = in.peek(); - } - if (c == '[') { // it is a formatted matrix - vector > formatted_mat; - bool success = read_formatted_matrix(in, formatted_mat, transpose); - if (!success) { - throw BadInputException("Error while reading formatted matrix " + type_string + "!"); - } - if (formatted_mat.size() == 0) { // empty matrix - input_type = to_type(type_string); - save_empty_matrix(input_map, input_type); - continue; - } - if (!dim_known) { - dim = formatted_mat[0].size() - type_nr_columns_correction(input_type); - dim_known = true; - nr_columns = dim + type_nr_columns_correction(input_type); - } - - if ((long)formatted_mat[0].size() != nr_columns) { - throw BadInputException("Error while reading formatted matrix " + type_string + "!"); - } - - save_matrix(input_map, input_type, formatted_mat); - continue; - } // only plain matrix left - - in >> nr_rows_or_columns; // is number of columns if transposed - nr_rows = nr_rows_or_columns; // most of the time - } - - if (!dim_known) { - throw BadInputException("Ambient space must be known for plain matrix or vector " + type_string + "!"); - } - - if (transpose) - swap(nr_rows, nr_columns); - - if (in.fail() || nr_rows_or_columns < 0) { - throw BadInputException("Error while reading " + type_string + " (a " + toString(nr_rows) + "x" + - toString(nr_columns) + " matrix) !"); - } - if (nr_rows == 0) { - input_type = to_type(type_string); - save_empty_matrix(input_map, input_type); - continue; - } - - vector > M(nr_rows); - in >> std::ws; - c = in.peek(); - if (c == 's') { // must be sparse - string sparse_test; - in >> sparse_test; - if (sparse_test != "sparse") { - throw BadInputException("Error while reading " + type_string + ": sparse matrix expected!"); - } - for (long i = 0; i < nr_rows; ++i) { - bool success = read_sparse_vector(in, M[i], nr_columns); - if (!success) { - throw BadInputException("Error while reading " + type_string + ": corrupted sparse matrix"); - } - } - } - else { // dense matrix - for (i = 0; i < nr_rows; i++) { - M[i].resize(nr_columns); - for (j = 0; j < nr_columns; j++) { - read_number(in, M[i][j]); - // cout << M[i][j] << endl; - } - } - } - if (transpose) - M = transpose_mat(M); - save_matrix(input_map, input_type, M); - } - if (in.fail()) { - throw BadInputException("Error while reading " + type_string + " (a " + toString(nr_rows) + "x" + - toString(nr_columns) + " matrix) !"); - } - } - } - else { - // old input syntax - while (in.good()) { - in >> nr_rows; - if (in.fail()) - break; - in >> nr_columns; - if ((nr_rows < 0) || (nr_columns < 0)) { - throw BadInputException("Error while reading a " + toString(nr_rows) + "x" + toString(nr_columns) + " matrix !"); - } - vector > M(nr_rows, vector(nr_columns)); - for (i = 0; i < nr_rows; i++) { - for (j = 0; j < nr_columns; j++) { - read_number(in, M[i][j]); - } - } - - in >> type_string; - - if (in.fail()) { - throw BadInputException("Error while reading a " + toString(nr_rows) + "x" + toString(nr_columns) + " matrix!"); - } - - input_type = to_type(type_string); - - // check if this type already exists - save_matrix(input_map, input_type, M); - } - } - return input_map; -} diff -Nru 
normaliz-3.8.5+ds/source/libnormaliz/automorph.cpp normaliz-3.8.9+ds/source/libnormaliz/automorph.cpp --- normaliz-3.8.5+ds/source/libnormaliz/automorph.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/automorph.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -28,7 +28,7 @@ #include "libnormaliz/nmz_nauty.h" #include "libnormaliz/cone.h" #include "libnormaliz/full_cone.h" -#include "libnormaliz/map_operations.h" +// #include "libnormaliz/map_operations.h" namespace libnormaliz { diff -Nru normaliz-3.8.5+ds/source/libnormaliz/bottom.cpp normaliz-3.8.9+ds/source/libnormaliz/bottom.cpp --- normaliz-3.8.5+ds/source/libnormaliz/bottom.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/bottom.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,248 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include -#include - -#include -//#include -#include -#include - -#include "libnormaliz/general.h" -#include "libnormaliz/bottom.h" -#include "libnormaliz/vector_operations.h" -#include "libnormaliz/integer.h" -// #include "libnormaliz/full_cone.h" - -namespace libnormaliz { -using namespace std; - -long SubDivBound = 1000000; - -template -bool bottom_points_inner(Matrix& gens, - list >& local_new_points, - vector >& local_q_gens, - size_t& stellar_det_sum); - -template -void bottom_points(list >& new_points, const Matrix& given_gens, Integer VolumeBound) { - /* gens.pretty_print(cout); - cout << "=======================" << endl; - - gens.transpose().pretty_print(cout); - cout << "=======================" << endl;*/ - - Matrix gens, Trans, Trans_inv; - // given_gens.LLL_transform_transpose(gens,Trans,Trans_inv); // now in optimal_subdivision_point() - gens = given_gens; - - Integer volume; - // int dim = gens[0].size(); - Matrix Support_Hyperplanes = gens.invert(volume); - - vector grading; // = grading_; - if (grading.empty()) - grading = gens.find_linear_form(); - // cout << grading; - - list > bottom_candidates; - bottom_candidates.splice(bottom_candidates.begin(), new_points); - // Matrix(bottom_candidates).pretty_print(cout); - - if (verbose) { - verboseOutput() << "Computing bbottom points using projection " << endl; - } - - if (verbose) { - verboseOutput() << "simplex volume " << volume << endl; - } - - //---------------------------- begin stellar subdivision ------------------- - - size_t stellar_det_sum = 0; - vector > q_gens; // for successive stellar subdivision - q_gens.push_back(gens); - int level = 0; // level of subdivision - - std::exception_ptr tmp_exception; - bool skip_remaining = false; -#pragma omp parallel // reduction(+:stellar_det_sum) - { - try { - vector > local_q_gens; - list > local_new_points; - - while (!q_gens.empty()) { - if (skip_remaining) - break; - if (verbose) { -#pragma omp single - verboseOutput() << q_gens.size() << " simplices on level " << level++ << endl; - } - -#pragma omp for schedule(static) - for (size_t i = 0; i < q_gens.size(); ++i) { - if (skip_remaining) - continue; - - try { - bottom_points_inner(q_gens[i], local_new_points, local_q_gens, stellar_det_sum); - } catch (const std::exception&) { - tmp_exception = std::current_exception(); - skip_remaining = true; -#pragma omp flush(skip_remaining) - } - } - -#pragma omp single - { q_gens.clear(); } -#pragma omp critical(LOCALQGENS) - { q_gens.insert(q_gens.end(), local_q_gens.begin(), local_q_gens.end()); } - local_q_gens.clear(); -#pragma omp barrier - } - -#pragma omp critical(LOCALNEWPOINTS) - { new_points.splice(new_points.end(), local_new_points, local_new_points.begin(), local_new_points.end()); } - - } catch (const std::exception&) { - tmp_exception = std::current_exception(); - skip_remaining = true; -#pragma omp flush(skip_remaining) - } - - } // end parallel - - //---------------------------- end stellar subdivision ----------------------- - - if (!(tmp_exception == 0)) - std::rethrow_exception(tmp_exception); - - // cout << new_points.size() << " new points accumulated" << endl; - new_points.sort(); - new_points.unique(); - if (verbose) { - verboseOutput() << new_points.size() << " bottom points accumulated in total." 
<< endl; - verboseOutput() << "The sum of determinants of the stellar subdivision is " << stellar_det_sum << endl; - } - - /* for(auto& it : new_points) - it=Trans_inv.VxM(it); */ -} - -//----------------------------------------------------------------------------------------- - -template -bool bottom_points_inner(Matrix& gens, - list >& local_new_points, - vector >& local_q_gens, - size_t& stellar_det_sum) { - INTERRUPT_COMPUTATION_BY_EXCEPTION - - vector grading = gens.find_linear_form(); - Integer volume; - int dim = gens[0].size(); - Matrix Support_Hyperplanes = gens.invert(volume); - - if (volume < SubDivBound) { -#pragma omp atomic - stellar_det_sum += convertTo(volume); - return false; // not subdivided - } - - // try st4ellar subdivision - Support_Hyperplanes = Support_Hyperplanes.transpose(); - Support_Hyperplanes.make_prime(); - vector new_point; - - if (new_point.empty()) { - list > Dummy; - new_point = gens.optimal_subdivision_point(); // projection method - } - - if (!new_point.empty()) { - // if (find(local_new_points.begin(), local_new_points.end(),new_point) == local_new_points.end()) - local_new_points.push_back(new_point); - Matrix stellar_gens(gens); - - int nr_hyps = 0; - for (int i = 0; i < dim; ++i) { - if (v_scalar_product(Support_Hyperplanes[i], new_point) != 0) { - stellar_gens[i] = new_point; - local_q_gens.push_back(stellar_gens); - - stellar_gens[i] = gens[i]; - } - else - nr_hyps++; - } - //#pragma omp critical(VERBOSE) - // cout << new_point << " liegt in " << nr_hyps <<" hyperebenen" << endl; - return true; // subdivided - } - else { // could not subdivided -#pragma omp atomic - stellar_det_sum += convertTo(volume); - return false; - } -} - -// returns -1 if maximum is negative -template -double max_in_col(const Matrix& M, size_t j) { - Integer max = -1; - for (size_t i = 0; i < M.nr_of_rows(); ++i) { - if (M[i][j] > max) - max = M[i][j]; - } - return convert_to_double(max); -} - -// returns 1 if minimum is positive -template -double min_in_col(const Matrix& M, size_t j) { - Integer min = 1; - for (size_t i = 0; i < M.nr_of_rows(); ++i) { - if (M[i][j] < min) - min = M[i][j]; - } - return convert_to_double(min); -} - -#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported -template void bottom_points(list >& new_points, const Matrix& gens, long VolumeBound); -#endif // NMZ_MIC_OFFLOAD -template void bottom_points(list >& new_points, const Matrix& gens, long long VolumeBound); -template void bottom_points(list >& new_points, const Matrix& gens, mpz_class VolumeBound); - -} // namespace libnormaliz - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/bottom.h normaliz-3.8.9+ds/source/libnormaliz/bottom.h --- normaliz-3.8.5+ds/source/libnormaliz/bottom.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/bottom.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,43 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -#ifndef LIBNORMALIZ_BOTTOM_H -#define LIBNORMALIZ_BOTTOM_H - -#include -#include - -#include "libnormaliz/general.h" -#include "libnormaliz/matrix.h" - -namespace libnormaliz { -using namespace std; - -extern long SubDivBound; - -template -void bottom_points(list >& new_points, const Matrix& gens, Integer VolumeBound); - -} // namespace libnormaliz - -#endif // BOTTOM_H diff -Nru normaliz-3.8.5+ds/source/libnormaliz/collection.cpp normaliz-3.8.9+ds/source/libnormaliz/collection.cpp --- normaliz-3.8.5+ds/source/libnormaliz/collection.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/collection.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -21,18 +21,19 @@ * terms of service. */ -#include +#include #include #include #include -#include +#include #include "libnormaliz/cone.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/map_operations.h" -#include "libnormaliz/convert.h" +#include "libnormaliz/list_and_map_operations.h" +// #include "libnormaliz/convert.h" #include "libnormaliz/my_omp.h" #include "libnormaliz/collection.h" +#include "libnormaliz/full_cone.h" namespace libnormaliz { @@ -516,7 +517,17 @@ cout<< "***** Mini " << level << " " << my_place << " Gens " << GenKeys; cout << "mult " << multiplicity << " daughters " << Daughters; cout << "----------------------" << endl; -} +} + +#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported +template class ConeCollection; +#endif +template class ConeCollection; +template class ConeCollection; + +#ifdef ENFNORMALIZ +template class ConeCollection; +#endif } // namespace diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone_and_control.cpp normaliz-3.8.9+ds/source/libnormaliz/cone_and_control.cpp --- normaliz-3.8.5+ds/source/libnormaliz/cone_and_control.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone_and_control.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include "libnormaliz/general.cpp" -#include "libnormaliz/input_type.cpp" -#include "libnormaliz/cone_property.cpp" -#include "libnormaliz/list_operations.cpp" -#include "libnormaliz/collection.cpp" -#include "libnormaliz/cone.cpp" - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone.cpp normaliz-3.8.9+ds/source/libnormaliz/cone.cpp --- normaliz-3.8.5+ds/source/libnormaliz/cone.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -21,22 +21,23 @@ * terms of service. */ -#include +#include #include #include #include -#include +#include #include "libnormaliz/cone.h" #include "libnormaliz/vector_operations.h" #include "libnormaliz/project_and_lift.h" -#include "libnormaliz/map_operations.h" -#include "libnormaliz/convert.h" +// #include "libnormaliz/map_operations.h" +// #include "libnormaliz/convert.h" #include "libnormaliz/full_cone.h" #include "libnormaliz/descent.h" #include "libnormaliz/my_omp.h" #include "libnormaliz/output.h" #include "libnormaliz/collection.h" +#include "libnormaliz/face_lattice.h" namespace libnormaliz { using namespace std; @@ -51,6 +52,19 @@ */ template +void check_length_of_vectors_in_input(const map > >&multi_input_data, size_t dim){ + for (auto& it: multi_input_data){ + size_t prescribed_length = dim + type_nr_columns_correction(it.first); + for(auto& v: it.second){ + if(v.size() == 0) + throw BadInputException("Vectors of length 0 not allowed in input"); + if(v.size() != prescribed_length) + throw BadInputException("Inconsistent length of vectors in input"); + } + } +} + +template template void Cone::check_add_input(const map > >& multi_add_data) { // if(!keep_convex_hull_data) @@ -71,6 +85,10 @@ throw BadInputException("Additional inhomogeneous input only with inhomogeneous original input"); } check_consistency_of_dimension(multi_add_data); + int inhom_corr = 0; + if(inhomogeneous) + inhom_corr = 1; + check_length_of_vectors_in_input(multi_add_data,dim-inhom_corr); } template @@ -109,11 +127,36 @@ return multi_input_data_QQ; } +bool renf_allowed(InputType input_type) { + switch (input_type) { + case Type::congruences: + case Type::inhom_congruences: + case Type::lattice: + case Type::cone_and_lattice: + case Type::rational_lattice: + case Type::normalization: + case Type::integral_closure: + case Type::offset: + case Type::rational_offset: + case Type::rees_algebra: + case Type::lattice_ideal: + case Type::strict_signs: + case Type::strict_inequalities: + case Type::hilbert_basis_rec_cone: + case Type::open_facets: + return false; + break; + default: + return true; + break; + } +} + bool denominator_allowed(InputType input_type) { switch (input_type) { case Type::congruences: case Type::inhom_congruences: - case Type::grading: + case Type::grading: case Type::dehomogenization: case Type::lattice: case Type::normalization: @@ -123,6 +166,8 @@ case Type::lattice_ideal: case Type::signs: case Type::strict_signs: + case Type::scale: + case Type::strict_inequalities: case Type::projection_coordinates: case Type::hilbert_basis_rec_cone: case Type::open_facets: @@ -135,16 +180,143 @@ } template +vector > find_input_matrix(const map > >& multi_input_data, + const InputType type) { + typename map > >::const_iterator it; + it = multi_input_data.find(type); + if (it != multi_input_data.end()) + return (it->second); + + vector > dummy; + return 
(dummy); +} + + +template +void scale_matrix(vector >& mat, const vector& scale_axes, bool dual) { + for (size_t j = 0; j < scale_axes.size(); ++j) { + if (scale_axes[j] == 0) + continue; + for (size_t i = 0; i < mat.size(); ++i) { + if (dual) + mat[i][j] /= scale_axes[j]; + else + mat[i][j] *= scale_axes[j]; + } + } +} + +template +void scale_input(map > >& multi_input_data, const vector scale_axes) { + + vector ScaleHelp = scale_axes; + ScaleHelp.resize(scale_axes.size()-1); + + auto it = multi_input_data.begin(); + for (; it != multi_input_data.end(); ++it) { + switch (it->first) { + case Type::inhom_inequalities: + case Type::inequalities: + case Type::inhom_equations: + case Type::equations: + case Type::inhom_excluded_faces: + case Type::excluded_faces: + case Type::dehomogenization: + case Type::grading: + scale_matrix(it->second, scale_axes, true); // true = dual space + break; + case Type::polytope: + scale_matrix(it->second, ScaleHelp, false); + break; + case Type::cone: + case Type::subspace: + case Type::lattice: + case Type::saturation: + case Type::vertices: + case Type::offset: + scale_matrix(it->second, scale_axes, false); // false = primal space + break; + default: + break; + } + } +} + +template +void apply_cale(map > >& multi_input_data) { + vector > scale_mat = find_input_matrix(multi_input_data, Type::scale); + vector scale_axes = scale_mat[0]; + scale_input(multi_input_data,scale_axes); +} + +void process_rational_lattice(map > >& multi_input_data){ + + Matrix RatLat=find_input_matrix(multi_input_data, Type::rational_lattice); + Matrix RatOff = find_input_matrix(multi_input_data, Type::rational_offset); + + if(RatLat.nr_of_rows() == 0 && RatOff.nr_of_rows() == 0) + return; + + size_t dim; + if(RatLat.nr_of_rows() >0 ) + dim = RatLat.nr_of_columns(); + else + dim = RatOff.nr_of_columns(); + + vector Den(dim,1); + for(size_t i=0; i< RatLat.nr_of_rows(); ++i){ + for(size_t j=0; j 0){ + for(size_t j=0; jfirst)) + throw BadInputException("Some input type not allowed together with rational_lattice or offset"); + } + + if(RatLat.nr_of_rows() > 0) + multi_input_data[Type::lattice] = RatLat.get_elements(); + if(RatOff.nr_of_rows()>0) + multi_input_data[Type::offset] = RatOff.get_elements(); + scale_input(multi_input_data, Den); + + if(contains(multi_input_data, Type::scale)) + throw BadInputException("Explicit input type scale only allowed for field coefficients"); + vector > DenMat; + DenMat.push_back(Den); + multi_input_data[Type::scale] = DenMat; // we use scale to ship Den +} + +template map > > Cone::mpqclass_input_to_integer( const map > >& multi_input_data_const) { + + /* cout << "---------------" << endl; + for(auto& jt: multi_input_data_const){ + cout << jt.second; + cout << "---------------" << endl; + } */ + + map > > multi_input_data( + multi_input_data_const); // since we want to change it internally + + if(contains(multi_input_data, Type::rational_lattice) || contains(multi_input_data, Type::rational_offset)) + process_rational_lattice(multi_input_data); + // The input type polytope is replaced by cone+grading in this routine. // Nevertheless it appears in the subsequent routines. // But any implications of its appearance must be handled here already. // However, polytope can still be used without conversion to cone via libnormaliz !!!!! 
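process_rational_lattice above clears denominators column by column: Den[j] is the least common multiple of the denominators appearing in column j of rational_lattice and rational_offset, the input is rescaled by Den (primal data multiplied, dual data such as a grading divided), and Den is passed on through the internal scale entry. A sketch of the column-wise collection step, assuming GMP's C++ interface; the helper name is hypothetical, not the library function:

    // Illustrative only: per-column lcm of the denominators of a rational matrix,
    // as used for rational_lattice / rational_offset input.
    #include <gmpxx.h>
    #include <cstddef>
    #include <vector>

    std::vector<mpz_class> column_denominators(const std::vector<std::vector<mpq_class> >& M) {
        std::size_t dim = M.empty() ? 0 : M[0].size();
        std::vector<mpz_class> Den(dim, 1);
        for (const auto& row : M)
            for (std::size_t j = 0; j < dim; ++j)
                Den[j] = lcm(Den[j], row[j].get_den());  // gcd/lcm for mpz_class come with gmpxx
        return Den;
    }

For the rows (1/2, 1/3) and (1, 1/6) this gives Den = (2, 6); the lattice rows are scaled to (1, 2) and (2, 1), while a grading (g1, g2) becomes (g1/2, g2/6).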
- map > > multi_input_data( - multi_input_data_const); // since we want to change it internally - // since polytope will be converted to cone, we must do some checks here if (contains(multi_input_data, Type::polytope)) { polytope_in_input = true; @@ -251,18 +423,6 @@ } template -vector > find_input_matrix(const map > >& multi_input_data, - const InputType type) { - typename map > >::const_iterator it; - it = multi_input_data.find(type); - if (it != multi_input_data.end()) - return (it->second); - - vector > dummy; - return (dummy); -} - -template void insert_column(vector >& mat, size_t col, Integer entry) { if (mat.size() == 0) return; @@ -297,6 +457,7 @@ case Type::inhom_inequalities: // nothing to do case Type::inhom_equations: case Type::inhom_congruences: + case Type::inhom_excluded_faces: case Type::polyhedron: case Type::vertices: case Type::open_facets: @@ -342,6 +503,10 @@ template void Cone::modifyCone(const map > >& multi_add_input_const) { + + if(rational_lattice_in_input) + throw BadInputException("Modification of cone not possible with rational_lattice in construction"); + precomputed_extreme_rays=false; precomputed_support_hyperplanes=false; map > > multi_add_input(multi_add_input_const); @@ -396,6 +561,7 @@ } } } + delete_aux_cones(); is_Computed = ConeProperties(); setComputed(ConeProperty::Generators); if (Grading.size() > 0) @@ -445,8 +611,10 @@ //--------------------------------------------------------------------------- +//--------------------------------------------------------------------------- + template -Cone::~Cone() { +void Cone::delete_aux_cones(){ if (IntHullCone != NULL) delete IntHullCone; if (SymmCone != NULL) @@ -455,6 +623,11 @@ delete ProjCone; } +template +Cone::~Cone() { + delete_aux_cones(); +} + //--------------------------------------------------------------------------- template @@ -472,53 +645,6 @@ } template -void scale_matrix(vector >& mat, const vector& scale_axes, bool dual) { - for (size_t j = 0; j < scale_axes.size(); ++j) { - if (scale_axes[j] == 0) - continue; - for (size_t i = 0; i < mat.size(); ++i) { - if (dual) - mat[i][j] /= scale_axes[j]; - else - mat[i][j] *= scale_axes[j]; - } - } -} - -template -void scale_input(map > >& multi_input_data) { - vector > scale_mat = find_input_matrix(multi_input_data, Type::scale); - vector scale_axes = scale_mat[0]; - - auto it = multi_input_data.begin(); - for (; it != multi_input_data.end(); ++it) { - switch (it->first) { - case Type::inhom_inequalities: - case Type::inhom_equations: - case Type::inequalities: - case Type::equations: - case Type::dehomogenization: - case Type::grading: - scale_matrix(it->second, scale_axes, true); // true = dual space - break; - case Type::polytope: - case Type::cone: - case Type::subspace: - case Type::saturation: - case Type::vertices: - case Type::offset: - scale_matrix(it->second, scale_axes, false); // false = primal space - break; - case Type::signs: - throw BadInputException("signs not allowed with scale"); - break; - default: - break; - } - } -} - -template void check_types_precomputed(map > >& multi_input_data) { auto it = multi_input_data.begin(); @@ -544,10 +670,11 @@ initialize(); map > > multi_input_data(multi_input_data_const); if (contains(multi_input_data, Type::scale)) { - if (!using_renf()) - throw BadInputException("scale only allowed for field coefficients"); + if (using_renf()){ + apply_cale(multi_input_data); + } else - scale_input(multi_input_data); + throw BadInputException("Explicit nput type scale only allowed for field coefficients"); } 
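The check above restricts an explicit scale entry to algebraic (renf_elem_class) cones; for rational input the entry is only created internally from rational_lattice. The point of the primal/dual split in scale_matrix is that rescaling the axes must not change the value of a linear form on a point: primal data (generators, vertices, offset) are multiplied by the scale, dual data (inequalities, equations, grading, dehomogenization) are divided by it. A self-contained illustration of that invariant, using plain doubles and power-of-two scales purely for demonstration:

    // Illustrative only: scaling points by s and linear forms by 1/s leaves the
    // pairing <form, point> unchanged.
    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
        std::vector<double> point = {3, 5}, form = {2, 7}, s = {2, 4};
        double before = form[0] * point[0] + form[1] * point[1];
        for (std::size_t j = 0; j < s.size(); ++j) {
            point[j] *= s[j];   // primal: multiply
            form[j]  /= s[j];   // dual: divide
        }
        double after = form[0] * point[0] + form[1] * point[1];
        assert(before == after);   // 41 on both sides, exact for these scales
        return 0;
    }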
process_multi_input_inner(multi_input_data); } @@ -558,25 +685,27 @@ lattice_ideal_input = false; nr_latt_gen = 0, nr_cone_gen = 0; inhom_input = false; - - if (using_renf()) { //better in a table - if (contains(multi_input_data, Type::lattice_ideal) || contains(multi_input_data, Type::lattice) || - contains(multi_input_data, Type::cone_and_lattice) || contains(multi_input_data, Type::congruences) || - contains(multi_input_data, Type::inhom_congruences) - // || contains(multi_input_data,Type::dehomogenization) - || contains(multi_input_data, Type::offset) || contains(multi_input_data, Type::excluded_faces) || - contains(multi_input_data, Type::open_facets) || contains(multi_input_data, Type::hilbert_basis_rec_cone) || - contains(multi_input_data, Type::strict_inequalities) || contains(multi_input_data, Type::strict_signs)) - throw BadInputException("Input type not allowed for field coefficients"); + + auto it = multi_input_data.begin(); + if (using_renf()) { + for (; it != multi_input_data.end(); ++it) { + if(!renf_allowed(it->first)) + throw BadInputException("Some onput type not allowed for field coefficients"); + } } - - // inequalities_present=false; //control choice of positive orthant ?? Done differently + + if (!using_renf() && contains(multi_input_data, Type::scale)) { + AxesScaling = multi_input_data[Type::scale][0]; // only possible with rational_lattice + setComputed(ConeProperty::AxesScaling); + rational_lattice_in_input = true; + } // NEW: Empty matrices have syntactical influence - auto it = multi_input_data.begin(); + it = multi_input_data.begin(); for (; it != multi_input_data.end(); ++it) { switch (it->first) { case Type::inhom_inequalities: + case Type::inhom_excluded_faces: case Type::inhom_equations: case Type::inhom_congruences: case Type::strict_inequalities: @@ -612,6 +741,7 @@ break; case Type::lattice: case Type::saturation: + case Type::rational_lattice: nr_latt_gen++; break; case Type::vertices: @@ -622,18 +752,6 @@ break; } - /* switch (it->first) { // chceck existence of inrqualities - case Type::inhom_inequalities: - case Type::strict_inequalities: - case Type::strict_signs: - case Type::signs: - case Type::inequalities: - case Type::excluded_faces: - case Type::support_hyperplanes: - inequalities_present=true; - default: - break; - }*/ } INTERRUPT_COMPUTATION_BY_EXCEPTION @@ -708,9 +826,9 @@ if (contains(multi_input_data, Type::rees_algebra) || contains(multi_input_data, Type::polytope) || polytope_in_input) { throw BadInputException("Types polytope and rees_algebra not allowed with inhomogeneous input or dehomogenization!"); } - if (contains(multi_input_data, Type::excluded_faces)) { - throw BadInputException("Type excluded_faces not allowed with inhomogeneous input or dehomogenization!"); - } + // if (contains(multi_input_data, Type::excluded_faces)) { + // throw BadInputException("Type excluded_faces not allowed with inhomogeneous input or dehomogenization!"); + // } } /*if(contains(multi_input_data,Type::grading) && contains(multi_input_data,Type::polytope)){ // now superfluous throw BadInputException("No explicit grading allowed with polytope!"); @@ -736,6 +854,8 @@ size_t inhom_corr = 0; // correction in the inhom_input case if (inhom_input) inhom_corr = 1; + if( it->second.front().size() == 0) + throw BadInputException("Ambient space of dimension 0 not allowed"); dim = it->second.front().size() - type_nr_columns_correction(it->first) + inhom_corr; // We now process input types that are independent of generators, constraints, lattice_ideal @@ -744,6 
+864,9 @@ ExcludedFaces = find_input_matrix(multi_input_data, Type::excluded_faces); if (ExcludedFaces.nr_of_rows() == 0) ExcludedFaces = Matrix(0, dim); // we may need the correct number of columns + Matrix InhomExcludedFaces = find_input_matrix(multi_input_data, Type::inhom_excluded_faces); + if(InhomExcludedFaces.nr_of_rows() !=0) + ExcludedFaces.append(InhomExcludedFaces); // check for a grading vector > lf = find_input_matrix(multi_input_data, Type::grading); @@ -761,6 +884,8 @@ // cout << "Dim " << dim < 0) { - autom_codim_vectors=convertTo(lf[0][0]); + autom_codim_vectors=convertToLong(lf[0][0]); autom_codim_vectors_set=true; } lf = find_input_matrix(multi_input_data,Type::codim_bound_mult); if (lf.size() > 0) { - autom_codim_mult=convertTo(lf[0][0]); + autom_codim_mult=convertToLong(lf[0][0]); autom_codim_mult_set=true; }*/ @@ -882,7 +1007,10 @@ if(precomputed_support_hyperplanes && !cone_sat_ineq) throw BadInputException("Precomputed support hyperplanes do not support the cone"); - + + // Note: in the inhomogeneous case the original monoid generators as set hetre contain + // the verices. So the name is mathematically incorrect, but the different types will be separated + // in Full_Cone for the computation of generators OVER original monoid. if(cone_sat_eq && cone_sat_cong && cone_sat_ineq && Generators.nr_of_rows()!=0) set_original_monoid_generators(Generators); @@ -922,6 +1050,7 @@ // Ker.pretty_print(cout); assert(Ker.nr_of_rows() == 1); Generators[Generators.nr_of_rows() - 1] = Ker[0]; + InputGenerators[Generators.nr_of_rows() - 1] = Ker[0]; } BasisChangePointed = BasisChange; @@ -975,9 +1104,7 @@ if (Generators.nr_of_rows() != 0) { setComputed(ConeProperty::Generators); setComputed(ConeProperty::Sublattice); - } - - + } if (Inequalities.nr_of_rows() != 0 && !conversion_done) { if (inhomogeneous) @@ -1246,6 +1373,7 @@ case Type::inequalities: case Type::inhom_inequalities: case Type::excluded_faces: + case Type::inhom_excluded_faces: Inequalities.append(it.second); break; case Type::equations: @@ -1697,6 +1825,7 @@ dual_original_generators = false; general_no_grading_denom = false; polytope_in_input = false; + rational_lattice_in_input = false; face_codim_bound = -1; keep_convex_hull_data = false; @@ -1942,7 +2071,7 @@ return BasisChange.getRank(); } -template // computation depends on OriginalMonoidGenerators +template // computation depends on InputGenerators Integer Cone::getInternalIndex() { compute(ConeProperty::OriginalMonoidGenerators); return internal_index; @@ -1982,17 +2111,17 @@ template const Matrix& Cone::getOriginalMonoidGeneratorsMatrix() { compute(ConeProperty::OriginalMonoidGenerators); - return OriginalMonoidGenerators; + return InputGenerators; } template const vector >& Cone::getOriginalMonoidGenerators() { compute(ConeProperty::OriginalMonoidGenerators); - return OriginalMonoidGenerators.get_elements(); + return InputGenerators.get_elements(); } template size_t Cone::getNrOriginalMonoidGenerators() { compute(ConeProperty::OriginalMonoidGenerators); - return OriginalMonoidGenerators.nr_of_rows(); + return InputGenerators.nr_of_rows(); } template @@ -2012,21 +2141,21 @@ } template -const Matrix& Cone::getGeneratorsMatrix() { - compute(ConeProperty::Generators); - return Generators; +const Matrix& Cone::getTriangulationGeneratorsMatrix() { + compute(ConeProperty::TriangulationGenerators); + return TriangulationGenerators; } template -const vector >& Cone::getGenerators() { - compute(ConeProperty::Generators); - return Generators.get_elements(); +const vector 
>& Cone::getTriangulationGenerators() { + compute(ConeProperty::TriangulationGenerators); + return TriangulationGenerators.get_elements(); } template -size_t Cone::getNrGenerators() { - compute(ConeProperty::Generators); - return Generators.nr_of_rows(); +size_t Cone::getNrTriangulationGenerators() { + compute(ConeProperty::TriangulationGenerators); + return TriangulationGenerators.nr_of_rows(); } template @@ -2062,6 +2191,22 @@ } template +const Matrix& Cone::getExtremeRaysFloatMatrix() { + compute(ConeProperty::ExtremeRaysFloat); + return ExtremeRaysFloat; +} +template +const vector >& Cone::getExtremeRaysFloat() { + compute(ConeProperty::ExtremeRaysFloat); + return ExtremeRaysFloat.get_elements(); +} +template +size_t Cone::getNrExtremeRaysFloat() { + compute(ConeProperty::ExtremeRaysFloat); + return ExtremeRaysFloat.nr_of_rows(); +} + +template const Matrix& Cone::getSuppHypsFloatMatrix() { compute(ConeProperty::SuppHypsFloat); return SuppHypsFloat; @@ -2181,15 +2326,6 @@ || quality == ConeProperty::UnimodularTriangulation) ){ throw BadInputException("Illegal parameter in getTriangulation(ConeProperty::Enum quality)"); } - if(isComputed(quality)) // we have already what we want - return Triangulation; - if( ! (isComputed(ConeProperty::LatticePointTriangulation) || isComputed(ConeProperty::AllGeneratorsTriangulation) - || isComputed(ConeProperty::UnimodularTriangulation) ) ){ // ==> none of the refined computed - compute(quality); // compute the desired one - return Triangulation; - } - // remaining case: the computed refined triangulation is not the wanted one ==> start from scratch - is_Computed.reset(ConeProperty::Triangulation); compute(quality); return Triangulation; } @@ -2265,6 +2401,19 @@ } template +vector Cone::getAxesScaling() { + if(!isComputed(ConeProperty::AxesScaling)) + throw NotComputableException("AxesScaling is not a computation goal"); + return AxesScaling; +} + +template +vector Cone::getCoveringFace() { + compute(ConeProperty::CoveringFace); + return CoveringFace; +} + +template const Matrix& Cone::getHilbertBasisMatrix() { compute(ConeProperty::HilbertBasis); return HilbertBasis; @@ -2465,6 +2614,12 @@ } template +bool Cone::isEmptySemiOpen() { + compute(ConeProperty::IsEmptySemiOpen); + return empty_semiopen; +} + +template bool Cone::isInhomogeneous() { return inhomogeneous; } @@ -2539,18 +2694,13 @@ template const AutomorphismGroup& Cone::getAutomorphismGroup(ConeProperty::Enum quality) { + if (!(quality == ConeProperty::Automorphisms || quality == ConeProperty::RationalAutomorphisms || quality == ConeProperty::AmbientAutomorphisms || quality == ConeProperty::CombinatorialAutomorphisms || quality == ConeProperty::EuclideanAutomorphisms)) { throw BadInputException("Illegal parameter in getAutomorphismGroup(ConeProperty::Enum quality)"); } compute(quality); - is_Computed.reset(ConeProperty::Automorphisms); - is_Computed.reset(ConeProperty::RationalAutomorphisms); - is_Computed.reset(ConeProperty::AmbientAutomorphisms); - is_Computed.reset(ConeProperty::CombinatorialAutomorphisms); - is_Computed.reset(ConeProperty::EuclideanAutomorphisms); - setComputed(quality); return Automs; } @@ -2570,7 +2720,7 @@ template const map& Cone::getFaceLattice() { compute(ConeProperty::FaceLattice); - return FaceLattice; + return FaceLat; } template @@ -2585,6 +2735,24 @@ return f_vector; } +template +const map& Cone::getDualFaceLattice() { + compute(ConeProperty::DualFaceLattice); + return DualFaceLat; +} + +template +const vector& Cone::getDualIncidence() { + 
compute(ConeProperty::DualIncidence); + return DualSuppHypInd; +} + +template +vector Cone::getDualFVector() { + compute(ConeProperty::DualFVector); + return dual_f_vector; +} + //--------------------------------------------------------------------------- template @@ -2754,10 +2922,6 @@ //--------------------------------------------------------------------------- -template -void Cone::prepare_volume_computation(ConeProperties& ToCompute) { - assert(false); -} #ifdef ENFNORMALIZ template <> @@ -2768,7 +2932,7 @@ if (!inhomogeneous && !isComputed(ConeProperty::Grading)) throw NotComputableException("Volume needs a grading in the homogeneous case"); if (getRank() != dim) - throw NotComputableException("Normaliz requires full dimension for volume"); + throw NotComputableException("Normaliz requires full dimension for volume of algebraic polytope"); vector Grad; if (inhomogeneous) Grad = Dehomogenization; @@ -2798,24 +2962,58 @@ euclidean_height = sqrt(norm); } #endif - //--------------------------------------------------------------------------- template -template void Cone::compute_full_cone(ConeProperties& ToCompute) { - -#ifdef NMZ_EXTENDED_TESTS - if(!using_GMP() && !using_renf() && test_arith_overflow_full_cone) - throw ArithmeticException(0); -#endif - - if (ToCompute.test(ConeProperty::IsPointed) && Grading.size() == 0) { - if (verbose) { - verboseOutput() << "Checking pointedness first" << endl; - } - ConeProperties Dualize; - Dualize.set(ConeProperty::SupportHyperplanes); + if (change_integer_type) { + try { + compute_full_cone_inner(ToCompute); + } catch (const ArithmeticException& e) { + if (verbose) { + verboseOutput() << e.what() << endl; + verboseOutput() << "Restarting with a bigger type." << endl; + } + change_integer_type = false; + } + } + + if (!change_integer_type) { + if (!using_GMP() && !ToCompute.test(ConeProperty::DefaultMode)) { + compute_full_cone_inner(ToCompute); + } + else { + try { + compute_full_cone_inner(ToCompute); + } catch (const ArithmeticException& e) { // the nonly reason for failure is an overflow in a degree computation + if (verbose) { // so we can relax in default mode + verboseOutput() << e.what() << endl; + verboseOutput() << "Reducing computation goals." 
<< endl; + } + ToCompute.reset(ConeProperty::HilbertBasis); + ToCompute.reset(ConeProperty::HilbertSeries); + compute_full_cone_inner(ToCompute); + } + } + } +} + + +template +template +void Cone::compute_full_cone_inner(ConeProperties& ToCompute) { + +#ifdef NMZ_EXTENDED_TESTS + if(!using_GMP() && !using_renf() && test_arith_overflow_full_cone) + throw ArithmeticException(0); +#endif + + if (ToCompute.test(ConeProperty::IsPointed) && Grading.size() == 0) { + if (verbose) { + verboseOutput() << "Checking pointedness first" << endl; + } + ConeProperties Dualize; + Dualize.set(ConeProperty::SupportHyperplanes); Dualize.set(ConeProperty::ExtremeRays); compute(Dualize); } @@ -2828,6 +3026,9 @@ /* activate bools in FC */ + if(ToCompute.test(ConeProperty::IsEmptySemiOpen) && !isComputed(ConeProperty::IsEmptySemiOpen)) + FC.check_semiopen_empty = true; + if(ToCompute.test(ConeProperty::FullConeDynamic)){ FC.do_supphyps_dynamic=true; if(IntHullNorm.size() > 0) @@ -2863,7 +3064,7 @@ if (ToCompute.test(ConeProperty::ConeDecomposition)) { FC.do_cone_dec = true; } - if (ToCompute.test(ConeProperty::Multiplicity)) { + if (ToCompute.test(ConeProperty::Multiplicity) || (using_renf() && ToCompute.test(ConeProperty::Volume))) { FC.do_multiplicity = true; } if (ToCompute.test(ConeProperty::TriangulationDetSum)) { @@ -2875,7 +3076,7 @@ if (ToCompute.test(ConeProperty::NoSubdivision)) { FC.use_bottom_points = false; } - if (ToCompute.test(ConeProperty::Deg1Elements)) { + if (ToCompute.test(ConeProperty::Deg1Elements) && !using_renf()) { FC.do_deg1_elements = true; } if (ToCompute.test(ConeProperty::StanleyDec)) { @@ -2985,12 +3186,13 @@ FC.do_integrally_closed || FC.keep_triangulation || FC.do_integrally_closed || FC.do_cone_dec || FC.do_determinants || FC.do_triangulation_size || FC.do_deg1_elements || FC.do_default_mode; - // Do we really need the Full_Cone? + // Do we really need the Full_Cone? ALREADY CHECKED - if (!must_triangulate && !FC.do_automorphisms && isComputed(ConeProperty::SupportHyperplanes) && + /* if (!must_triangulate && !FC.do_automorphisms && isComputed(ConeProperty::SupportHyperplanes) && isComputed(ConeProperty::ExtremeRays) && !ToCompute.test(ConeProperty::Grading) && - !ToCompute.test(ConeProperty::IsPointed) && !ToCompute.test(ConeProperty::ClassGroup)) - return; + !ToCompute.test(ConeProperty::IsPointed) && !ToCompute.test(ConeProperty::ClassGroup) && + !ToCompute.test(ConeProperty::IsEmptySemiOpen) ) + return; */ // restore if usaeful if (!must_triangulate && keep_convex_hull_data && ConvHullData.SLR.equal(BasisChangePointed) && @@ -3007,11 +3209,12 @@ } catch (const NotIntegrallyClosedException&) { } setComputed(ConeProperty::Sublattice); + extract_data(FC, ToCompute); + ToCompute.reset(is_Computed); // make sure we minimize the excluded faces if requested if (ToCompute.test(ConeProperty::ExcludedFaces) || ToCompute.test(ConeProperty::SupportHyperplanes)) { - FC.prepare_inclusion_exclusion(); + FC.prepare_inclusion_exclusion(); // WHY THIS ?????? 
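compute_full_cone is now a dispatcher around compute_full_cone_inner: the computation is first attempted with machine integers and, if that raises an ArithmeticException, repeated with the cone's full-precision integer type; where even that can overflow (degree computations), default mode drops HilbertBasis and HilbertSeries and recomputes the rest. The same try-fast-then-widen pattern in a self-contained form, with std::overflow_error standing in for the library's ArithmeticException and hypothetical helper names:

    // Illustrative only: machine arithmetic first, GMP integers as fallback.
    #include <gmpxx.h>
    #include <climits>
    #include <stdexcept>

    // Nonnegative factors assumed, to keep the overflow check simple.
    long product_machine(long a, long b) {
        if (a != 0 && b > LONG_MAX / a)
            throw std::overflow_error("machine integer overflow");
        return a * b;
    }

    mpz_class product(long a, long b) {
        try {
            return mpz_class(product_machine(a, b));   // fast path
        } catch (const std::overflow_error&) {
            return mpz_class(a) * mpz_class(b);        // retry with full precision
        }
    }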
} - extract_data(FC, ToCompute); if (isComputed(ConeProperty::IsPointed) && pointed) setComputed(ConeProperty::MaximalSubspace); } catch (const NonpointedException&) { @@ -3023,137 +3226,9 @@ } FC = Full_Cone(Matrix(1)); // to kill the old FC (almost) pass_to_pointed_quotient(); - compute_full_cone(ToCompute); - } -} - -#ifdef ENFNORMALIZ -template <> -template -void Cone::compute_full_cone(ConeProperties& ToCompute) { - if (ToCompute.test(ConeProperty::IsPointed) && Grading.size() == 0) { - if (verbose) { - verboseOutput() << "Checking pointedness first" << endl; - } - ConeProperties Dualize; - Dualize.set(ConeProperty::SupportHyperplanes); - Dualize.set(ConeProperty::ExtremeRays); - compute(Dualize); - } - - Matrix FC_Gens; - - BasisChangePointed.convert_to_sublattice(FC_Gens, Generators); - Full_Cone FC(FC_Gens, !ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid)); - // !ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid) blocks make_prime in full_cone.cpp - - /* activate bools in FC */ - - if(ToCompute.test(ConeProperty::FullConeDynamic)){ - FC.do_supphyps_dynamic=true; - if(IntHullNorm.size() > 0) - BasisChangePointed.convert_to_sublattice_dual(FC.IntHullNorm,IntHullNorm); - } - - FC.verbose = verbose; - FC.renf_degree = renf_degree; - - FC.inhomogeneous = inhomogeneous; - - if (ToCompute.test(ConeProperty::Triangulation)) { - FC.keep_triangulation = true; - } - - if (ToCompute.test(ConeProperty::Multiplicity) || ToCompute.test(ConeProperty::Volume)) { - FC.do_multiplicity = true; - } - - if (ToCompute.test(ConeProperty::ConeDecomposition)) { - FC.do_cone_dec = true; - } - - if (ToCompute.test(ConeProperty::TriangulationDetSum)) { - FC.do_determinants = true; - } - if (ToCompute.test(ConeProperty::TriangulationSize)) { - FC.do_triangulation = true; - } - if (ToCompute.test(ConeProperty::KeepOrder)) { - FC.keep_order = true; - } - - /* Give extra data to FC */ - if (isComputed(ConeProperty::ExtremeRays)) { - FC.Extreme_Rays_Ind = ExtremeRaysIndicator; - FC.is_Computed.set(ConeProperty::ExtremeRays); - } - - if (inhomogeneous) { - BasisChangePointed.convert_to_sublattice_dual_no_div(FC.Truncation, Dehomogenization); - } - - if (SupportHyperplanes.nr_of_rows() != 0) { - BasisChangePointed.convert_to_sublattice_dual(FC.Support_Hyperplanes, SupportHyperplanes); - } - if (isComputed(ConeProperty::SupportHyperplanes)) { - FC.is_Computed.set(ConeProperty::SupportHyperplanes); - FC.do_all_hyperplanes = false; - } - - if (isComputed(ConeProperty::Grading)) { - BasisChangePointed.convert_to_sublattice_dual(FC.Grading, Grading); - FC.is_Computed.set(ConeProperty::Grading); - } - - if(ToCompute.test(ConeProperty::Automorphisms)){ - FC.do_automorphisms = true; - FC.quality_of_automorphisms = AutomParam::algebraic; - } - - bool must_triangulate = FC.do_h_vector || FC.do_Hilbert_basis || FC.do_multiplicity || FC.do_Stanley_dec || - FC.do_module_rank || FC.do_module_gens_intcl || FC.do_bottom_dec || FC.do_hsop || - FC.do_integrally_closed || FC.keep_triangulation || FC.do_integrally_closed || FC.do_cone_dec || - FC.do_determinants || FC.do_triangulation_size || FC.do_deg1_elements || FC.do_default_mode; - - FC.keep_convex_hull_data = keep_convex_hull_data; - - if (!must_triangulate && keep_convex_hull_data && ConvHullData.SLR.equal(BasisChangePointed) && - ConvHullData.nr_threads == omp_get_max_threads() && ConvHullData.Generators.nr_of_rows() > 0) { - FC.keep_order = true; - FC.restore_previous_vcomputation(ConvHullData, true); // true=primal - } - - /* do the computation 
*/ - - try { - try { - FC.compute(); - } catch (const NotIntegrallyClosedException&) { - } - setComputed(ConeProperty::Sublattice); - // make sure we minimize the excluded faces if requested - - extract_data(FC, ToCompute); - if (isComputed(ConeProperty::IsPointed) && pointed) - setComputed(ConeProperty::MaximalSubspace); - } catch (const NonpointedException&) { - if(precomputed_extreme_rays) - throw BadInputException("Cone not pointed for precomputed data"); - setComputed(ConeProperty::Sublattice); - extract_data(FC, ToCompute); - if (ToCompute.test(ConeProperty::Deg1Elements) || ToCompute.test(ConeProperty::ModuleGenerators) || - ToCompute.test(ConeProperty::Volume)) - throw NotComputableException("Normaliz requires pointedness for lattice points or volume"); - - if (verbose) { - verboseOutput() << "Cone not pointed. Restarting computation." << endl; - } - FC = Full_Cone(Matrix(1)); // to kill the old FC (almost) - pass_to_pointed_quotient(); - compute_full_cone(ToCompute); + compute_full_cone_inner(ToCompute); } } -#endif //--------------------------------------------------------------------------- @@ -3295,6 +3370,34 @@ return; } +// If this function is called, either no type of automorphisms has been computed +// or the computed one is different than the one asked for +// So we can reset all of them. +template +void Cone::prepare_automorphisms() { + + is_Computed.reset(ConeProperty::Automorphisms); + is_Computed.reset(ConeProperty::RationalAutomorphisms); + is_Computed.reset(ConeProperty::AmbientAutomorphisms); + is_Computed.reset(ConeProperty::CombinatorialAutomorphisms); + is_Computed.reset(ConeProperty::EuclideanAutomorphisms); +} + +// Similarly for triangulations +// If we have the basic triangulation already, we restore it. +template +void Cone::prepare_refined_triangulation() { + + if(isComputed(ConeProperty::Triangulation)){ + Triangulation = BasicTriangulation; + TriangulationGenerators = BasicTriangulationGenerators; + } + + is_Computed.reset(ConeProperty::AllGeneratorsTriangulation); + is_Computed.reset(ConeProperty::UnimodularTriangulation); + is_Computed.reset(ConeProperty::LatticePointTriangulation); +} + template void Cone::handle_dynamic(const ConeProperties& ToCompute) { if (ToCompute.test(ConeProperty::Dynamic)) @@ -3358,7 +3461,7 @@ template ConeProperties Cone::compute(ConeProperties ToCompute) { - // cout << "AAAA " << ToCompute << endl; + // cout << "AAAA " << ToCompute << " IIIII " << inhomogeneous << endl; size_t nr_computed_at_start = is_Computed.count(); @@ -3385,6 +3488,15 @@ else setComputed(ConeProperty::Grading); } + + // we don't want a different order of the generators if an order sensitive goal + // has already been computed + if( (isComputed(ConeProperty::Triangulation) || isComputed(ConeProperty::StanleyDec)) + && (ToCompute.test(ConeProperty::Triangulation) || ToCompute.test(ConeProperty::StanleyDec)) ){ + Generators = BasicTriangulationGenerators; + ToCompute.set(ConeProperty::KeepOrder); + is_Computed.reset(ConeProperty::ExtremeRays); // we may have lost ExtremeRaysIndicator + } if(ToCompute.test(ConeProperty::NoGradingDenom)){ GradingDenom = 1; @@ -3421,6 +3533,9 @@ if (!isComputed(ConeProperty::OriginalMonoidGenerators) && !dual_original_generators) throw BadInputException("KeepOrder can only be set if the cone or the dual has original generators"); } + + if(ToCompute.test(ConeProperty::IsEmptySemiOpen) && ExcludedFaces.nr_of_rows() == 0) + throw BadInputException("IsEmptySemiOpen can only be computed with excluded faces"); 
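The new guard above couples ConeProperty::IsEmptySemiOpen to the presence of excluded faces in the input. A hypothetical usage sketch (assuming libnormaliz's usual two-pair Cone constructor; not taken from the package):

    // Illustrative only: is the semiopen cone obtained from the positive quadrant
    // by excluding the facet x1 = 0 empty?
    #include <libnormaliz/cone.h>
    #include <vector>

    bool semiopen_empty_example() {
        using namespace libnormaliz;
        std::vector<std::vector<long long> > rays     = { {1, 0}, {0, 1} };
        std::vector<std::vector<long long> > excluded = { {1, 0} };  // the linear form x1
        Cone<long long> C(Type::cone, rays, Type::excluded_faces, excluded);
        return C.isEmptySemiOpen();   // false: points with x1 > 0 remain
    }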
INTERRUPT_COMPUTATION_BY_EXCEPTION @@ -3448,11 +3563,19 @@ ToCompute.set(ConeProperty::NakedDual); } // to control the computation of rational solutions in the inhomogeneous case + + if(using_renf()) + ToCompute.check_Q_permissible(false); // before implications! ToCompute.check_conflicting_variants(); - ToCompute.set_preconditions(inhomogeneous, using_renf()); + ToCompute.set_preconditions(inhomogeneous, using_renf()); + + if(using_renf()) + ToCompute.check_Q_permissible(true); // after implications! + if(ToCompute.test(ConeProperty::Sublattice) && !isComputed(ConeProperty::Generators)) ToCompute.set(ConeProperty::ExtremeRays, ConeProperty::SupportHyperplanes); + ToCompute.check_sanity(inhomogeneous); if (inhomogeneous) { if (Grading.size() == 0) { @@ -3463,6 +3586,12 @@ } } + prepare_refined_triangulation(); + prepare_automorphisms(); + + // ToCompute.set_default_goals(inhomogeneous,using_renf()); + ToCompute.check_sanity(inhomogeneous); + ToCompute.reset(is_Computed); @@ -3481,6 +3610,12 @@ } } + // to protect against intermediate computaions of generators in interactive use + if (ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid) || ToCompute.test(ConeProperty::IsIntegrallyClosed) ) { + Generators = InputGenerators; + is_Computed.reset(ConeProperty::ExtremeRays); + } + /* if(!inhomogeneous && ToCompute.test(ConeProperty::NoGradingDenom) && Grading.size()==0) throw BadInputException("Options require an explicit grading."); */ @@ -3492,7 +3627,18 @@ return ConeProperties(); // HSOP if HSOP is applied to an already computed Hilbert series } // Alternatively one could do complete_HilbertSeries_comp(ToCompute) // at the very beginning of this function - + + /*if(ToCompute.test(ConeProperty::IsEmptySemiOpen) && !isComputed(ConeProperty::IsEmptySemiOpen)){ + compute_generators(ToCompute); + ConeProperties ToComputeFirst; + ToComputeFirst.set(ConeProperty::Generators); + ToComputeFirst.set(ConeProperty::SupportHyperplanes); + ToComputeFirst.set(ConeProperty::ExtremeRays); + ToComputeFirst.set(ConeProperty::IsEmptySemiOpen); + compute_full_cone(ToComputeFirst); + ToCompute.reset(is_Computed); + }*/ + check_integrally_closed(ToCompute); // check cheap necessary conditions try_multiplicity_of_para(ToCompute); @@ -3516,6 +3662,11 @@ compute_projection(ToCompute); INTERRUPT_COMPUTATION_BY_EXCEPTION + +#ifdef ENFNORMALIZ + if(using_renf()) + prepare_volume_computation(ToCompute); +#endif treat_polytope_as_being_hom_defined(ToCompute); // if necessary @@ -3528,7 +3679,8 @@ return ConeProperties(); } - try_approximation_or_projection(ToCompute); + if(!using_renf()) + try_approximation_or_projection(ToCompute); ToCompute.reset(is_Computed); if (ToCompute.goals().none()) { @@ -3560,15 +3712,16 @@ // cout << "SSSS " << ToCompute.full_cone_goals() << endl; - if (ToCompute.full_cone_goals().any()) { + if (ToCompute.full_cone_goals(using_renf()).any()) { compute_generators(ToCompute); if (!isComputed(ConeProperty::Generators)) { throw FatalException("Could not get Generators."); } } + ToCompute.reset(is_Computed); - /* cout << "TTTT " << ToCompute.full_cone_goals() << endl; - cout << "TTTT IIIII " << ToCompute.full_cone_goals() << endl;*/ + /* cout << "TTTT " << ToCompute.full_cone_goals(using_renf()) << endl;*/ + /* cout << "TTTT IIIII " << ToCompute.full_cone_goals() << endl;*/ if (rees_primary && (ToCompute.test(ConeProperty::ReesPrimaryMultiplicity) || ToCompute.test(ConeProperty::Multiplicity) || ToCompute.test(ConeProperty::HilbertSeries) || ToCompute.test(ConeProperty::DefaultMode))) { 
@@ -3583,50 +3736,27 @@ return ConeProperties(); } - try_Hilbert_Series_from_lattice_points(ToCompute); + if(!using_renf()) + try_Hilbert_Series_from_lattice_points(ToCompute); ToCompute.reset(is_Computed); complete_HilbertSeries_comp(ToCompute); complete_sublattice_comp(ToCompute); if (ToCompute.goals().none()) { return ConeProperties(); } + + // the actual computation + + if (isComputed(ConeProperty::SupportHyperplanes) && using_renf()) + ToCompute.reset(ConeProperty::DefaultMode); - /* cout << "UUUU " << ToCompute.full_cone_goals() << endl; - cout << "UUUU All " << ToCompute << endl; + /* cout << "UUUU " << ToCompute.full_cone_goals(using_renf()) << endl;*/ + /* cout << "UUUU All " << ToCompute << endl; cout << "UUUU IIIII " << ToCompute.full_cone_goals() << endl;*/ // the computation of the full cone - if (ToCompute.full_cone_goals().any()) { - if (change_integer_type) { - try { - compute_full_cone(ToCompute); - } catch (const ArithmeticException& e) { - if (verbose) { - verboseOutput() << e.what() << endl; - verboseOutput() << "Restarting with a bigger type." << endl; - } - change_integer_type = false; - } - } - - if (!change_integer_type) { - if (!using_GMP() && !ToCompute.test(ConeProperty::DefaultMode)) { - compute_full_cone(ToCompute); - } - else { - try { - compute_full_cone(ToCompute); - } catch (const ArithmeticException& e) { // the nonly reason for failure is an overflow in a degree computation - if (verbose) { // so we can relax in default mode - verboseOutput() << e.what() << endl; - verboseOutput() << "Reducing computation goals." << endl; - } - ToCompute.reset(ConeProperty::HilbertBasis); - ToCompute.reset(ConeProperty::HilbertSeries); - compute_full_cone(ToCompute); - } - } - } + if (ToCompute.full_cone_goals(using_renf()).any()) { + compute_full_cone(ToCompute); } // cout << " VVVV " << ToCompute.full_cone_goals() << endl; @@ -3635,6 +3765,10 @@ find_witness(ToCompute); } + if(using_renf()) + compute_lattice_points_in_polytope(ToCompute); + ToCompute.reset(is_Computed); // already computed + if(precomputed_extreme_rays && inhomogeneous) compute_affine_dim_and_recession_rank(); @@ -3660,6 +3794,7 @@ compute_vertices_float(ToCompute); compute_supp_hyps_float(ToCompute); + compute_extreme_rays_float(ToCompute); if (ToCompute.test(ConeProperty::WeightedEhrhartSeries)) compute_weighted_Ehrhart(ToCompute); @@ -3692,113 +3827,8 @@ return ToCompute; } -#ifdef ENFNORMALIZ -template <> -ConeProperties Cone::compute(ConeProperties ToCompute) { - - handle_dynamic(ToCompute); - -#ifndef NMZ_NAUTY - if ( ToCompute.test(ConeProperty::Automorphisms) || ToCompute.test(ConeProperty::RationalAutomorphisms) || - ToCompute.test(ConeProperty::AmbientAutomorphisms) || ToCompute.test(ConeProperty::CombinatorialAutomorphisms) || - ToCompute.test(ConeProperty::EuclideanAutomorphisms)) - throw BadInputException("automorphism groups only computable with nauty"); -#endif - - ToCompute.reset(is_Computed); - if (ToCompute.none()) { - return ConeProperties(); - } - set_parallelization(); - if (ToCompute.test(ConeProperty::GradingIsPositive)) { - if (Grading.size() == 0) - throw BadInputException("No grading declared that could be positive."); - else - setComputed(ConeProperty::Grading); - } - - if(ToCompute.test(ConeProperty::NoGradingDenom)){ - GradingDenom = 1; - setComputed(ConeProperty::GradingDenom); - } - - change_integer_type = false; - - if (BasisMaxSubspace.nr_of_rows() > 0 && !isComputed(ConeProperty::MaximalSubspace)) { - BasisMaxSubspace = Matrix(0, dim); - 
compute(ConeProperty::MaximalSubspace); - } - - ToCompute.check_Q_permissible(false); // before implications! - ToCompute.reset(is_Computed); - - ToCompute.set_preconditions(inhomogeneous, using_renf()); - - ToCompute.check_Q_permissible(true); // after implications! - - // ToCompute.prepare_compute_options(inhomogeneous, using_renf()); - - // ToCompute.set_default_goals(inhomogeneous,using_renf()); - ToCompute.check_sanity(inhomogeneous); - - /* preparation: get generators if necessary */ - compute_generators(ToCompute); - - if (!isComputed(ConeProperty::Generators)) { - throw FatalException("Could not get Generators."); - } - - ToCompute.reset(is_Computed); // already computed - if (ToCompute.goals().none()) { - return ConeProperties(); - } - - prepare_volume_computation(ToCompute); - - // the actual computation - - if (isComputed(ConeProperty::SupportHyperplanes)) - ToCompute.reset(ConeProperty::DefaultMode); - - if (ToCompute.full_cone_goals().any() || ToCompute.test(ConeProperty::Volume)) { - compute_full_cone(ToCompute); - } - compute_projection(ToCompute); - - if(precomputed_extreme_rays && inhomogeneous) - compute_affine_dim_and_recession_rank(); - - compute_lattice_points_in_polytope(ToCompute); - - make_face_lattice(ToCompute); - - compute_combinatorial_automorphisms(ToCompute); - compute_euclidean_automorphisms(ToCompute); - - if (ToCompute.test(ConeProperty::IntegerHull)) { - compute_integer_hull(); - } - - compute_refined_triangulation(ToCompute); - - complete_sublattice_comp(ToCompute); - - /* check if everything is computed */ - ToCompute.reset(is_Computed); // remove what is now computed - - compute_vertices_float(ToCompute); - compute_supp_hyps_float(ToCompute); - ToCompute.reset(is_Computed); - - if (!ToCompute.test(ConeProperty::DefaultMode) && ToCompute.goals().any()) { - throw NotComputableException(ToCompute.goals()); - } - ToCompute.reset_compute_options(); - return ToCompute; -} -#endif //--------------------------------------------------------------------------- @@ -3908,6 +3938,8 @@ Dual_Cone.dualize_cone(); } catch (const NonpointedException&) { }; // we don't mind if the dual cone is not pointed + + // cout << "GGGGG " << Dual_Cone.is_Computed << endl; if (Dual_Cone.isComputed(ConeProperty::SupportHyperplanes)) { if (keep_convex_hull_data) { @@ -3918,7 +3950,6 @@ // Dual_Cone.getSupportHyperplanes()); extract_supphyps(Dual_Cone, Generators, false); // false means: no dualization setComputed(ConeProperty::Generators); - check_gens_vs_reference(); // get minmal set of support_hyperplanes if possible if (Dual_Cone.isComputed(ConeProperty::ExtremeRays)) { @@ -4225,24 +4256,6 @@ } //--------------------------------------------------------------------------- -template -void Cone::check_gens_vs_reference() { - if (ReferenceGenerators.nr_of_rows() > 0) { - if (!Generators.equal(ReferenceGenerators)) { - Triangulation.clear(); - StanleyDec.clear(); - is_Computed.reset(ConeProperty::Triangulation); - is_Computed.reset(ConeProperty::StanleyDec); - is_Computed.reset(ConeProperty::TriangulationSize); - is_Computed.reset(ConeProperty::TriangulationDetSum); - is_Computed.reset(ConeProperty::IsTriangulationPartial); - is_Computed.reset(ConeProperty::IsTriangulationNested); - is_Computed.reset(ConeProperty::ConeDecomposition); - } - } -} - -//--------------------------------------------------------------------------- // This function creates convex hull data from precomputed support hyperplanes and extreme rays // so that these can be used in interactive mode for the 
modification of the originally constructed cone @@ -4422,10 +4435,14 @@ verboseOutput() << "transforming data..." << flush; } + // It is important to extract the generators from the full cone. + // The generators extracted from the full cone are the "purified" versions of + // the generators with which the full cone was constructed. Since the order + // can change, ExtremeRays is reset. Will be set again below. if (FC.isComputed(ConeProperty::Generators)) { BasisChangePointed.convert_from_sublattice(Generators, FC.getGenerators()); setComputed(ConeProperty::Generators); - check_gens_vs_reference(); + is_Computed.reset(ConeProperty::ExtremeRays); } if (keep_convex_hull_data) { @@ -4438,6 +4455,16 @@ setComputed(ConeProperty::MaximalSubspace); setComputed(ConeProperty::IsPointed); } + + if(FC.isComputed(ConeProperty::IsEmptySemiOpen)){ + empty_semiopen = false; + if(FC.index_covering_face < ExcludedFaces.nr_of_rows()){ + empty_semiopen = true; + CoveringFace = ExcludedFaces[FC.index_covering_face]; + setComputed(ConeProperty::CoveringFace); + } + setComputed(ConeProperty::IsEmptySemiOpen); + } Integer local_grading_denom; if(is_Computed.goals_using_grading(inhomogeneous).any()) // in this case we do not pull @@ -4478,10 +4505,13 @@ ModuleGeneratorsOverOriginalMonoid.sort_by_weights(WeightsGrad, GradAbs); setComputed(ConeProperty::ModuleGeneratorsOverOriginalMonoid); } - - if (FC.isComputed(ConeProperty::ExtremeRays)) { + + // Important: must be done after ModuleGeneratorsOverOriginalMonoid because in this case + // generators may not be primitive or can contain duplicates genereating the same ray + if (FC.isComputed(ConeProperty::ExtremeRays) ) { set_extreme_rays(FC.getExtremeRays()); } + if (FC.isComputed(ConeProperty::SupportHyperplanes)) { /* if (inhomogeneous) { // remove irrelevant support hyperplane 0 ... 0 1 @@ -4517,6 +4547,14 @@ is_Computed.reset(ConeProperty::LatticePointTriangulation); // must reset these friends is_Computed.reset(ConeProperty::AllGeneratorsTriangulation); // when the basic triangulation is_Computed.reset(ConeProperty::UnimodularTriangulation); // is recomputed + + // It is important to keep the TriangulationGenerators in interactive mode for two reasons: + // (i) relaiable connection to the Triangulation, + // (ii) potential reordering of generators between retrieval of triangulation + // and generators. + BasisChangePointed.convert_from_sublattice(TriangulationGenerators, FC.getGenerators()); + BasicTriangulationGenerators = TriangulationGenerators; + setComputed(ConeProperty::TriangulationGenerators); size_t tri_size = FC.Triangulation.size(); FC.Triangulation.sort(compareKeys); // necessary to make triangulation unique @@ -4527,8 +4565,6 @@ for (size_t i = 0; i < tri_size; ++i) { simp = FC.Triangulation.front(); Triangulation[i].first.swap(simp.key); - /* sort(Triangulation[i].first.begin(), Triangulation[i].first.end()); -- no longer allowed here because of - * ConeDecomposition. 
Done in full_cone.cpp, transfer_triangulation_to top */ if (FC.isComputed(ConeProperty::TriangulationDetSum)) convert(Triangulation[i].second, simp.vol); else @@ -4539,19 +4575,18 @@ } if (FC.isComputed(ConeProperty::ConeDecomposition)) setComputed(ConeProperty::ConeDecomposition); - setComputed(ConeProperty::Triangulation); + setComputed(ConeProperty::Triangulation); + BasicTriangulation = Triangulation; } if (FC.isComputed(ConeProperty::StanleyDec)) { StanleyDec.clear(); StanleyDec.splice(StanleyDec.begin(), FC.StanleyDec); setComputed(ConeProperty::StanleyDec); + BasisChangePointed.convert_from_sublattice(TriangulationGenerators, FC.getGenerators()); + setComputed(ConeProperty::TriangulationGenerators); } - if (isComputed(ConeProperty::Triangulation) || isComputed(ConeProperty::TriangulationSize) || - isComputed(ConeProperty::TriangulationDetSum) || isComputed(ConeProperty::StanleyDec)) - ReferenceGenerators = Generators; - if (FC.isComputed(ConeProperty::InclusionExclusionData)) { InExData.clear(); InExData.reserve(FC.InExCollect.size()); @@ -4698,43 +4733,8 @@ } if (FC.isComputed(ConeProperty::Automorphisms)) { - Automs.order = FC.Automs.order; - Automs.Qualities = FC.Automs.Qualities; - - vector SuppHypsKey, ExtRaysKey, VerticesKey, GensKey; - - Automs.GenPerms = extract_permutations(FC.Automs.GenPerms, FC.Automs.GensRef, ExtremeRays, true, GensKey); - if (inhomogeneous) { - Automs.ExtRaysPerms = - extract_permutations(FC.Automs.GenPerms, FC.Automs.GensRef, ExtremeRaysRecCone, true, ExtRaysKey); - Automs.VerticesPerms = - extract_permutations(FC.Automs.GenPerms, FC.Automs.GensRef, VerticesOfPolyhedron, true, VerticesKey); - } - else { - Automs.ExtRaysPerms = Automs.GenPerms; - ExtRaysKey = GensKey; - } - - Automs.LinFormPerms = - extract_permutations(FC.Automs.LinFormPerms, FC.Automs.LinFormsRef, SupportHyperplanes, false, SuppHypsKey); - Automs.SuppHypsPerms = Automs.LinFormPerms; - - Automs.GenOrbits = extract_subsets(FC.Automs.GenOrbits, FC.Automs.GensRef.nr_of_rows(), GensKey); - sort_individual_vectors(Automs.GenOrbits); - if (inhomogeneous) { - Automs.VerticesOrbits = extract_subsets(FC.Automs.GenOrbits, FC.Automs.GensRef.nr_of_rows(), VerticesKey); - sort_individual_vectors(Automs.VerticesOrbits); - - Automs.ExtRaysOrbits = extract_subsets(FC.Automs.GenOrbits, FC.Automs.GensRef.nr_of_rows(), ExtRaysKey); - sort_individual_vectors(Automs.ExtRaysOrbits); - } - else { - Automs.ExtRaysOrbits = Automs.GenOrbits; - } - - Automs.LinFormOrbits = extract_subsets(FC.Automs.LinFormOrbits, FC.Automs.LinFormsRef.nr_of_rows(), SuppHypsKey); - sort_individual_vectors(Automs.LinFormOrbits); - Automs.SuppHypsOrbits = Automs.LinFormOrbits; + + extract_automorphisms(FC.Automs, true); // true = must transform if (ToCompute.test(ConeProperty::Automorphisms)) setComputed(ConeProperty::Automorphisms); @@ -4810,7 +4810,8 @@ Matrix& FC_Vectors, const Matrix& ConeVectors, bool primal, - vector& Key) { + vector& Key, + const bool must_transform) { // Key has the same meaning as in extract_subsets, // but is computed by searching the properly transformed vectors of ConeVectors in FC_Vectors: ConeVector[i] = // FC_Vector[Key[i]] It is assumed that each permutation in FC_Permutations can be restricted to Image(Key) The induced @@ -4826,12 +4827,27 @@ VectorsRef[FC_Vectors[i]] = i; } Key.resize(ConeVectors.nr_of_rows()); + + /*cout << "--------------" << endl; + FC_Vectors.pretty_print(cout); + cout << "--------------" << endl; + ConeVectors.pretty_print(cout); + cout << "=============" << endl;*/ + for 
(size_t i = 0; i < ConeVectors.nr_of_rows(); ++i) { vector search; - if (primal) - BasisChangePointed.convert_to_sublattice(search, ConeVectors[i]); - else - BasisChangePointed.convert_to_sublattice_dual(search, ConeVectors[i]); + if(must_transform){ + if (primal) + BasisChangePointed.convert_to_sublattice(search, ConeVectors[i]); + else + BasisChangePointed.convert_to_sublattice_dual(search, ConeVectors[i]); + } + else{ + if (primal) + convert(search,ConeVectors[i]); + else + convert(search,ConeVectors[i]); + } if (using_renf()) { v_standardize(search); } @@ -4922,7 +4938,7 @@ if (!isComputed(ConeProperty::HilbertBasis)) return; - if (HilbertBasis.nr_of_rows() > OriginalMonoidGenerators.nr_of_rows()) { + if (HilbertBasis.nr_of_rows() > InputGenerators.nr_of_rows()) { integrally_closed = false; setComputed(ConeProperty::IsIntegrallyClosed); if(!ToCompute.test(ConeProperty::WitnessNotIntegrallyClosed)) @@ -4943,14 +4959,14 @@ // we must collect all original generators that lie in the maximal subspace - for (size_t i = 0; i < OriginalMonoidGenerators.nr_of_rows(); ++i) { + for (size_t i = 0; i < InputGenerators.nr_of_rows(); ++i) { size_t j; for (j = 0; j < SupportHyperplanes.nr_of_rows(); ++j) { - if (v_scalar_product(OriginalMonoidGenerators[i], SupportHyperplanes[j]) != 0) + if (v_scalar_product(InputGenerators[i], SupportHyperplanes[j]) != 0) break; } if (j == SupportHyperplanes.nr_of_rows()) - origens_in_subspace.append(OriginalMonoidGenerators[i]); + origens_in_subspace.append(InputGenerators[i]); } Matrix M = Sub.to_sublattice(origens_in_subspace); unit_group_index = M.full_rank_index(); @@ -4977,10 +4993,10 @@ Matrix gens_quot; Matrix hilb_quot; if (!pointed) { - gens_quot = BasisChangePointed.to_sublattice(OriginalMonoidGenerators); + gens_quot = BasisChangePointed.to_sublattice(InputGenerators); hilb_quot = BasisChangePointed.to_sublattice(HilbertBasis); } - Matrix& gens = pointed ? OriginalMonoidGenerators : gens_quot; + Matrix& gens = pointed ? InputGenerators : gens_quot; Matrix& hilb = pointed ? 
HilbertBasis : hilb_quot; integrally_closed = true; @@ -5009,7 +5025,7 @@ if(using_renf()) return; if (!isComputed(ConeProperty::OriginalMonoidGenerators)) { - OriginalMonoidGenerators = Input; + InputGenerators = Input; setComputed(ConeProperty::OriginalMonoidGenerators); } // Generators = Input; @@ -5023,7 +5039,7 @@ template void Cone::set_extreme_rays(const vector& ext) { - + assert(ext.size() == Generators.nr_of_rows()); ExtremeRays = Generators.submatrix(ext); // extreme rays of the homogenized cone ExtremeRaysIndicator = ext; @@ -5107,6 +5123,30 @@ //--------------------------------------------------------------------------- template +void Cone::compute_extreme_rays_float(ConeProperties& ToCompute) { + if (!ToCompute.test(ConeProperty::ExtremeRaysFloat) || isComputed(ConeProperty::ExtremeRaysFloat)) + return; + if (!isComputed(ConeProperty::ExtremeRays)) + throw NotComputableException("ExtremeRaysFloat not computable without extreme rays"); + if (inhomogeneous) + convert(ExtremeRaysFloat, ExtremeRaysRecCone); + else + convert(ExtremeRaysFloat, ExtremeRays); + vector norm; + if (!inhomogeneous){ + if(isComputed(ConeProperty::Grading)){ + convert(norm, Grading); + nmz_float GD = 1.0 / convertTo(GradingDenom); + v_scalar_multiplication(norm, GD); + } + } + ExtremeRaysFloat.standardize_rows(norm); + setComputed(ConeProperty::ExtremeRaysFloat); +} + +//--------------------------------------------------------------------------- + +template void Cone::compute_supp_hyps_float(ConeProperties& ToCompute) { if (!ToCompute.test(ConeProperty::SuppHypsFloat) || isComputed(ConeProperty::SuppHypsFloat)) return; @@ -5156,7 +5196,7 @@ HSeries.set_expansion_degree(save_expansion_degree); long long nlp = 0; if (expansion.size() > 1) { - nlp = convertTo(expansion[1]); + nlp = convertToLongLong(expansion[1]); } number_lattice_points = nlp; setComputed(ConeProperty::NumberLatticePoints); @@ -5268,7 +5308,7 @@ face_codim_bound = bound; is_Computed.reset(ConeProperty::FaceLattice); is_Computed.reset(ConeProperty::FVector); - FaceLattice.clear(); + FaceLat.clear(); f_vector.clear(); } @@ -5285,7 +5325,7 @@ //--------------------------------------------------------------------------- template void Cone::try_symmetrization(ConeProperties& ToCompute) { - if (dim <= 1) + if (dim <= 1 || using_renf()) return; if (ToCompute.test(ConeProperty::NoSymmetrization) || ToCompute.test(ConeProperty::Descent)) @@ -5481,10 +5521,10 @@ template void Cone::compute_integral(ConeProperties& ToCompute) { - if (BasisMaxSubspace.nr_of_rows() > 0) - throw NotComputableException("Integral not computable for polyhedra containing an affine space of dim > 0"); if (isComputed(ConeProperty::Integral) || !ToCompute.test(ConeProperty::Integral)) return; + if (BasisMaxSubspace.nr_of_rows() > 0) + throw NotComputableException("Integral not computable for polyhedra containing an affine space of dim > 0"); if (IntData.getPolynomial() == "") throw BadInputException("Polynomial weight missing"); #ifdef NMZ_COCOA @@ -5504,6 +5544,8 @@ void Cone::compute_virt_mult(ConeProperties& ToCompute) { if (isComputed(ConeProperty::VirtualMultiplicity) || !ToCompute.test(ConeProperty::VirtualMultiplicity)) return; + if (BasisMaxSubspace.nr_of_rows() > 0) + throw NotComputableException("Virtual multiplicity not computable for polyhedra containing an affine space of dim > 0"); if (IntData.getPolynomial() == "") throw BadInputException("Polynomial weight missing"); #ifdef NMZ_COCOA @@ -5519,6 +5561,8 @@ void Cone::compute_weighted_Ehrhart(ConeProperties& 
ToCompute) { if (isComputed(ConeProperty::WeightedEhrhartSeries) || !ToCompute.test(ConeProperty::WeightedEhrhartSeries)) return; + if (BasisMaxSubspace.nr_of_rows() > 0) + throw NotComputableException("Weighted Ehrhart series not computable for polyhedra containing an affine space of dim > 0"); if (IntData.getPolynomial() == "") throw BadInputException("Polynomial weight missing"); /* if(get_rank_internal()==0) @@ -6257,12 +6301,13 @@ if (BasisMaxSubspace.nr_of_rows() > 0) throw NotComputableException("Volume not computable for polyhedra containing an affine space of dim > 0"); volume = multiplicity; + setComputed(ConeProperty::Volume); euclidean_volume = mpq_to_nmz_float(volume) * euclidean_corr_factor(); setComputed(ConeProperty::EuclideanVolume); - setComputed(ConeProperty::Volume); return; } + /* compute(ConeProperty::Generators); compute(ConeProperty::AffineDim); @@ -6310,7 +6355,7 @@ euclidean_volume = VolCone.getEuclideanVolume(); setComputed(ConeProperty::Volume); setComputed(ConeProperty::EuclideanVolume); - return; + return; */ } //--------------------------------------------------------------------------- @@ -6531,7 +6576,7 @@ template void Cone::try_multiplicity_by_descent(ConeProperties& ToCompute) { - if(inhomogeneous) // in this case multiplicity defined algebraically, not as the volume of a polytope + if(inhomogeneous || using_renf()) // in this case multiplicity defined algebraically, not as the volume of a polytope return; if (!ToCompute.test(ConeProperty::Multiplicity) || ToCompute.test(ConeProperty::NoDescent) || @@ -6562,6 +6607,12 @@ try_multiplicity_of_para(ToCompute); // we try this first, even if Descent is set if (isComputed(ConeProperty::Multiplicity)) return; + + if(BasisChangePointed.getRank() == 0){ + multiplicity = 1; + setComputed(ConeProperty::Multiplicity); + return; + } if (verbose) verboseOutput() << "Multiplicity by descent in the face lattice" << endl; @@ -6630,18 +6681,11 @@ verboseOutput() << "Multiplicity by descent done" << endl; } -#ifdef ENFNORMALIZ -template <> -void Cone::try_multiplicity_by_descent(ConeProperties& ToCompute) { - assert(false); -} -#endif - //--------------------------------------------------------------------------- template void Cone::try_multiplicity_of_para(ConeProperties& ToCompute) { - if (((!inhomogeneous && !ToCompute.test(ConeProperty::Multiplicity)) || + if (( using_renf() || (!inhomogeneous && !ToCompute.test(ConeProperty::Multiplicity)) || (inhomogeneous && !ToCompute.test(ConeProperty::Volume))) || !check_parallelotope()) return; @@ -6738,150 +6782,225 @@ template void Cone::treat_polytope_as_being_hom_defined(ConeProperties ToCompute) { - if (!inhomogeneous) + + if(!inhomogeneous) + return; + + if (using_renf()) return; - if (!ToCompute.test(ConeProperty::EhrhartSeries) && !ToCompute.test(ConeProperty::Triangulation) && - !ToCompute.test(ConeProperty::ConeDecomposition) && !ToCompute.test(ConeProperty::StanleyDec)) + if (ToCompute.intersection_with(treated_as_hom_props()).none()) return; // homogeneous treatment not necessary + + ConeProperties ToComputeFirst; + ToComputeFirst.set(ConeProperty::Generators); + ToComputeFirst.set(ConeProperty::SupportHyperplanes); + ToComputeFirst.set(ConeProperty::ExtremeRays); - compute(ConeProperty::Generators, ConeProperty::AffineDim); + compute(ToComputeFirst); ToCompute.reset(is_Computed); - if (affine_dim == -1 && Generators.nr_of_rows() > 0) { - throw NotComputableException( - "Ehrhart series, triangulation, cone decomposition, Stanley decomposition not computable for empty 
polytope with " - "non-subspace recession cone."); + bool empty_polytope = true; + for (size_t i = 0; i < Generators.nr_of_rows(); ++i){ + Integer test = v_scalar_product(Dehomogenization, Generators[i]); + if (test <= 0) + throw NotComputableException("At least one goal not computable for unbounded polyhedra."); + if(test > 0) + empty_polytope = false; } - for (size_t i = 0; i < Generators.nr_of_rows(); ++i) - if (v_scalar_product(Dehomogenization, Generators[i]) <= 0) - throw NotComputableException( - "Ehrhart series, triangulation, cone decomposition, Stanley decomposition not computable for unbounded " - "polyhedra."); - - // swap(VerticesOfPolyhedron,ExtremeRays); - - vector SaveGrading; - swap(Grading, SaveGrading); - bool save_grad_computed = isComputed(ConeProperty::Grading); - Integer SaveDenom = GradingDenom; - bool save_denom_computed = isComputed(ConeProperty::GradingDenom); - - bool saveFaceLattice = ToCompute.test(ConeProperty::FaceLattice); // better to do this in the - bool saveFVector = ToCompute.test(ConeProperty::FVector); // original inhomogeneous settimg - ToCompute.reset(ConeProperty::FaceLattice); - ToCompute.reset(ConeProperty::FVector); - - bool save_Hilbert_series_to_comp = - ToCompute.test(ConeProperty::HilbertSeries); // on the homogenous cone EhrhartSeries is used - // bool save_Explicit_Hilbert_series_to_comp=ToCompute.test(ConeProperty::ExplicitHilbertSeries); - bool save_Hilbert_series_is_comp = isComputed(ConeProperty::HilbertSeries); - // bool save_Explicit_Hilbert_series_is_comp=isComputed(ConeProperty::ExplicitHilbertSeries); - ToCompute.reset(ConeProperty::HilbertSeries); - HilbertSeries SaveHSeries; - swap(HSeries, SaveHSeries); - - mpq_class save_mult = multiplicity; - bool save_Multiplicity_is_comp = isComputed(ConeProperty::Multiplicity); - bool save_Multiplicity_to_comp = ToCompute.test(ConeProperty::Multiplicity); - - assert(isComputed(ConeProperty::Dehomogenization)); - vector SaveDehomogenization; - swap(Dehomogenization, SaveDehomogenization); - bool save_dehom_computed = isComputed(ConeProperty::Dehomogenization); - - bool save_hilb_bas = ToCompute.test(ConeProperty::HilbertBasis); - - bool save_module_rank = ToCompute.test(ConeProperty::ModuleRank); - - ToCompute.reset(ConeProperty::VerticesOfPolyhedron); // - ToCompute.reset(ConeProperty::ModuleRank); // - ToCompute.reset(ConeProperty::RecessionRank); // these 5 will be computed below - // ToCompute.reset(ConeProperty::AffineDim); // <--------- already done - ToCompute.reset(ConeProperty::VerticesOfPolyhedron); // + if (empty_polytope && Generators.nr_of_rows() > 0) { + throw NotComputableException( + "At least obe goal not computable for empty polytope with non-subspace recession cone."); + } - bool save_mod_gen_over_ori = ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid); - ToCompute.reset(ConeProperty::ModuleGeneratorsOverOriginalMonoid); + if(empty_polytope){ + affine_dim = -1; + setComputed(ConeProperty::AffineDim); + volume = 0; + euclidean_volume = 0; + setComputed(ConeProperty::Volume); + setComputed(ConeProperty::EuclideanVolume); + ToCompute.reset(is_Computed); + } - inhomogeneous = false; - Grading = SaveDehomogenization; - setComputed(ConeProperty::Grading); - if (save_hilb_bas || save_module_rank || save_mod_gen_over_ori) - ToCompute.set(ConeProperty::Deg1Elements); - ToCompute.reset(ConeProperty::HilbertBasis); - - compute(ToCompute); // <--------------------------------------------------- Here we compute - // cout << "IS "<< is_Computed << endl; - - 
VerticesOfPolyhedron = ExtremeRays; - ExtremeRaysRecCone.resize(0, dim); // in the homogeneous case ExtremeRays=ExtremeEaysRecCone - setComputed(ConeProperty::VerticesOfPolyhedron); + Cone Hom(*this); // make a copy and make it homogeneous + Hom.Grading = Dehomogenization; + Hom.setComputed(ConeProperty::Grading); + Hom.Dehomogenization.resize(0); + Hom.inhomogeneous = false; + ConeProperties HomToCompute = ToCompute; + HomToCompute.reset(ConeProperty::FaceLattice); // better to do this in the + HomToCompute.reset(ConeProperty::FVector); // original inhomogeneous settimg + HomToCompute.reset(ConeProperty::Incidence); // + + HomToCompute.reset(ConeProperty::VerticesOfPolyhedron); // + HomToCompute.reset(ConeProperty::ModuleRank); // + HomToCompute.reset(ConeProperty::RecessionRank); // these 6 will be computed below + HomToCompute.reset(ConeProperty::AffineDim); // // + HomToCompute.reset(ConeProperty::VerticesOfPolyhedron); // + HomToCompute.reset(ConeProperty::ModuleGenerators); // + HomToCompute.reset(ConeProperty::ModuleGeneratorsOverOriginalMonoid); // + + ToCompute.reset(ConeProperty::HilbertBasis); // we definitely don't want this + + if (ToCompute.test(ConeProperty::HilbertBasis) || ToCompute.test(ConeProperty::ModuleRank) + || ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid) ) + HomToCompute.set(ConeProperty::LatticePoints); // ==> NoGradingDenom - is_Computed.reset(ConeProperty::IsDeg1ExtremeRays); // makes no sense in the inhomogeneous case - deg1_extreme_rays = false; + Hom.compute(HomToCompute); // <----------------- Here we compute - compute(ConeProperty::Sublattice); + /* compute(ConeProperty::Sublattice); if (!isComputed(ConeProperty::Sublattice)) throw FatalException("Could not compute sublattice"); + + pass_to_pointed_quotient();*/ - if (isComputed(ConeProperty::Deg1Elements)) { - swap(ModuleGenerators, Deg1Elements); - is_Computed.reset(ConeProperty::Deg1Elements); + if (Hom.isComputed(ConeProperty::Deg1Elements)) { + swap(ModuleGenerators, Hom.Deg1Elements); setComputed(ConeProperty::HilbertBasis); setComputed(ConeProperty::ModuleGenerators); module_rank = ModuleGenerators.nr_of_rows(); setComputed(ConeProperty::ModuleRank); - if (save_mod_gen_over_ori) { + number_lattice_points = module_rank; + setComputed(ConeProperty::NumberLatticePoints); + + if (ToCompute.test(ConeProperty::ModuleGeneratorsOverOriginalMonoid) ) { ModuleGeneratorsOverOriginalMonoid = ModuleGenerators; setComputed(ConeProperty::ModuleGeneratorsOverOriginalMonoid); } } - - if (isComputed(ConeProperty::HilbertSeries)) { + + if(Hom.isComputed(ConeProperty::NumberLatticePoints)){ // sometimes computed from the Hilbert series + number_lattice_points = Hom.number_lattice_points; + setComputed(ConeProperty::NumberLatticePoints); + + } + + IntData = Hom.IntData; + if(Hom.isComputed(ConeProperty::WeightedEhrhartSeries)) + setComputed(ConeProperty::WeightedEhrhartSeries); + if(Hom.isComputed(ConeProperty::WeightedEhrhartQuasiPolynomial)) + setComputed(ConeProperty::WeightedEhrhartQuasiPolynomial); + if(Hom.isComputed(ConeProperty::Integral)) + setComputed(ConeProperty::Integral); + if(Hom.isComputed(ConeProperty::EuclideanIntegral)) + setComputed(ConeProperty::EuclideanIntegral); + if(Hom.isComputed(ConeProperty::VirtualMultiplicity)) + setComputed(ConeProperty::VirtualMultiplicity); + + if (Hom.isComputed(ConeProperty::HilbertSeries)) { setComputed(ConeProperty::EhrhartSeries); - swap(EhrSeries, HSeries); - swap(HSeries, SaveHSeries); + swap(EhrSeries, Hom.HSeries); + } + + 
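//---------------------------------------------------------------------------
// Illustrative sketch, not part of the upstream diff: the rewritten
// treat_polytope_as_being_hom_defined no longer saves and restores the
// inhomogeneous data in place.  It builds a homogenized copy ("Hom"), turns
// the dehomogenization into a grading there, computes on the copy, and then
// transfers the results back, preferably by swapping containers.  The toy
// code below shows the shape of that idiom; Problem, solve() and the member
// names are invented for the example.

#include <utility>
#include <vector>

struct Problem {
    bool inhomogeneous = true;
    std::vector<long> grading;           // used in the homogeneous case
    std::vector<long> dehomogenization;  // used in the inhomogeneous case
    std::vector<std::vector<long>> results;
    void solve() { /* expensive computation filling `results` */ }
};

inline void solve_via_homogeneous_copy(Problem& p) {
    Problem hom = p;                      // the original stays untouched
    hom.grading = p.dehomogenization;     // reinterpret the linear form
    hom.dehomogenization.clear();
    hom.inhomogeneous = false;
    hom.solve();                          // compute on the copy
    std::swap(p.results, hom.results);    // move results back without copying
}
//---------------------------------------------------------------------------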
if(Hom.isComputed(ConeProperty::HSOP)) + setComputed(ConeProperty::HSOP); + + if(Hom.isComputed(ConeProperty::Volume)){ + volume = Hom.volume; + setComputed(ConeProperty::Volume); + } + if(Hom.isComputed(ConeProperty::EuclideanVolume)){ + euclidean_volume = Hom.euclidean_volume; + setComputed(ConeProperty::EuclideanVolume); + } + + if(Hom.isComputed(ConeProperty::Triangulation)){ + swap(Triangulation, Hom.Triangulation); + setComputed(ConeProperty::Triangulation); + swap(TriangulationGenerators, Hom.TriangulationGenerators); + setComputed(ConeProperty::TriangulationGenerators); + if(Hom.isComputed(ConeProperty::LatticePointTriangulation)) + setComputed(ConeProperty::LatticePointTriangulation); + if(Hom.isComputed(ConeProperty::AllGeneratorsTriangulation)) + setComputed(ConeProperty::AllGeneratorsTriangulation); + } + + if(Hom.isComputed(ConeProperty::TriangulationSize)) { + TriangulationSize = Hom.TriangulationSize; + setComputed(ConeProperty::TriangulationSize); + } + if(Hom.isComputed(ConeProperty::TriangulationDetSum)) { + TriangulationDetSum= Hom.TriangulationDetSum; + setComputed(ConeProperty::TriangulationDetSum); + } + + if(Hom.isComputed(ConeProperty::Triangulation) || Hom.isComputed(ConeProperty::TriangulationSize) + || Hom.isComputed(ConeProperty::TriangulationDetSum) ){ + triangulation_is_nested = Hom.triangulation_is_nested; + triangulation_is_partial = Hom.triangulation_is_partial; + setComputed(ConeProperty::IsTriangulationPartial); + setComputed(ConeProperty::IsTriangulationNested); } - ToCompute.set(ConeProperty::HilbertSeries, save_Hilbert_series_to_comp); - setComputed(ConeProperty::HilbertSeries, save_Hilbert_series_is_comp); - // ToCompute.set(ConeProperty::ExplicitHilbertSeries,save_Explicit_Hilbert_series_to_comp); - // setComputed(ConeProperty::ExplicitHilbertSeries,save_Explicit_Hilbert_series_is_comp); - - multiplicity = save_mult; - setComputed(ConeProperty::Multiplicity, save_Multiplicity_is_comp); - ToCompute.set(ConeProperty::Multiplicity, save_Multiplicity_to_comp); - - ToCompute.set(ConeProperty::HilbertBasis, save_hilb_bas); - setComputed(ConeProperty::Dehomogenization, save_dehom_computed); - swap(SaveDehomogenization, Dehomogenization); - setComputed(ConeProperty::Grading, save_grad_computed); - setComputed(ConeProperty::GradingDenom, save_denom_computed); - swap(SaveGrading, Grading); - GradingDenom = SaveDenom; - - ToCompute.set(ConeProperty::FaceLattice, saveFaceLattice); - ToCompute.set(ConeProperty::FVector, saveFVector); - - inhomogeneous = true; - recession_rank = BasisMaxSubspace.nr_of_rows(); - setComputed(ConeProperty::RecessionRank); + if(Hom.isComputed(ConeProperty::ConeDecomposition)){ + swap(OpenFacets,Hom.OpenFacets); + setComputed(ConeProperty::ConeDecomposition); + } - if (affine_dim == -1) { - volume = 0; - euclidean_volume = 0; + if(Hom.isComputed(ConeProperty::StanleyDec)){ + swap(StanleyDec,Hom.StanleyDec); + setComputed(ConeProperty::StanleyDec); + } + + if(Hom.isComputed(ConeProperty::ExcludedFaces)){ + swap(ExcludedFaces,Hom.ExcludedFaces); + setComputed(ConeProperty::ExcludedFaces); + } + + if(Hom.isComputed(ConeProperty::CoveringFace)){ + swap(CoveringFace,Hom.CoveringFace); + setComputed(ConeProperty::CoveringFace); + } + + if(Hom.isComputed(ConeProperty::IsEmptySemiOpen)){ + empty_semiopen = Hom.empty_semiopen; + setComputed(ConeProperty::IsEmptySemiOpen); } - /* - if(isComputed(ConeProperty::Sublattice)){ - if (get_rank_internal() == recession_rank) { - affine_dim = -1; - } else { - affine_dim = get_rank_internal()-1; - } + 
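//---------------------------------------------------------------------------
// Illustrative note, stated as an assumption: in the polytope-as-cone view
// used here, the vertices of the polytope are exactly the extreme rays of the
// homogenized cone, so permutation and orbit data computed for extreme rays
// can simply be relabelled as vertex data when copied back, as done for
// Automs just below.  Minimal relabelling sketch with invented names:

#include <vector>

struct OrbitData {
    std::vector<std::vector<int>> ext_ray_perms;
    std::vector<std::vector<int>> vertex_perms;
};

inline void relabel_for_polytope(OrbitData& a) {
    a.vertex_perms = a.ext_ray_perms;  // vertices == extreme rays here
    a.ext_ray_perms.clear();           // no recession-cone rays remain
}
//---------------------------------------------------------------------------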
bool automs_computed = false; + if(Hom.isComputed(ConeProperty::Automorphisms)){ + setComputed(ConeProperty::Automorphisms); + automs_computed = true; + } + if(Hom.isComputed(ConeProperty::RationalAutomorphisms)){ + setComputed(ConeProperty::RationalAutomorphisms); + automs_computed = true; + } + if(Hom.isComputed(ConeProperty::EuclideanAutomorphisms)){ + setComputed(ConeProperty::EuclideanAutomorphisms); + automs_computed = true; + } + if(Hom.isComputed(ConeProperty::CombinatorialAutomorphisms)){ + setComputed(ConeProperty::CombinatorialAutomorphisms); + automs_computed = true; + } + if(automs_computed){ + Automs = Hom.Automs; + Automs.VerticesPerms = Automs.ExtRaysPerms; //make things inhomogeneous + Automs.VerticesOrbits = Automs.ExtRaysOrbits; + Automs.ExtRaysPerms.clear(); + Automs.ExtRaysOrbits.clear(); + } + if(Hom.isComputed(ConeProperty::DualIncidence)){ + swap(Hom.DualSuppHypInd, DualSuppHypInd); + setComputed(ConeProperty::DualIncidence); + } + if(Hom.isComputed(ConeProperty::DualFaceLattice)){ + swap(Hom.DualFaceLat, DualFaceLat); + setComputed(ConeProperty::DualFaceLattice); + } + if(Hom.isComputed(ConeProperty::DualFVector)){ + dual_f_vector = Hom.dual_f_vector; + setComputed(ConeProperty::DualFVector); + } + recession_rank = Hom.BasisMaxSubspace.nr_of_rows(); // in our polytope case + setComputed(ConeProperty::RecessionRank); + if(!empty_polytope){ + affine_dim = getRank() - 1; setComputed(ConeProperty::AffineDim); - }*/ + } } //--------------------------------------------------------------------------- @@ -6904,7 +7023,7 @@ vector h_vec_pos(1), h_vec_neg; for (size_t i = 0; i < ModuleGenerators.nr_of_rows(); ++i) { - long deg = convertTo(v_scalar_product(Grading, ModuleGenerators[i])); + long deg = convertToLong(v_scalar_product(Grading, ModuleGenerators[i])); if (deg >= 0) { if (deg >= (long)h_vec_pos.size()) h_vec_pos.resize(deg + 1); @@ -6951,491 +7070,145 @@ //--------------------------------------------------------------------------- -struct FaceInfo { - // dynamic_bitset ExtremeRays; - dynamic_bitset HypsContaining; - int max_cutting_out; - bool max_subset; - // bool max_prec; - bool simple; -}; - -bool face_compare(const pair& a, const pair& b) { - return (a.first < b.first); -} - template void Cone::make_face_lattice(const ConeProperties& ToCompute) { - bool something_to_do = (ToCompute.test(ConeProperty::FaceLattice) && !isComputed(ConeProperty::FaceLattice)) || + + bool something_to_do_primal = (ToCompute.test(ConeProperty::FaceLattice) && !isComputed(ConeProperty::FaceLattice)) || (ToCompute.test(ConeProperty::FVector) && !isComputed(ConeProperty::FVector)) || (ToCompute.test(ConeProperty::Incidence) && !isComputed(ConeProperty::Incidence)); - - if (!something_to_do) + + bool something_to_do_dual = (ToCompute.test(ConeProperty::DualFaceLattice) && !isComputed(ConeProperty::DualFaceLattice)) || + (ToCompute.test(ConeProperty::DualFVector) && !isComputed(ConeProperty::DualFVector)) || + (ToCompute.test(ConeProperty::DualIncidence) && !isComputed(ConeProperty::DualIncidence)); + + if(!something_to_do_dual && !something_to_do_primal) return; - if (verbose) - verboseOutput() << "Computing incidence/face lattice/f-vector ... 
" << endl; - - FaceLattice.clear(); - f_vector.clear(); - + if(something_to_do_dual && something_to_do_primal) + throw BadInputException("Only one of primal or dual face lattice/f-vector/incidence allowed"); + + if(something_to_do_dual && inhomogeneous) + throw BadInputException("Dual face lattice/f-vector/incidence not computable for inhomogeneous input"); + compute(ConeProperty::ExtremeRays, ConeProperty::SupportHyperplanes); // both necessary // since ExtremeRays can be comuted without SupportHyperplanes // if the cone is not full dimensional + + bool only_f_vector = (something_to_do_primal && !ToCompute.test(ConeProperty::FaceLattice) && + !ToCompute.test(ConeProperty::Incidence)) + || (something_to_do_dual && !ToCompute.test(ConeProperty::DualFaceLattice) && + !ToCompute.test(ConeProperty::DualIncidence)); + + bool dualize = only_f_vector && ((something_to_do_primal && ExtremeRays.nr_of_rows() < SupportHyperplanes.nr_of_rows()) + || (something_to_do_dual && ExtremeRays.nr_of_rows() > SupportHyperplanes.nr_of_rows()) ) + && face_codim_bound < 0; + - bool bound_codim = false; - if (face_codim_bound >= 0) - bound_codim = true; - - size_t nr_supphyps = SupportHyperplanes.nr_of_rows(); - size_t nr_extr_rec_cone = ExtremeRaysRecCone.nr_of_rows(); - size_t nr_gens = ExtremeRays.nr_of_rows(); - size_t nr_vert = nr_gens - nr_extr_rec_cone; - - SuppHypInd.clear(); - SuppHypInd.resize(nr_supphyps); - - // order of the extreme rays: - // - // first the vertices of polyhedron (in the inhomogeneous case) - // then the extreme rays of the (recession) cone - // - - // order of the extreme rays: - // - // first the vertices of polyhedron (in the inhomogeneous case) - // then the extreme rays of the (recession) cone - // - - bool skip_remaining = false; - std::exception_ptr tmp_exception; - - int nr_simplial_facets = 0; - -#pragma omp parallel for - for (size_t i = 0; i < nr_supphyps; ++i) { - if (skip_remaining) - continue; - - int nr_gens_in_hyp = 0; - - SuppHypInd[i].resize(nr_gens); - - try { - INTERRUPT_COMPUTATION_BY_EXCEPTION - - if (inhomogeneous) { - for (size_t j = 0; j < nr_vert; ++j) { - if (v_scalar_product(SupportHyperplanes[i], VerticesOfPolyhedron[j]) == 0) { - nr_gens_in_hyp++; - SuppHypInd[i][j] = true; - } - } - } - - for (size_t j = 0; j < nr_extr_rec_cone; ++j) { - if (v_scalar_product(SupportHyperplanes[i], ExtremeRaysRecCone[j]) == 0) { - nr_gens_in_hyp++; - SuppHypInd[i][j + nr_vert] = true; - } - } - - if (nr_gens_in_hyp == (int)(getRank() - 1)) -#pragma omp atomic - nr_simplial_facets++; - - } catch (const std::exception&) { - tmp_exception = std::current_exception(); - skip_remaining = true; -#pragma omp flush(skip_remaining) - } - } - if (!(tmp_exception == 0)) - std::rethrow_exception(tmp_exception); - - if (verbose) - verboseOutput() << "Simplicial facets " << nr_simplial_facets << " of " << nr_supphyps << endl; - - if (ToCompute.test(ConeProperty::Incidence)) - setComputed(ConeProperty::Incidence); - - if (!ToCompute.test(ConeProperty::FVector) && !ToCompute.test(ConeProperty::FaceLattice)) { - if (verbose) - verboseOutput() << "done" << endl; - return; + if( (something_to_do_primal && !dualize) || (something_to_do_dual && dualize) || inhomogeneous ){ + make_face_lattice_primal(ToCompute); } - - dynamic_bitset SimpleVert(nr_gens); - size_t nr_simpl = 0; - for (size_t j = 0; j < nr_gens; ++j) { - size_t nr_cont = 0; - for (size_t i = 0; i < nr_supphyps; ++i) - if (SuppHypInd[i][j]) - nr_cont++; - if (nr_cont == getRank() - 1) { - SimpleVert[j] = 1; - nr_simpl++; - } + 
else{ + make_face_lattice_dual(ToCompute); } - if (verbose) - verboseOutput() << "Cosimplicial gens " << nr_simpl << " of " << nr_gens << endl; - - bool use_simple_vert = (10 * nr_simpl > nr_gens); - - vector prel_f_vector(dim + 1, 0); - - dynamic_bitset the_cone(nr_gens); - the_cone.set(); - dynamic_bitset empty(nr_supphyps); - dynamic_bitset AllFacets(nr_supphyps); - AllFacets.set(); - - map > NewFaces; - map > WorkFaces; - - WorkFaces[empty] = make_pair(empty, AllFacets); // start with the full cone - dynamic_bitset ExtrRecCone(nr_gens); // in the inhomogeneous case - if (inhomogeneous) { // we exclude the faces of the recession cone - for (size_t j = 0; j < nr_extr_rec_cone; ++j) - ExtrRecCone[j + nr_vert] = 1; - ; - } - - Matrix EmbeddedSuppHyps = BasisChange.to_sublattice_dual(SupportHyperplanes); - Matrix EmbeddedSuppHyps_MI; - if (change_integer_type) - BasisChange.convert_to_sublattice_dual(EmbeddedSuppHyps_MI, SupportHyperplanes); - - /*for(int i=0;i< 10000;++i){ // for pertubation of order of supphyps - int j=rand()%nr_supphyps; - int k=rand()%nr_supphyps; - swap(SuppHypInd[j],SuppHypInd[k]); - swap(EmbeddedSuppHyps[j],EmbeddedSuppHyps[k]); - if(change_integer_type) - swap(EmbeddedSuppHyps_MI[j],EmbeddedSuppHyps_MI[k]); - }*/ - - vector Unit_bitset(nr_supphyps); - for (size_t i = 0; i < nr_supphyps; ++i) { - Unit_bitset[i].resize(nr_supphyps); - Unit_bitset[i][i] = 1; - } - - long codimension_so_far = 0; // the lower bound for the codimension so far - - const long VERBOSE_STEPS = 50; - const size_t RepBound = 1000; - bool report_written = false; - - size_t total_inter = 0; - size_t avoided_inter = 0; - size_t total_new = 0; - size_t total_simple = 1; // the full cone is cosimplicial - size_t total_max_subset = 0; - - while (true) { - codimension_so_far++; // codimension of faces put into NewFaces - bool CCC = false; - if (codimension_so_far == 1) - CCC = true; - - if (bound_codim && codimension_so_far > face_codim_bound + 1) - break; - size_t nr_faces = WorkFaces.size(); - if (verbose) { - if (report_written) - verboseOutput() << endl; - verboseOutput() << "codim " << codimension_so_far - 1 << " faces to process " << nr_faces << endl; - report_written = false; - } - - long step_x_size = nr_faces - VERBOSE_STEPS; - -#pragma omp parallel - { - size_t Fpos = 0; - auto F = WorkFaces.begin(); - list > FreeFaces, Faces; - pair fr; - fr.first.resize(nr_gens); - fr.second.HypsContaining.resize(nr_supphyps); - for (size_t i = 0; i < nr_supphyps; ++i) { - FreeFaces.push_back(fr); - } - -#pragma omp for schedule(dynamic) - for (size_t kkk = 0; kkk < nr_faces; ++kkk) { - if (skip_remaining) - continue; - - for (; kkk > Fpos; ++Fpos, ++F) - ; - for (; kkk < Fpos; --Fpos, --F) - ; - - if (verbose && nr_faces >= RepBound) { -#pragma omp critical(VERBOSE) - while ((long)(kkk * VERBOSE_STEPS) >= step_x_size) { - step_x_size += nr_faces; - verboseOutput() << "." 
<< flush; - report_written = true; - } - } - - Faces.clear(); - - try { - INTERRUPT_COMPUTATION_BY_EXCEPTION - - dynamic_bitset beta_F = F->second.first; + +} +//--------------------------------------------------------------------------- - bool F_simple = ((long)F->first.count() == codimension_so_far - 1); +template +void Cone::make_face_lattice_primal(const ConeProperties& ToCompute) { -#pragma omp atomic - prel_f_vector[codimension_so_far - 1]++; - - dynamic_bitset Gens = the_cone; // make indicator vector of *F - for (int i = 0; i < (int)nr_supphyps; ++i) { - if (F->second.first[nr_supphyps - 1 - i] == 0) // does not define F - continue; - // beta_F=i; - Gens = Gens & SuppHypInd[i]; - } - - dynamic_bitset MM_mother = F->second.second; - - // now we produce the intersections with facets - dynamic_bitset Intersect(nr_gens); - - int start; - if (CCC) - start = 0; - else { - start = F->second.first.find_first(); - start = nr_supphyps - start; - } - - for (size_t i = start; i < nr_supphyps; ++i) { - if (F->first[i] == 1) { // contains *F - continue; - } -#pragma omp atomic - total_inter++; - if (MM_mother[i] == 0) { // using restriction criteria of the paper -#pragma omp atomic - avoided_inter++; - continue; - } - Intersect = Gens & SuppHypInd[i]; - if (inhomogeneous && Intersect.is_subset_of(ExtrRecCone)) - continue; - - Faces.splice(Faces.end(), FreeFaces, FreeFaces.begin()); - Faces.back().first = Intersect; - Faces.back().second.max_cutting_out = i; - Faces.back().second.max_subset = true; - // Faces.back().second.HypsContaining.reset(); - // Faces.push_back(make_pair(Intersect,fr)); - } - - Faces.sort(face_compare); - for (auto Fac = Faces.begin(); Fac != Faces.end(); ++Fac) { - if (Fac != Faces.begin()) { - auto Gac = Fac; - --Gac; - if (Fac->first == Gac->first) { - Fac->second.max_subset = false; - Gac->second.max_subset = false; - } - } - } - - for (auto Fac = Faces.end(); Fac != Faces.begin();) { // first we check for inclusion - - --Fac; - - if (!Fac->second.max_subset) - continue; - - auto Gac = Fac; - Gac++; - for (; Gac != Faces.end(); Gac++) { - if (!Gac->second.max_subset) - continue; - if (Fac->first.is_subset_of(Gac->first)) { - Fac->second.max_subset = false; - break; - } - } - } - - dynamic_bitset MM_F(nr_supphyps); - - for (auto Fac = Faces.end(); Fac != Faces.begin();) { - --Fac; - - if (!Fac->second.max_subset) - continue; - -#pragma omp atomic - total_max_subset++; - - INTERRUPT_COMPUTATION_BY_EXCEPTION - - dynamic_bitset Containing = F->first; - Containing[Fac->second.max_cutting_out] = 1; - - bool simple = false; - if (F_simple && use_simple_vert) { - if ((Fac->first & SimpleVert).any()) { - simple = true; - } - } - - if (!simple) { - bool extra_hyp = false; - for (size_t j = 0; j < nr_supphyps; ++j) { // beta_F - if (Containing[j] == 0 && Fac->first.is_subset_of(SuppHypInd[j])) { - Containing[j] = 1; - extra_hyp = true; - } - } - simple = F_simple && !extra_hyp; - } - - int codim_of_face = 0; // to make gcc happy - if (simple) - codim_of_face = codimension_so_far; - else { - dynamic_bitset Containing(nr_supphyps); - for (size_t j = 0; j < nr_supphyps; ++j) { // beta_F - if (Containing[j] == 0 && Fac->first.is_subset_of(SuppHypInd[j])) { - Containing[j] = 1; - } - } - vector selection = bitset_to_bool(Containing); - if (change_integer_type) { - try { - codim_of_face = EmbeddedSuppHyps_MI.submatrix(selection).rank(); - } catch (const ArithmeticException& e) { - change_integer_type = false; - } - } - if (!change_integer_type) - codim_of_face = 
EmbeddedSuppHyps.submatrix(selection).rank(); - - if (codim_of_face > codimension_so_far) { - Fac->second.max_subset = false; - continue; - } - } - - MM_F[Fac->second.max_cutting_out] = 1; - Fac->second.simple = simple; - Fac->second.HypsContaining = Containing; - } - - for (auto Fac = Faces.end(); Fac != Faces.begin();) { // why backwards?? - - --Fac; - - if (!Fac->second.max_subset) - continue; - - bool simple = Fac->second.simple; - - beta_F[nr_supphyps - 1 - Fac->second.max_cutting_out] = - 1; // we must go to revlex, beta_F reconstituted below - -#pragma omp critical(INSERT_NEW) - { - total_new++; - - if (simple) { - NewFaces[Fac->second.HypsContaining] = make_pair(beta_F, MM_F); - total_simple++; - } - else { - auto G = NewFaces.find(Fac->second.HypsContaining); - if (G == NewFaces.end()) { - NewFaces[Fac->second.HypsContaining] = make_pair(beta_F, MM_F); - } - else { - if (G->second.first < beta_F) { // because of revlex < instead of > - G->second.first = beta_F; - G->second.second = MM_F; - } - } - } - } // critical - - beta_F[nr_supphyps - 1 - Fac->second.max_cutting_out] = 0; - } - } catch (const std::exception&) { - tmp_exception = std::current_exception(); - skip_remaining = true; -#pragma omp flush(skip_remaining) - } + if(verbose && ToCompute.test(ConeProperty::DualFVector)) + verboseOutput() << "Going to the primal side for the dual f-vector" << endl; + if (verbose) + verboseOutput() << "Computing incidence/face lattice/f-vector ... " << endl; - FreeFaces.splice(FreeFaces.end(), Faces); - } // omp for - } // parallel - if (!(tmp_exception == 0)) - std::rethrow_exception(tmp_exception); - - if (ToCompute.test(ConeProperty::FaceLattice)) - for (auto H = WorkFaces.begin(); H != WorkFaces.end(); ++H) - FaceLattice[H->first] = codimension_so_far - 1; - WorkFaces.clear(); - if (NewFaces.empty()) - break; - swap(WorkFaces, NewFaces); + Matrix SuppHypPointed; + BasisChangePointed.convert_to_sublattice_dual(SuppHypPointed,SupportHyperplanes); + Matrix VertOfPolPointed; + BasisChangePointed.convert_to_sublattice(VertOfPolPointed,VerticesOfPolyhedron); + Matrix ExtrRCPointed; + BasisChangePointed.convert_to_sublattice(ExtrRCPointed,ExtremeRaysRecCone); + FaceLattice FL(SuppHypPointed,VertOfPolPointed,ExtrRCPointed,inhomogeneous); + + if(ToCompute.test(ConeProperty::FaceLattice) || ToCompute.test(ConeProperty::FVector) + || ToCompute.test(ConeProperty::DualFVector)) + FL.compute(face_codim_bound,verbose,change_integer_type); + + if(ToCompute.test(ConeProperty::Incidence)){ + FL.get(SuppHypInd); + setComputed(ConeProperty::Incidence); } - - if (inhomogeneous && nr_vert != 1) { // we want the empty face in the face lattice - // (never the case in homogeneous computations) - dynamic_bitset NoGens(nr_gens); - size_t codim_max_subspace = EmbeddedSuppHyps.rank(); - FaceLattice[AllFacets] = codim_max_subspace; - if (!(bound_codim && (int)codim_max_subspace > face_codim_bound)) - prel_f_vector[codim_max_subspace]++; + if(ToCompute.test(ConeProperty::FaceLattice) ){ + FL.get(FaceLat); + setComputed(ConeProperty::FaceLattice); } - - size_t total_nr_faces = 0; - for (int i = prel_f_vector.size() - 1; i >= 0; --i) { - if (prel_f_vector[i] != 0) { - f_vector.push_back(prel_f_vector[i]); - total_nr_faces += prel_f_vector[i]; + if(ToCompute.test(ConeProperty::FaceLattice) || ToCompute.test(ConeProperty::FVector) + || ToCompute.test(ConeProperty::DualFVector)){ + vector prel_f_vector = FL.getFVector(); + if(!ToCompute.test(ConeProperty::DualFVector)){ + f_vector = prel_f_vector; + 
setComputed(ConeProperty::FVector); + } + else{ + dual_f_vector.resize(prel_f_vector.size()); + for(size_t i = 0; i< prel_f_vector.size(); ++i) + dual_f_vector[i] = prel_f_vector[prel_f_vector.size()-1-i]; + setComputed(ConeProperty::DualFVector); } } +} - // cout << " Total " << FaceLattice.size() << endl; - - if (verbose) { - verboseOutput() << endl << "Total number of faces computed " << total_nr_faces << endl; - verboseOutput() << "f-vector " << f_vector; - } - - if (ToCompute.test(ConeProperty::FaceLattice)) - setComputed(ConeProperty::FaceLattice); - setComputed(ConeProperty::FVector); - - /* - if(verbose){ - verboseOutput() << "done" << endl; - - cout << "total " << total_inter << " avoided " << avoided_inter << " computed " << total_inter-avoided_inter << endl; - - cout << "faces sent to NewFaces " << total_new << " cosimplicial " << total_simple << " degenerate " << total_nr_faces - - total_simple << endl; +//--------------------------------------------------------------------------- - cout << "total max subset " << total_max_subset < +void Cone::make_face_lattice_dual(const ConeProperties& ToCompute) { + + if(verbose && ToCompute.test(ConeProperty::FVector)) + verboseOutput() << "Going to the dual side for the primal f-vector" << endl; + if (verbose) + verboseOutput() << "Computing dual incidence/face lattice/f-vector ... " << endl; + + // Note for the coordinate transformation: + // On the dual space we must use the dual coordinate transformation + // Since the primal extreme rays are the support hyperplanes on the dual space + // they must be transformed by the dual of the dual = primal + // The support hyperplanes are extreme rays on the dual. + // They are transformed by the primal of the dual = dual. + + Matrix SuppHypPointed; + BasisChangePointed.convert_to_sublattice(SuppHypPointed,ExtremeRays); // We dualize !!!! + Matrix VertOfPolPointed; // empty matrix in the dual case + Matrix ExtrRCPointed; + BasisChangePointed.convert_to_sublattice_dual(ExtrRCPointed,SupportHyperplanes); // We dualize !!!! 
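//---------------------------------------------------------------------------
// Illustrative sketch, not from the Normaliz sources: the dual face lattice
// routines swap the roles of extreme rays and support hyperplanes, and an
// f-vector obtained on one side becomes the f-vector of the other side by
// reversing it, which is exactly the loop used in the two
// make_face_lattice_* functions.  Counting faces by codimension from the
// whole cone down to the zero face, a 4-dimensional cone over a 3-cube has
// {1, 6, 12, 8, 1}; its dual, a cone over an octahedron, has the reversed
// vector {1, 8, 12, 6, 1}.

#include <algorithm>
#include <cstddef>
#include <vector>

inline std::vector<std::size_t> dualize_f_vector(std::vector<std::size_t> f) {
    std::reverse(f.begin(), f.end());  // dimension k  <->  codimension k
    return f;
}
//---------------------------------------------------------------------------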
- if(total_nr_faces - total_simple!=0) - cout << "average number of computations degenerate " << (float) (total_new+1 - total_simple) /(float) (total_nr_faces - - total_simple) << endl; else cout << "all faces cosimpliocial" << endl; + FaceLattice FL(SuppHypPointed,VertOfPolPointed,ExtrRCPointed,inhomogeneous); + + if(ToCompute.test(ConeProperty::DualFaceLattice) || ToCompute.test(ConeProperty::DualFVector) + || ToCompute.test(ConeProperty::FVector)) + FL.compute(face_codim_bound,verbose,change_integer_type); + + if(ToCompute.test(ConeProperty::DualIncidence)){ + FL.get(DualSuppHypInd); + setComputed(ConeProperty::DualIncidence); + } + if(ToCompute.test(ConeProperty::DualFaceLattice) ){ + FL.get(DualFaceLat); + setComputed(ConeProperty::DualFaceLattice); + } + if(ToCompute.test(ConeProperty::DualFaceLattice) || ToCompute.test(ConeProperty::DualFVector) + || ToCompute.test(ConeProperty::FVector)){ + vector prel_f_vector = FL.getFVector(); + if(!ToCompute.test(ConeProperty::FVector)){ + dual_f_vector = prel_f_vector; + setComputed(ConeProperty::DualFVector); + } + else{ + dual_f_vector.resize(prel_f_vector.size()); + for(size_t i = 0; i< prel_f_vector.size(); ++i) + f_vector[i] = prel_f_vector[prel_f_vector.size()-1-i]; + setComputed(ConeProperty::FVector); + } } - */ } //--------------------------------------------------------------------------- @@ -7473,36 +7246,11 @@ } Automs.compute(AutomParam::combinatorial); - - if (verbose) + + if (verbose) verboseOutput() << Automs.getQualitiesString() << "automorphism group of order " << Automs.getOrder() << " done" << endl; - vector ExtRaysKey, VerticesKey; - - if (inhomogeneous) { - Automs.ExtRaysPerms = extract_permutations(Automs.GenPerms, Automs.GensRef, ExtremeRaysRecCone, true, ExtRaysKey); - Automs.VerticesPerms = extract_permutations(Automs.GenPerms, Automs.GensRef, VerticesOfPolyhedron, true, VerticesKey); - } - else { - Automs.ExtRaysPerms = Automs.GenPerms; - } - - Automs.SuppHypsPerms = Automs.LinFormPerms; - - sort_individual_vectors(Automs.GenOrbits); - if (inhomogeneous) { - Automs.VerticesOrbits = extract_subsets(Automs.GenOrbits, Automs.GensRef.nr_of_rows(), VerticesKey); - sort_individual_vectors(Automs.VerticesOrbits); - - Automs.ExtRaysOrbits = extract_subsets(Automs.GenOrbits, Automs.GensRef.nr_of_rows(), ExtRaysKey); - sort_individual_vectors(Automs.ExtRaysOrbits); - } - else { - Automs.ExtRaysOrbits = Automs.GenOrbits; - } - - sort_individual_vectors(Automs.LinFormOrbits); - Automs.SuppHypsOrbits = Automs.LinFormOrbits; + extract_automorphisms(Automs); setComputed(ConeProperty::CombinatorialAutomorphisms); } @@ -7538,44 +7286,70 @@ Automs = AutomorphismGroup(ExtremeRays, SupportHyperplanes, SpecialLinFoprms); - if(ExtremeRays.nr_of_rows()==0){ - setComputed(ConeProperty::EuclideanAutomorphisms); - return; - } - Automs.compute(AutomParam::euclidean); - + if (verbose) verboseOutput() << Automs.getQualitiesString() << "automorphism group of order " << Automs.getOrder() << " done" << endl; - vector VerticesKey; + extract_automorphisms(Automs); + + setComputed(ConeProperty::EuclideanAutomorphisms); +} + +//--------------------------------------------------------------------------- +template +template +void Cone::extract_automorphisms(AutomorphismGroup& AutomsComputed, const bool must_transform){ + + Automs.order = AutomsComputed.order; + Automs.Qualities = AutomsComputed.Qualities; + + vector SuppHypsKey, ExtRaysKey, VerticesKey, GensKey; + Automs.GenPerms = extract_permutations(AutomsComputed.GenPerms, AutomsComputed.GensRef, 
ExtremeRays, true, GensKey, must_transform); + + Automs.ExtRaysPerms.clear(); // not necessarily set below if (inhomogeneous) { - Automs.VerticesPerms = extract_permutations(Automs.GenPerms, Automs.GensRef, VerticesOfPolyhedron, true, VerticesKey); + + if(ExtremeRaysRecCone.nr_of_rows() >0 ){ + Automs.ExtRaysPerms = + extract_permutations(AutomsComputed.GenPerms, AutomsComputed.GensRef, ExtremeRaysRecCone, true, ExtRaysKey, must_transform); + } + Automs.VerticesPerms = + extract_permutations(AutomsComputed.GenPerms, AutomsComputed.GensRef, VerticesOfPolyhedron, true, VerticesKey, must_transform); } else { Automs.ExtRaysPerms = Automs.GenPerms; + ExtRaysKey = GensKey; } + Automs.LinFormPerms = + extract_permutations(AutomsComputed.LinFormPerms, AutomsComputed.LinFormsRef, SupportHyperplanes, false, SuppHypsKey, must_transform); Automs.SuppHypsPerms = Automs.LinFormPerms; + Automs.GenOrbits = extract_subsets(AutomsComputed.GenOrbits, AutomsComputed.GensRef.nr_of_rows(), GensKey); + sort_individual_vectors(Automs.GenOrbits); if (inhomogeneous) { - Automs.VerticesOrbits = extract_subsets(Automs.GenOrbits, Automs.GensRef.nr_of_rows(), VerticesKey); + Automs.VerticesOrbits = extract_subsets(AutomsComputed.GenOrbits, AutomsComputed.GensRef.nr_of_rows(), VerticesKey); sort_individual_vectors(Automs.VerticesOrbits); + Automs.ExtRaysOrbits.clear(); // not necessarily set below + if(ExtremeRaysRecCone.nr_of_rows() >0 ){ + Automs.ExtRaysOrbits = extract_subsets(AutomsComputed.GenOrbits, AutomsComputed.GensRef.nr_of_rows(), ExtRaysKey); + sort_individual_vectors(Automs.ExtRaysOrbits); + } } else { Automs.ExtRaysOrbits = Automs.GenOrbits; } + Automs.LinFormOrbits = extract_subsets(AutomsComputed.LinFormOrbits, AutomsComputed.LinFormsRef.nr_of_rows(), SuppHypsKey); sort_individual_vectors(Automs.LinFormOrbits); Automs.SuppHypsOrbits = Automs.LinFormOrbits; - - setComputed(ConeProperty::EuclideanAutomorphisms); + } -//---------------------------------------------------------------------------- - +//--------------------------------------------------------------------------- template void Cone::compute_refined_triangulation(ConeProperties& ToCompute){ @@ -7608,12 +7382,11 @@ template void Cone::prepare_collection(ConeCollection& Coll){ - check_gens_vs_reference(); compute(ConeProperty::Triangulation); - BasisChangePointed.convert_to_sublattice(Coll.Generators,Generators); + BasisChangePointed.convert_to_sublattice(Coll.Generators,BasicTriangulationGenerators); vector, IntegerColl> > CollTriangulation; - for(auto& T: Triangulation){ + for(auto& T: BasicTriangulation){ IntegerColl CollMult = convertTo(T.second); CollTriangulation.push_back(make_pair(T.first, CollMult)); } @@ -7625,8 +7398,7 @@ template void Cone::extract_data(ConeCollection& Coll){ - BasisChangePointed.convert_from_sublattice(Generators, Coll.Generators); - ReferenceGenerators = Generators; + BasisChangePointed.convert_from_sublattice(TriangulationGenerators, Coll.Generators); Triangulation.clear(); Coll.flatten(); for(auto& T: Coll.getKeysAndMult()){ @@ -7647,7 +7419,7 @@ for(auto& T: Triangulation){ Integer grad_prod = 1; for(auto& k: T.first) - grad_prod *= v_scalar_product(Generators[k], TestGrad); + grad_prod *= v_scalar_product(TriangulationGenerators[k], TestGrad); mpz_class gp_mpz = convertTo(grad_prod); mpz_class vol_mpz = convertTo(T.second); mpq_class quot = vol_mpz; @@ -7663,10 +7435,9 @@ void Cone::extract_data(ConeCollection& Coll){ if(BasisChangePointed.IsIdentity()) - swap(Generators,Coll.Generators); + 
swap(TriangulationGenerators,Coll.Generators); else - Generators = BasisChangePointed.from_sublattice(Coll.Generators); - ReferenceGenerators = Generators; + TriangulationGenerators = BasisChangePointed.from_sublattice(Coll.Generators); Triangulation.clear(); Coll.flatten(); Triangulation.clear(); @@ -7682,7 +7453,7 @@ for(auto& T: Triangulation){ Integer grad_prod = 1; for(auto& k: T.first) - grad_prod *= v_scalar_product(Generators[k], TestGrad); + grad_prod *= v_scalar_product(TriangulationGenerators[k], TestGrad); mpz_class gp_mpz = convertTo(grad_prod); mpz_class vol_mpz = convertTo(T.second); mpq_class quot = vol_mpz; @@ -7735,6 +7506,9 @@ if(!ToCompute.test(ConeProperty::LatticePointTriangulation) || isComputed(ConeProperty::LatticePointTriangulation)) return; + if(inhomogeneous && getNrExtremeRays() >0) + throw BadInputException("LatticePointTriangulation not defined for unbounded polyhedra"); + if(verbose) verboseOutput() << "Computing lattice points triangulation" << endl; @@ -7768,7 +7542,7 @@ ConeCollection OMT; prepare_collection(OMT); Matrix OMPointed; - BasisChangePointed.convert_to_sublattice(OMPointed,OriginalMonoidGenerators); + BasisChangePointed.convert_to_sublattice(OMPointed,InputGenerators); OMT.insert_all_gens(); extract_data(OMT); setComputed(ConeProperty::AllGeneratorsTriangulation); @@ -7817,8 +7591,8 @@ throw FatalException("property has no matrix output"); } switch (property) { - case ConeProperty::Generators: - return this->getGeneratorsMatrix(); + case ConeProperty::TriangulationGenerators: + return this->getTriangulationGeneratorsMatrix(); case ConeProperty::ExtremeRays: return this->getExtremeRaysMatrix(); case ConeProperty::VerticesOfPolyhedron: @@ -7864,6 +7638,8 @@ switch (property) { case ConeProperty::SuppHypsFloat: return this->getSuppHypsFloatMatrix(); + case ConeProperty::ExtremeRaysFloat: + return this->getSuppHypsFloatMatrix(); case ConeProperty::VerticesFloat: return this->getVerticesFloatMatrix(); default: @@ -7890,6 +7666,10 @@ return this->getWitnessNotIntegrallyClosed(); case ConeProperty::GeneratorOfInterior: return this->getGeneratorOfInterior(); + case ConeProperty::CoveringFace: + return this->getCoveringFace(); + case ConeProperty::AxesScaling: + return this->getAxesScaling(); default: throw FatalException("Vector property without output"); } @@ -8021,6 +7801,8 @@ return this->isInhomogeneous(); case ConeProperty::IsGorenstein: return this->isGorenstein(); + case ConeProperty::IsEmptySemiOpen: + return this->isEmptySemiOpen(); case ConeProperty::IsTriangulationNested: return this->isTriangulationNested(); case ConeProperty::IsTriangulationPartial: @@ -8121,6 +7903,9 @@ C.getNrVerticesFloat(); C.getVerticesOfPolyhedron(); C.getModuleGenerators(); + C.getExtremeRaysFloat(); + C.getExtremeRaysFloatMatrix(); + C.getNrExtremeRaysFloat(); vector > trivial = {{-1,1},{1,1}}; vector > excl = {{-1,1}}; diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone_dual_mode.cpp normaliz-3.8.9+ds/source/libnormaliz/cone_dual_mode.cpp --- normaliz-3.8.5+ds/source/libnormaliz/cone_dual_mode.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone_dual_mode.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -23,7 +23,7 @@ //--------------------------------------------------------------------------- -#include +#include #include #include #include @@ -33,7 +33,7 @@ #include "libnormaliz/cone_dual_mode.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/list_operations.h" +#include "libnormaliz/list_and_map_operations.h" 
#include "libnormaliz/full_cone.h" // #include "libnormaliz/cone_helper.h" #include "libnormaliz/my_omp.h" @@ -216,7 +216,7 @@ // halfspace_gen_as_cand.father=0; halfspace_gen_as_cand.old_tot_deg = 0; (halfspace_gen_as_cand.values)[hyp_counter] = orientation; // value under the new linear form - halfspace_gen_as_cand.sort_deg = convertTo(orientation); + halfspace_gen_as_cand.sort_deg = convertToLong(orientation); assert(orientation != 0); if (!truncate || halfspace_gen_as_cand.values[0] <= 1) { // the only critical case is the positive halfspace gen in round 0 @@ -245,7 +245,7 @@ bool all_positice_level = pointed; for (auto& h : Intermediate_HB.Candidates) { // dividing into negative and positive Integer new_val = v_scalar_product(hyperplane, h.cand); - long new_val_long = convertTo(new_val); + long new_val_long = convertToLong(new_val); h.reducible = false; h.mother = 0; // h.father=0; @@ -546,7 +546,7 @@ if (diff > 0) { new_candidate.values[hyp_counter] = diff; - new_candidate.sort_deg = p_cand->sort_deg + n_cand->sort_deg - 2 * convertTo(neg_val); + new_candidate.sort_deg = p_cand->sort_deg + n_cand->sort_deg - 2 * convertToLong(neg_val); if (do_reduction && (Pos_Table[omp_get_thread_num()].is_reducible_unordered(new_candidate) || Neutr_Table[omp_get_thread_num()].is_reducible_unordered(new_candidate))) continue; @@ -559,7 +559,7 @@ if (!do_reduction) // don't need new negative elements anymore continue; new_candidate.values[hyp_counter] = -diff; - new_candidate.sort_deg = p_cand->sort_deg + n_cand->sort_deg - 2 * convertTo(pos_val); + new_candidate.sort_deg = p_cand->sort_deg + n_cand->sort_deg - 2 * convertToLong(pos_val); if (Neg_Table[omp_get_thread_num()].is_reducible_unordered(new_candidate)) { continue; } @@ -574,7 +574,7 @@ if (diff == 0) { new_candidate.values[hyp_counter] = 0; new_candidate.sort_deg = - p_cand->sort_deg + n_cand->sort_deg - 2 * convertTo(pos_val); // pos_val==neg_val + p_cand->sort_deg + n_cand->sort_deg - 2 * convertToLong(pos_val); // pos_val==neg_val if (do_reduction && Neutr_Table[omp_get_thread_num()].is_reducible_unordered(new_candidate)) { continue; } diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone.h normaliz-3.8.9+ds/source/libnormaliz/cone.h --- normaliz-3.8.5+ds/source/libnormaliz/cone.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone.h 2020-09-25 14:54:40.000000000 +0000 @@ -258,6 +258,7 @@ //--------------------------------------------------------------------------- ~Cone(); + void delete_aux_cones(); //--------------------------------------------------------------------------- // give additional data @@ -355,9 +356,9 @@ Cone& getSymmetrizedCone() const; Cone& getProjectCone() const; - const Matrix& getGeneratorsMatrix(); - const vector >& getGenerators(); - size_t getNrGenerators(); + const Matrix& getTriangulationGeneratorsMatrix(); + const vector >& getTriangulationGenerators(); + size_t getNrTriangulationGenerators(); const Matrix& getExtremeRaysMatrix(); const vector >& getExtremeRays(); @@ -366,6 +367,10 @@ const Matrix& getVerticesFloatMatrix(); const vector >& getVerticesFloat(); size_t getNrVerticesFloat(); + + const Matrix& getExtremeRaysFloatMatrix(); + const vector >& getExtremeRaysFloat(); + size_t getNrExtremeRaysFloat(); const Matrix& getSuppHypsFloatMatrix(); const vector >& getSuppHypsFloat(); @@ -403,6 +408,8 @@ vector getWitnessNotIntegrallyClosed(); vector getGeneratorOfInterior(); + vector getCoveringFace(); + vector getAxesScaling(); const Matrix& getHilbertBasisMatrix(); const vector >& 
getHilbertBasis(); @@ -429,6 +436,10 @@ const map& getFaceLattice(); vector getFVector(); const vector& getIncidence(); + + const map& getDualFaceLattice(); + vector getDualFVector(); + const vector& getDualIncidence(); // the actual grading is Grading/GradingDenom vector getGrading(); @@ -461,6 +472,7 @@ bool isDeg1HilbertBasis(); bool isIntegrallyClosed(); bool isGorenstein(); + bool isEmptySemiOpen(); bool isReesPrimary(); bool isIntHullCone(); Integer getReesPrimaryMultiplicity(); @@ -543,12 +555,15 @@ bool verbose; ConeProperties is_Computed; // Matrix GeneratorsOfToricRing; - Matrix OriginalMonoidGenerators; + Matrix InputGenerators; Matrix Generators; - Matrix ReferenceGenerators; + Matrix TriangulationGenerators; // the generators for the last computed truangulation + Matrix BasicTriangulationGenerators; // the generators for the basic truangulation + // Matrix ReferenceGenerators; Matrix ExtremeRays; // of the homogenized cone Matrix ExtremeRaysRecCone; // of the recession cone, = ExtremeRays in the homogeneous case Matrix VerticesFloat; + Matrix ExtremeRaysFloat; vector ExtremeRaysIndicator; Matrix VerticesOfPolyhedron; Matrix SupportHyperplanes; @@ -558,7 +573,8 @@ Integer TriangulationDetSum; bool triangulation_is_nested; bool triangulation_is_partial; - vector, Integer> > Triangulation; + vector, Integer> > Triangulation; // the last computed triangulation + vector, Integer> > BasicTriangulation; // the basic triangulation vector > OpenFacets; vector projection_coord_indicator; vector, long> > InExData; @@ -573,6 +589,8 @@ mpq_class VirtualMultiplicity; vector WitnessNotIntegrallyClosed; vector GeneratorOfInterior; + vector CoveringFace; + vector AxesScaling; Matrix HilbertBasis; Matrix HilbertBasisRecCone; Matrix BasisMaxSubspace; @@ -590,25 +608,29 @@ Integer unit_group_index; size_t number_lattice_points; vector f_vector; + vector dual_f_vector; vector Pair; // for indicator vectors in project-and_lift vector ParaInPair; // if polytope is a parallelotope bool check_parallelotope(); bool is_parallelotope; - map FaceLattice; + map FaceLat; + map DualFaceLat; vector SuppHypInd; // incidemnce vectors of the support hyperplanes + vector DualSuppHypInd; bool pointed; bool inhomogeneous; bool precomputed_extreme_rays; bool precomputed_support_hyperplanes; + bool empty_semiopen; bool input_automorphisms; bool polytope_in_input; - bool gorensetin; + bool rational_lattice_in_input; bool deg1_extreme_rays; bool deg1_hilbert_basis; @@ -677,7 +699,7 @@ void convert_lattice_generators_to_constraints(Matrix& LatticeGenerators); void convert_equations_to_inequalties(); - void check_gens_vs_reference(); // to make sure that newly computed generators agrre with the previously computed + // void check_gens_vs_reference(); // to make sure that newly computed generators agrre with the previously computed void setGrading(const vector& lf); void setWeights(); @@ -694,6 +716,8 @@ void make_Hilbert_series_from_pos_and_neg(const vector& h_vec_pos, const vector& h_vec_neg); void make_face_lattice(const ConeProperties& ToCompute); + void make_face_lattice_primal(const ConeProperties& ToCompute); + void make_face_lattice_dual(const ConeProperties& ToCompute); void compute_combinatorial_automorphisms(const ConeProperties& ToCompute); void compute_euclidean_automorphisms(const ConeProperties& ToCompute); @@ -705,6 +729,12 @@ void compute_refined_triangulation(ConeProperties& ToCompute); + template + void extract_automorphisms(AutomorphismGroup& AutomsComputed, const bool must_transform = false); + + void 
prepare_automorphisms(); + void prepare_refined_triangulation(); + template void compute_unimodular_triangulation(ConeProperties& ToCompute); template @@ -723,9 +753,10 @@ #ifdef NMZ_EXTENDED_TESTS void set_extended_tests(ConeProperties& ToCompute); #endif - - template + void compute_full_cone(ConeProperties& ToCompute); + template + void compute_full_cone_inner(ConeProperties& ToCompute); void pass_to_pointed_quotient(); @@ -789,6 +820,7 @@ void compute_vertices_float(ConeProperties& ToCompute); void compute_supp_hyps_float(ConeProperties& ToCompute); + void compute_extreme_rays_float(ConeProperties& ToCompute); void make_StanleyDec_export(); @@ -832,7 +864,8 @@ Matrix& FC_Vectors, const Matrix& ConeVectors, bool primal, - vector& Key); + vector& Key, + const bool must_transform); vector > extract_subsets(const vector >& FC_Subsets, size_t max_index, const vector& Key); }; diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone_property.cpp normaliz-3.8.9+ds/source/libnormaliz/cone_property.cpp --- normaliz-3.8.5+ds/source/libnormaliz/cone_property.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone_property.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -27,7 +27,7 @@ #include #include -#include +#include #include "libnormaliz/general.h" @@ -178,9 +178,29 @@ static ConeProperties ret; ret.set(ConeProperty::VerticesOfPolyhedron); ret.set(ConeProperty::ModuleGenerators); - ret.set(ConeProperty::ReesPrimaryMultiplicity); ret.set(ConeProperty::AffineDim); ret.set(ConeProperty::ModuleRank); + ret.set(ConeProperty::RecessionRank); + return ret; +} + +ConeProperties treated_as_hom_props(){ + static ConeProperties ret; + ret.set(ConeProperty::WeightedEhrhartSeries); + ret.set(ConeProperty::Integral); + ret.set(ConeProperty::EuclideanIntegral); + ret.set(ConeProperty::WeightedEhrhartQuasiPolynomial); + ret.set(ConeProperty::VirtualMultiplicity); + ret.set(ConeProperty::EhrhartSeries); + // ret.set(ConeProperty::Triangulation); + ret.set(ConeProperty::LatticePointTriangulation); + ret.set(ConeProperty::ConeDecomposition); + ret.set(ConeProperty::StanleyDec); + ret.set(ConeProperty::Volume); + ret.set(ConeProperty::EuclideanVolume); + ret.set(ConeProperty::DualIncidence); + ret.set(ConeProperty::DualFVector); + ret.set(ConeProperty::DualFaceLattice); return ret; } @@ -191,26 +211,22 @@ ret.set(ConeProperty::Dehomogenization); ret.set(ConeProperty::WitnessNotIntegrallyClosed); ret.set(ConeProperty::GeneratorOfInterior); - ret.set(ConeProperty::Integral); - ret.set(ConeProperty::VirtualMultiplicity); - ret.set(ConeProperty::EuclideanIntegral); ret.set(ConeProperty::IsDeg1ExtremeRays); ret.set(ConeProperty::IsDeg1HilbertBasis); ret.set(ConeProperty::IsIntegrallyClosed); ret.set(ConeProperty::IsReesPrimary); + ret.set(ConeProperty::ReesPrimaryMultiplicity); ret.set(ConeProperty::IsGorenstein); ret.set(ConeProperty::InclusionExclusionData); - ret.set(ConeProperty::WeightedEhrhartSeries); - ret.set(ConeProperty::WeightedEhrhartQuasiPolynomial); ret.set(ConeProperty::Symmetrize); ret.set(ConeProperty::NoSymmetrization); ret.set(ConeProperty::ClassGroup); ret.set(ConeProperty::UnitGroupIndex); - ret.set(ConeProperty::UnimodularTriangulation); + // ret.set(ConeProperty::UnimodularTriangulation); return ret; } -ConeProperties all_full_cone_goals() { +ConeProperties all_full_cone_goals(bool renf) { static ConeProperties ret; ret.set(ConeProperty::ExtremeRays); ret.set(ConeProperty::SupportHyperplanes); @@ -223,7 +239,8 @@ ret.set(ConeProperty::TriangulationSize); 
ret.set(ConeProperty::ModuleRank); ret.set(ConeProperty::IsPointed); - ret.set(ConeProperty::IsIntegrallyClosed); + ret.set(ConeProperty::IsIntegrallyClosed); + ret.set(ConeProperty::IsEmptySemiOpen); ret.set(ConeProperty::Triangulation); ret.set(ConeProperty::StanleyDec); ret.set(ConeProperty::ConeDecomposition); @@ -234,7 +251,10 @@ ret.set(ConeProperty::ClassGroup); ret.set(ConeProperty::HSOP); ret.set(ConeProperty::Generators); + ret.set(ConeProperty::TriangulationGenerators); ret.set(ConeProperty::Grading); + if(renf) + ret.set(ConeProperty::Volume); return ret; } @@ -260,8 +280,8 @@ return ret; } -ConeProperties ConeProperties::full_cone_goals() const{ - return intersection_with(all_full_cone_goals()); +ConeProperties ConeProperties::full_cone_goals(bool renf) const{ + return intersection_with(all_full_cone_goals(renf)); } ConeProperties ConeProperties::goals_using_grading(bool inhomogeneous) const{ @@ -291,6 +311,15 @@ throw BadInputException("At least one of the listed computation goals not yet implemernted"); } + if(CPs.test(ConeProperty::TriangulationGenerators)) + CPs.set(ConeProperty::Triangulation); + + if(CPs.test(ConeProperty::CoveringFace)) + CPs.set(ConeProperty::IsEmptySemiOpen); + + if(CPs.test(ConeProperty::IsEmptySemiOpen)) + CPs.set(ConeProperty::SupportHyperplanes); + // unimodular triangulation ==> HilbertBasis if (CPs.test(ConeProperty::UnimodularTriangulation)) CPs.set(ConeProperty::HilbertBasis); @@ -384,6 +413,11 @@ if (!inhomogeneous) CPs.set(ConeProperty::Grading); } + + if (CPs.test(ConeProperty::ExtremeRaysFloat)) { + CPs.set(ConeProperty::SupportHyperplanes); + CPs.set(ConeProperty::ExtremeRays); + } // SuppHypsFloat ==> SupportHyperplanes if (CPs.test(ConeProperty::SuppHypsFloat)) { @@ -501,6 +535,12 @@ CPs.set(ConeProperty::StanleyDec); } + // This implication is meant for more stability in interactive use. + // Does not write tri ile by itself. 
+ if(CPs.test(ConeProperty::StanleyDec)) + CPs.set(ConeProperty::Triangulation); + + // Volume + Integral ==> NoGradingDenom if (CPs.test(ConeProperty::Volume) || CPs.test(ConeProperty::Integral)) { CPs.set(ConeProperty::NoGradingDenom); @@ -590,6 +630,7 @@ copy.reset(ConeProperty::ConeDecomposition); copy.reset(ConeProperty::DefaultMode); copy.reset(ConeProperty::Generators); + copy.reset(ConeProperty::TriangulationGenerators); copy.reset(ConeProperty::Sublattice); copy.reset(ConeProperty::MaximalSubspace); copy.reset(ConeProperty::Equations); @@ -598,6 +639,7 @@ copy.reset(ConeProperty::EmbeddingDim); copy.reset(ConeProperty::IsPointed); copy.reset(ConeProperty::IsInhomogeneous); + copy.reset(ConeProperty::IsEmptySemiOpen); copy.reset(ConeProperty::AffineDim); copy.reset(ConeProperty::ModuleGenerators); copy.reset(ConeProperty::Deg1Elements); @@ -617,9 +659,13 @@ copy.reset(ConeProperty::GradingIsPositive); copy.reset(ConeProperty::VerticesFloat); copy.reset(ConeProperty::SuppHypsFloat); + copy.reset(ConeProperty::ExtremeRaysFloat); copy.reset(ConeProperty::FaceLattice); copy.reset(ConeProperty::FVector); copy.reset(ConeProperty::Incidence); + copy.reset(ConeProperty::DualFaceLattice); + copy.reset(ConeProperty::DualFVector); + copy.reset(ConeProperty::DualIncidence); copy.reset(ConeProperty::AmbientAutomorphisms); copy.reset(ConeProperty::Automorphisms); copy.reset(ConeProperty::CombinatorialAutomorphisms); @@ -688,7 +734,17 @@ throw BadInputException("ConeDecomposition cannot be combined with refined triangulation"); if(nr_triangs > 1) - throw BadInputException("Only one type of triangulation allowed."); + throw BadInputException("Only one type of triangulation allowed."); + + + bool something_to_do_primal = CPs.test(ConeProperty::FaceLattice)|| CPs.test(ConeProperty::FVector) + || CPs.test(ConeProperty::Incidence); + + bool something_to_do_dual = CPs.test(ConeProperty::DualFaceLattice)|| CPs.test(ConeProperty::DualFVector) + || CPs.test(ConeProperty::DualIncidence); + + if(something_to_do_dual && something_to_do_primal) + throw BadInputException("Only one of primal or dual face lattice/f-vector/incidence allowed"); size_t automs = 0; if (CPs.test(ConeProperty::Automorphisms)) @@ -704,11 +760,15 @@ if (automs > 1) throw BadInputException("Only one type of automorphism group allowed."); - if(inhomogeneous && intersection_with(only_homogeneous_props()).any()) - throw BadInputException(" Onerof the goals not computable in the inhomogeneous case."); + if(inhomogeneous && intersection_with(only_homogeneous_props()).any()){ + errorOutput() << *this << endl; + throw BadInputException(" One of the goals in last line not computable in the inhomogeneous case."); + } - if(!inhomogeneous && intersection_with(only_inhomogeneous_props()).any()) + if(!inhomogeneous && intersection_with(only_inhomogeneous_props()).any()){ + errorOutput() << *this << endl; throw BadInputException(" One of the goals not computable in the homogeneous case."); + } } /* conversion */ @@ -717,11 +777,13 @@ vector initializeCPN() { vector CPN(ConeProperty::EnumSize); CPN.at(ConeProperty::Generators) = "Generators"; + CPN.at(ConeProperty::TriangulationGenerators) = "TriangulationGenerators"; CPN.at(ConeProperty::ExtremeRays) = "ExtremeRays"; CPN.at(ConeProperty::VerticesFloat) = "VerticesFloat"; CPN.at(ConeProperty::VerticesOfPolyhedron) = "VerticesOfPolyhedron"; CPN.at(ConeProperty::SupportHyperplanes) = "SupportHyperplanes"; CPN.at(ConeProperty::SuppHypsFloat) = "SuppHypsFloat"; + CPN.at(ConeProperty::ExtremeRaysFloat) = 
"ExtremeRaysFloat"; CPN.at(ConeProperty::TriangulationSize) = "TriangulationSize"; CPN.at(ConeProperty::TriangulationDetSum) = "TriangulationDetSum"; CPN.at(ConeProperty::Triangulation) = "Triangulation"; @@ -805,10 +867,13 @@ CPN.at(ConeProperty::EhrhartSeries) = "EhrhartSeries"; CPN.at(ConeProperty::EhrhartQuasiPolynomial) = "EhrhartQuasiPolynomial"; CPN.at(ConeProperty::IsGorenstein) = "IsGorenstein"; + CPN.at(ConeProperty::IsEmptySemiOpen) = "IsEmptySemiOpen"; CPN.at(ConeProperty::NoPeriodBound) = "NoPeriodBound"; CPN.at(ConeProperty::NoLLL) = "NoLLL"; CPN.at(ConeProperty::NoRelax) = "NoRelax"; CPN.at(ConeProperty::GeneratorOfInterior) = "GeneratorOfInterior"; + CPN.at(ConeProperty::AxesScaling) = "AxesScaling"; + CPN.at(ConeProperty::CoveringFace) = "CoveringFace"; CPN.at(ConeProperty::NakedDual) = "NakedDual"; CPN.at(ConeProperty::FullConeDynamic) = "FullConeDynamic"; CPN.at(ConeProperty::TestArithOverflowFullCone) = "TestArithOverflowFullCone"; @@ -828,11 +893,14 @@ CPN.at(ConeProperty::FaceLattice) = "FaceLattice"; CPN.at(ConeProperty::FVector) = "FVector"; CPN.at(ConeProperty::Incidence) = "Incidence"; + CPN.at(ConeProperty::DualFaceLattice) = "DualFaceLattice"; + CPN.at(ConeProperty::DualFVector) = "DualFVector"; + CPN.at(ConeProperty::DualIncidence) = "DualIncidence"; CPN.at(ConeProperty::Dynamic) = "Dynamic"; CPN.at(ConeProperty::Static) = "Static"; // detect changes in size of Enum, to remember to update CPN! - static_assert(ConeProperty::EnumSize == 112, "ConeProperties Enum size does not fit! Update cone_property.cpp!"); + static_assert(ConeProperty::EnumSize == 120, "ConeProperties Enum size does not fit! Update cone_property.cpp!"); // assert all fields contain an non-empty string for (size_t i = 0; i < ConeProperty::EnumSize; i++) { assert(CPN.at(i).size() > 0); diff -Nru normaliz-3.8.5+ds/source/libnormaliz/cone_property.h normaliz-3.8.9+ds/source/libnormaliz/cone_property.h --- normaliz-3.8.5+ds/source/libnormaliz/cone_property.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/cone_property.h 2020-09-25 14:54:40.000000000 +0000 @@ -64,7 +64,7 @@ enum Enum { // matrix valued START_ENUM_RANGE(FIRST_MATRIX), - Generators, + TriangulationGenerators, ExtremeRays, VerticesOfPolyhedron, SupportHyperplanes, @@ -81,6 +81,7 @@ END_ENUM_RANGE(LAST_MATRIX), START_ENUM_RANGE(FIRST_MATRIX_FLOAT), + ExtremeRaysFloat, SuppHypsFloat, VerticesFloat, END_ENUM_RANGE(LAST_MATRIX_FLOAT), @@ -91,6 +92,8 @@ Dehomogenization, WitnessNotIntegrallyClosed, GeneratorOfInterior, + CoveringFace, + AxesScaling, END_ENUM_RANGE(LAST_VECTOR), // integer valued @@ -145,6 +148,7 @@ IsReesPrimary, IsInhomogeneous, IsGorenstein, + IsEmptySemiOpen, // // checking properties of already computed data // (cannot be used as a computation goal) @@ -177,9 +181,14 @@ EhrhartQuasiPolynomial, WeightedEhrhartSeries, WeightedEhrhartQuasiPolynomial, + // FaceLattice, + DualFaceLattice, FVector, + DualFVector, Incidence, + DualIncidence, + // Sublattice, // ClassGroup, @@ -220,12 +229,14 @@ // Dynamic, Static, + END_ENUM_RANGE(LAST_PROPERTY), // // ONLY FOR INTERNAL CONTROL // - // ExplicitHilbertSeries, + END_ENUM_RANGE(FIRST_INTERNAL), NakedDual, FullConeDynamic, + Generators, // // ONLY FOR E§XTENDED TESTS // @@ -238,7 +249,7 @@ TestLinearAlgebraGMP, TestSimplexParallel, TestLibNormaliz, - END_ENUM_RANGE(LAST_PROPERTY), + END_ENUM_RANGE(LAST_INTERNAL), EnumSize // this has to be the last entry, to get the number of entries in the enum @@ -284,7 +295,7 @@ /* return the restriction of this to 
the goals / options */ ConeProperties goals() const; ConeProperties options() const; - ConeProperties full_cone_goals() const; + ConeProperties full_cone_goals(bool renf) const; ConeProperties goals_using_grading(bool inhomogeneous) const; /* the following methods are used internally */ @@ -312,10 +323,11 @@ ConeProperties all_options(); // returns cps with the options set ConeProperties all_goals(); // returns cps with the options set -ConeProperties all_full_cone_goals(); // returns the goals controlling compute_full_cone() +ConeProperties all_full_cone_goals(bool renf); // returns the goals controlling compute_full_cone() ConeProperties all_goals_using_grading(bool inhomogeneous); // returns the goals which depend on grading ConeProperties only_homogeneous_props(); ConeProperties only_inhomogeneous_props(); +ConeProperties treated_as_hom_props(); } // namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/convert.h normaliz-3.8.9+ds/source/libnormaliz/convert.h --- normaliz-3.8.5+ds/source/libnormaliz/convert.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/convert.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,66 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -//--------------------------------------------------------------------------- - -#ifndef LIBNORMALIZ_CONVERT_H -#define LIBNORMALIZ_CONVERT_H - -//--------------------------------------------------------------------------- - -//#include -#include -#include - -namespace libnormaliz { - -// conversion for integers, throws ArithmeticException if conversion fails -template -inline void convert(ToType& ret, const FromType& val) { - if (!try_convert(ret, val)) { - throw ArithmeticException(val); - } -} - -// conversion of vectors -template -inline void convert(vector& ret_vect, const vector& from_vect) { - size_t s = from_vect.size(); - ret_vect.resize(s); - for (size_t i = 0; i < s; ++i) - convert(ret_vect[i], from_vect[i]); -} - -// general conversion with return, throws ArithmeticException if conversion fails -template -ToType convertTo(const FromType& val) { - ToType copy; - convert(copy, val); - return copy; -} - -} // namespace libnormaliz - -//--------------------------------------------------------------------------- -#endif -//--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/dynamic_bitset.cpp normaliz-3.8.9+ds/source/libnormaliz/dynamic_bitset.cpp --- normaliz-3.8.5+ds/source/libnormaliz/dynamic_bitset.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/dynamic_bitset.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,30 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -#include "libnormaliz/dynamic_bitset.h" - -namespace libnormaliz { - -const size_t dynamic_bitset::npos; - -} diff -Nru normaliz-3.8.5+ds/source/libnormaliz/enumeration.cpp normaliz-3.8.9+ds/source/libnormaliz/enumeration.cpp --- normaliz-3.8.5+ds/source/libnormaliz/enumeration.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/enumeration.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include "libnormaliz/HilbertSeries.cpp" - -#ifdef NMZ_COCOA -#include "libnormaliz/nmz_integrate.h" - -namespace libnormaliz { -bool verbose_INT; -} - -#include "libnormaliz/nmz_polynomial.cpp" -#include "libnormaliz/nmz_integral.cpp" - -#endif // NMZ_COCOA - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/face_lattice.cpp normaliz-3.8.9+ds/source/libnormaliz/face_lattice.cpp --- normaliz-3.8.5+ds/source/libnormaliz/face_lattice.cpp 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/face_lattice.cpp 2020-08-29 07:43:26.000000000 +0000 @@ -0,0 +1,516 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +#include "libnormaliz/cone.h" +#include "libnormaliz/face_lattice.h" +#include "libnormaliz/vector_operations.h" + +namespace libnormaliz { + +using namespace std; + +template +FaceLattice::FaceLattice() { + +} + +// It is assumed that the matrices in the constructor are for the pointed quotient, +// even if the names of the parameters don't indicate that. 
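// A minimal usage sketch mirroring Cone<Integer>::make_face_lattice_dual in cone.cpp:
// construct the FaceLattice with the pointed-quotient matrices, run compute(), then
// extract f-vector, face lattice and incidence. The wrapper name and parameter list
// are illustrative only and not part of the class.
template <typename Integer>
vector<size_t> face_lattice_f_vector_sketch(const Matrix<Integer>& SuppHyps,
                                            const Matrix<Integer>& VertsOfPol,
                                            const Matrix<Integer>& ExtrRecCone,
                                            const bool cone_inhomogeneous,
                                            const long face_codim_bound,
                                            const bool verbose,
                                            bool change_integer_type) {
    FaceLattice<Integer> FL(SuppHyps, VertsOfPol, ExtrRecCone, cone_inhomogeneous);
    FL.compute(face_codim_bound, verbose, change_integer_type);

    map<dynamic_bitset, int> lattice;   // codimension of each face, keyed by its set of containing support hyperplanes
    FL.get(lattice);
    vector<dynamic_bitset> incidence;   // one incidence bitset per support hyperplane
    FL.get(incidence);

    return FL.getFVector();             // assembled from the preliminary f-vector in compute()
}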
+ +template +FaceLattice::FaceLattice(const Matrix& SupportHyperplanes, const Matrix& VerticesOfPolyhedron, + const Matrix& ExtremeRaysRecCone, const bool cone_inhomogeneous){ + + inhomogeneous = cone_inhomogeneous; + + nr_supphyps = SupportHyperplanes.nr_of_rows(); + nr_extr_rec_cone = ExtremeRaysRecCone.nr_of_rows(); + nr_vert = VerticesOfPolyhedron.nr_of_rows(); + nr_gens = nr_extr_rec_cone + nr_vert; + + SuppHyps = SupportHyperplanes; + dim = SupportHyperplanes[0].size(); + + SuppHypInd.clear(); + SuppHypInd.resize(nr_supphyps); + + // order of the extreme rays: + // + // first the vertices of polyhedron (in the inhomogeneous case) + // then the extreme rays of the (recession) cone + // + + bool skip_remaining = false; + std::exception_ptr tmp_exception; + + int nr_simplial_facets = 0; + +#pragma omp parallel for + for (size_t i = 0; i < nr_supphyps; ++i) { + if (skip_remaining) + continue; + + int nr_gens_in_hyp = 0; + + SuppHypInd[i].resize(nr_gens); + + try { + INTERRUPT_COMPUTATION_BY_EXCEPTION + + if (inhomogeneous) { + for (size_t j = 0; j < nr_vert; ++j) { + if (v_scalar_product(SupportHyperplanes[i], VerticesOfPolyhedron[j]) == 0) { + nr_gens_in_hyp++; + SuppHypInd[i][j] = true; + } + } + } + + for (size_t j = 0; j < nr_extr_rec_cone; ++j) { + if (v_scalar_product(SupportHyperplanes[i], ExtremeRaysRecCone[j]) == 0) { + nr_gens_in_hyp++; + SuppHypInd[i][j + nr_vert] = true; + } + } + + if (nr_gens_in_hyp == (int)(dim - 1)) +//#pragma omp atomic + nr_simplial_facets++; + + } catch (const std::exception&) { + tmp_exception = std::current_exception(); + skip_remaining = true; +#pragma omp flush(skip_remaining) + } + } + if (!(tmp_exception == 0)) + std::rethrow_exception(tmp_exception); + + // if (verbose) + // verboseOutput() << "Simplicial facets " << nr_simplial_facets << " of " << nr_supphyps << endl; + +} + +struct FaceInfo { + // dynamic_bitset ExtremeRays; + dynamic_bitset HypsContaining; + int max_cutting_out; + bool max_subset; + // bool max_prec; + bool simple; +}; + +bool face_compare(const pair& a, const pair& b) { + return (a.first < b.first); +} + +template +void FaceLattice::compute(const long face_codim_bound, const bool verbose, bool change_integer_type) { + + bool bound_codim = false; + if (face_codim_bound >= 0) + bound_codim = true; + + dynamic_bitset SimpleVert(nr_gens); + size_t nr_simpl = 0; + for (size_t j = 0; j < nr_gens; ++j) { + size_t nr_cont = 0; + for (size_t i = 0; i < nr_supphyps; ++i) + if (SuppHypInd[i][j]) + nr_cont++; + if (nr_cont == dim - 1) { + SimpleVert[j] = 1; + nr_simpl++; + } + } + if (verbose) + verboseOutput() << "Cosimplicial gens " << nr_simpl << " of " << nr_gens << endl; + + bool use_simple_vert = (10 * nr_simpl > nr_gens); + + vector prel_f_vector(dim + 1, 0); + + dynamic_bitset the_cone(nr_gens); + the_cone.set(); + dynamic_bitset empty(nr_supphyps); + dynamic_bitset AllFacets(nr_supphyps); + AllFacets.set(); + + map > NewFaces; + map > WorkFaces; + + WorkFaces[empty] = make_pair(empty, AllFacets); // start with the full cone + dynamic_bitset ExtrRecCone(nr_gens); // in the inhomogeneous case + if (inhomogeneous) { // we exclude the faces of the recession cone + for (size_t j = 0; j < nr_extr_rec_cone; ++j) + ExtrRecCone[j + nr_vert] = 1; + ; + } + + Matrix SuppHyps_MI; + if(change_integer_type) + convert(SuppHyps_MI, SuppHyps); + + /*for(int i=0;i< 10000;++i){ // for pertubation of order of supphyps + int j=rand()%nr_supphyps; + int k=rand()%nr_supphyps; + swap(SuppHypInd[j],SuppHypInd[k]); + 
swap(EmbeddedSuppHyps[j],EmbeddedSuppHyps[k]); + if(change_integer_type) + swap(EmbeddedSuppHyps_MI[j],EmbeddedSuppHyps_MI[k]); + }*/ + + vector Unit_bitset(nr_supphyps); + for (size_t i = 0; i < nr_supphyps; ++i) { + Unit_bitset[i].resize(nr_supphyps); + Unit_bitset[i][i] = 1; + } + + long codimension_so_far = 0; // the lower bound for the codimension so far + + const long VERBOSE_STEPS = 50; + const size_t RepBound = 1000; + bool report_written = false; + + size_t total_inter = 0; + size_t avoided_inter = 0; + size_t total_new = 0; + size_t total_simple = 1; // the full cone is cosimplicial + size_t total_max_subset = 0; + + while (true) { + codimension_so_far++; // codimension of faces put into NewFaces + bool CCC = false; + if (codimension_so_far == 1) + CCC = true; + + if (bound_codim && codimension_so_far > face_codim_bound + 1) + break; + size_t nr_faces = WorkFaces.size(); + if (verbose) { + if (report_written) + verboseOutput() << endl; + verboseOutput() << "codim " << codimension_so_far - 1 << " faces to process " << nr_faces << endl; + report_written = false; + } + + long step_x_size = nr_faces - VERBOSE_STEPS; + + bool skip_remaining = false; + std::exception_ptr tmp_exception; + +#pragma omp parallel + { + size_t Fpos = 0; + auto F = WorkFaces.begin(); + list > FreeFaces, Faces; + pair fr; + fr.first.resize(nr_gens); + fr.second.HypsContaining.resize(nr_supphyps); + for (size_t i = 0; i < nr_supphyps; ++i) { + FreeFaces.push_back(fr); + } + +#pragma omp for schedule(dynamic) + for (size_t kkk = 0; kkk < nr_faces; ++kkk) { + if (skip_remaining) + continue; + + for (; kkk > Fpos; ++Fpos, ++F) + ; + for (; kkk < Fpos; --Fpos, --F) + ; + + if (verbose && nr_faces >= RepBound) { +#pragma omp critical(VERBOSE) + while ((long)(kkk * VERBOSE_STEPS) >= step_x_size) { + step_x_size += nr_faces; + verboseOutput() << "." 
<< flush; + report_written = true; + } + } + + Faces.clear(); + + try { + INTERRUPT_COMPUTATION_BY_EXCEPTION + + dynamic_bitset beta_F = F->second.first; + + bool F_simple = ((long)F->first.count() == codimension_so_far - 1); + +#pragma omp atomic + prel_f_vector[codimension_so_far - 1]++; + + dynamic_bitset Gens = the_cone; // make indicator vector of *F + for (int i = 0; i < (int)nr_supphyps; ++i) { + if (F->second.first[nr_supphyps - 1 - i] == 0) // does not define F + continue; + // beta_F=i; + Gens = Gens & SuppHypInd[i]; + } + + dynamic_bitset MM_mother = F->second.second; + + // now we produce the intersections with facets + dynamic_bitset Intersect(nr_gens); + + int start; + if (CCC) + start = 0; + else { + start = F->second.first.find_first(); + start = nr_supphyps - start; + } + + for (size_t i = start; i < nr_supphyps; ++i) { + if (F->first[i] == 1) { // contains *F + continue; + } +#pragma omp atomic + total_inter++; + if (MM_mother[i] == 0) { // using restriction criteria of the paper +#pragma omp atomic + avoided_inter++; + continue; + } + Intersect = Gens & SuppHypInd[i]; + if (inhomogeneous && Intersect.is_subset_of(ExtrRecCone)) + continue; + + Faces.splice(Faces.end(), FreeFaces, FreeFaces.begin()); + Faces.back().first = Intersect; + Faces.back().second.max_cutting_out = i; + Faces.back().second.max_subset = true; + // Faces.back().second.HypsContaining.reset(); + // Faces.push_back(make_pair(Intersect,fr)); + } + + Faces.sort(face_compare); + for (auto Fac = Faces.begin(); Fac != Faces.end(); ++Fac) { + if (Fac != Faces.begin()) { + auto Gac = Fac; + --Gac; + if (Fac->first == Gac->first) { + Fac->second.max_subset = false; + Gac->second.max_subset = false; + } + } + } + + for (auto Fac = Faces.end(); Fac != Faces.begin();) { // first we check for inclusion + + --Fac; + + if (!Fac->second.max_subset) + continue; + + auto Gac = Fac; + Gac++; + for (; Gac != Faces.end(); Gac++) { + if (!Gac->second.max_subset) + continue; + if (Fac->first.is_subset_of(Gac->first)) { + Fac->second.max_subset = false; + break; + } + } + } + + dynamic_bitset MM_F(nr_supphyps); + + for (auto Fac = Faces.end(); Fac != Faces.begin();) { + --Fac; + + if (!Fac->second.max_subset) + continue; + +#pragma omp atomic + total_max_subset++; + + INTERRUPT_COMPUTATION_BY_EXCEPTION + + dynamic_bitset Containing = F->first; + Containing[Fac->second.max_cutting_out] = 1; + + bool simple = false; + if (F_simple && use_simple_vert) { + if ((Fac->first & SimpleVert).any()) { + simple = true; + } + } + + if (!simple) { + bool extra_hyp = false; + for (size_t j = 0; j < nr_supphyps; ++j) { // beta_F + if (Containing[j] == 0 && Fac->first.is_subset_of(SuppHypInd[j])) { + Containing[j] = 1; + extra_hyp = true; + } + } + simple = F_simple && !extra_hyp; + } + + int codim_of_face = 0; // to make gcc happy + if (simple) + codim_of_face = codimension_so_far; + else { + dynamic_bitset Containing(nr_supphyps); + for (size_t j = 0; j < nr_supphyps; ++j) { // beta_F + if (Containing[j] == 0 && Fac->first.is_subset_of(SuppHypInd[j])) { + Containing[j] = 1; + } + } + vector selection = bitset_to_bool(Containing); + if (change_integer_type) { + try { + codim_of_face = SuppHyps_MI.submatrix(selection).rank(); + } catch (const ArithmeticException& e) { + change_integer_type = false; + } + } + if (!change_integer_type) + codim_of_face = SuppHyps.submatrix(selection).rank(); + + if (codim_of_face > codimension_so_far) { + Fac->second.max_subset = false; + continue; + } + } + + MM_F[Fac->second.max_cutting_out] = 1; + 
Fac->second.simple = simple; + Fac->second.HypsContaining = Containing; + } + + for (auto Fac = Faces.end(); Fac != Faces.begin();) { // why backwards?? + + --Fac; + + if (!Fac->second.max_subset) + continue; + + bool simple = Fac->second.simple; + + beta_F[nr_supphyps - 1 - Fac->second.max_cutting_out] = + 1; // we must go to revlex, beta_F reconstituted below + +#pragma omp critical(INSERT_NEW) + { + total_new++; + + if (simple) { + NewFaces[Fac->second.HypsContaining] = make_pair(beta_F, MM_F); + total_simple++; + } + else { + auto G = NewFaces.find(Fac->second.HypsContaining); + if (G == NewFaces.end()) { + NewFaces[Fac->second.HypsContaining] = make_pair(beta_F, MM_F); + } + else { + if (G->second.first < beta_F) { // because of revlex < instead of > + G->second.first = beta_F; + G->second.second = MM_F; + } + } + } + } // critical + + beta_F[nr_supphyps - 1 - Fac->second.max_cutting_out] = 0; + } + } catch (const std::exception&) { + tmp_exception = std::current_exception(); + skip_remaining = true; +#pragma omp flush(skip_remaining) + } + + FreeFaces.splice(FreeFaces.end(), Faces); + } // omp for + } // parallel + if (!(tmp_exception == 0)) + std::rethrow_exception(tmp_exception); + + // if (ToCompute.test(ConeProperty::FaceLattice)) + for (auto H = WorkFaces.begin(); H != WorkFaces.end(); ++H) + FaceLat[H->first] = codimension_so_far - 1; + WorkFaces.clear(); + if (NewFaces.empty()) + break; + swap(WorkFaces, NewFaces); + } + + if (inhomogeneous && nr_vert != 1) { // we want the empty face in the face lattice + // (never the case in homogeneous computations) + dynamic_bitset NoGens(nr_gens); + size_t codim_max_subspace = SuppHyps.rank(); + FaceLat[AllFacets] = codim_max_subspace; + if (!(bound_codim && (int)codim_max_subspace > face_codim_bound)) + prel_f_vector[codim_max_subspace]++; + } + + size_t total_nr_faces = 0; + for (int i = prel_f_vector.size() - 1; i >= 0; --i) { + if (prel_f_vector[i] != 0) { + f_vector.push_back(prel_f_vector[i]); + total_nr_faces += prel_f_vector[i]; + } + } + + // cout << " Total " << FaceLattice.size() << endl; + + if (verbose) { + verboseOutput() << endl << "Total number of faces computed " << total_nr_faces << endl; + verboseOutput() << "f-vector " << f_vector; + } + + +} + +template +vector FaceLattice::getFVector(){ + return f_vector; +} + +template +void FaceLattice::get(map& FaceLatticeOutput){ + swap(FaceLat,FaceLatticeOutput); + +} + +template +void FaceLattice::get(vector& SuppHypIndOutput){ + swap(SuppHypInd,SuppHypIndOutput); +} + +#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported +template class FaceLattice; +#endif +template class FaceLattice; +template class FaceLattice; + +#ifdef ENFNORMALIZ +template class FaceLattice; +#endif + +} // namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/face_lattice.h normaliz-3.8.9+ds/source/libnormaliz/face_lattice.h --- normaliz-3.8.5+ds/source/libnormaliz/face_lattice.h 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/face_lattice.h 2020-08-29 07:43:26.000000000 +0000 @@ -0,0 +1,74 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +#ifndef LIBNORMALIZ_FAVE_LATTICE_H_ +#define LIBNORMALIZ_FAVE_LATTICE_H_ + +#include +#include +#include +#include + +#include +#include +#include "libnormaliz/dynamic_bitset.h" + +namespace libnormaliz { +using std::map; +using std::pair; +using std::vector; + +template +class FaceLattice { + + bool verbose; + bool inhomogeneous; + + size_t nr_supphyps; + size_t nr_extr_rec_cone; + size_t nr_vert; + size_t nr_gens; + + size_t dim; // we aqssume pointed! + + Matrix SuppHyps; // local storage for supporet hypeplanes + + map FaceLat; + vector SuppHypInd; + vector f_vector; + +public: + + FaceLattice(const Matrix& SupportHyperplanes, const Matrix& VerticesOfPolyhedron, + const Matrix& ExtremeRaysRecCone, const bool cone_inhomogeneous); + FaceLattice(); + void compute(const long face_codim_bound, const bool verbose, bool change_integer_type); + vector getFVector(); + void get(map& FaceLatticeOutput); + void get(vector& SuppHypIndOutput); + +}; + +} // namespace libnormaliz + +#endif /* LIBNORMALIZ_FAVE_LATTICE_H__ */ diff -Nru normaliz-3.8.5+ds/source/libnormaliz/full_cone.cpp normaliz-3.8.9+ds/source/libnormaliz/full_cone.cpp --- normaliz-3.8.5+ds/source/libnormaliz/full_cone.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/full_cone.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -23,22 +23,22 @@ //--------------------------------------------------------------------------- -#include +#include #include #include #include #include #include -#include +#include #include -#include +#include #include "libnormaliz/cone.h" #include "libnormaliz/full_cone.h" #include "libnormaliz/project_and_lift.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/list_operations.h" -#include "libnormaliz/map_operations.h" +#include "libnormaliz/list_and_map_operations.h" +// #include "libnormaliz/map_operations.h" #include "libnormaliz/integer.h" #include "libnormaliz/sublattice_representation.h" #include "libnormaliz/offload_handler.h" @@ -340,7 +340,7 @@ assert(dim == 0); if (verbose) { - verboseOutput() << "Zero cone detected!" << endl; + errorOutput() << "WARNING: Zero cone detected!" << endl; } // The basis change already is transforming to zero. 
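The SuppHypInd member declared in face_lattice.h above records, for each support hyperplane, which generators (vertices of the polyhedron followed by extreme rays of the recession cone) lie on it; the FaceLattice constructor fills it with the scalar-product test quoted earlier. A minimal standalone sketch of that incidence computation, assuming only the Matrix, dynamic_bitset and v_scalar_product interfaces already used in this diff; the function name is hypothetical:

#include "libnormaliz/dynamic_bitset.h"
#include "libnormaliz/matrix.h"
#include "libnormaliz/vector_operations.h"

using libnormaliz::Matrix;
using libnormaliz::dynamic_bitset;
using std::vector;

template <typename Integer>
vector<dynamic_bitset> incidence_sketch(const Matrix<Integer>& SuppHyps, const Matrix<Integer>& Gens) {
    vector<dynamic_bitset> Ind(SuppHyps.nr_of_rows());
    for (size_t i = 0; i < SuppHyps.nr_of_rows(); ++i) {
        Ind[i].resize(Gens.nr_of_rows());
        for (size_t j = 0; j < Gens.nr_of_rows(); ++j)
            if (libnormaliz::v_scalar_product(SuppHyps[i], Gens[j]) == 0)
                Ind[i][j] = true;  // generator j lies on support hyperplane i
    }
    return Ind;
}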
@@ -1186,19 +1186,39 @@ } } // ranktest else { // now the comparison test + + // cout << "comp " << Facets_0_1_thread.size() << endl; /* #pragma omp atomic NrComp++; */ auto a = Facets_0_1_thread.begin(); + CommonGens = RelGen_PosHyp & NegHyp_Pointer->GenInHyp; - for (; a != Facets_0_1_thread.end(); ++a) { - if (CommonGens.is_subset_of(*a) && (*a != PosHyp_Pointer->GenInHyp) && - (*a != NegHyp_Pointer->GenInHyp)) { + /*for (; a != Facets_0_1_thread.end(); ++a) { + bool contains = true; + for(size_t i=0; i< nr_CommonGens; ++i){ + if(!(*a)[common_key[i]]){ + contains = false; + break; + } + } + if ((contains && *a != PosHyp_Pointer->GenInHyp) && (*a != NegHyp_Pointer->GenInHyp)) { common_subfacet = false; Facets_0_1_thread.splice(Facets_0_1_thread.begin(), Facets_0_1_thread, - a); // for the "darwinistic" mewthod + a); // for the "darwinistic" mewthod break; } + }*/ + + + for (; a != Facets_0_1_thread.end(); ++a) { + if (CommonGens.is_subset_of(*a) && (*a != PosHyp_Pointer->GenInHyp) && + (*a != NegHyp_Pointer->GenInHyp)) { + common_subfacet = false; + Facets_0_1_thread.splice(Facets_0_1_thread.begin(), Facets_0_1_thread, + a); // for the "darwinistic" mewthod + break; + } } } // else @@ -1912,7 +1932,7 @@ if (time_measured) { mpq_class large_factor_mpq(ticks_rank_per_row); mpz_class add = round(large_factor_mpq); - large_factor += convertTo(add); + large_factor += convertToLong(add); } large = (large_factor * Comparisons[Pyramid_key.size() - dim] > old_nr_supp_hyps); } @@ -3735,9 +3755,7 @@ Matrix Gred = NewCoordinates.to_sublattice(Gens); vector GradT = NewCoordinates.to_sublattice_dual(Grading); - Matrix GradMat(0, dim); - GradMat.append(GradT); - Cone ProjCone(Type::cone, Gred, Type::grading, GradMat); + Cone ProjCone(Type::cone, Gred, Type::grading, Matrix(GradT)); ConeProperties ForDeg1; ForDeg1.set(ConeProperty::Projection); ForDeg1.set(ConeProperty::NoLLL); @@ -4054,7 +4072,7 @@ template void Full_Cone::finish_Hilbert_series() { if (do_h_vector) { - Hilbert_Series.setShift(convertTo(shift)); + Hilbert_Series.setShift(convertToLong(shift)); Hilbert_Series.adjustShift(); // now the shift in the HilbertSeries may change and we would have to adjust // the shift, the grading and more in the Full_Cone to continue to add data! 
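Several hunks in this file, and in cone_dual_mode.cpp above, replace convertTo<long>(x) by convertToLong(x). A minimal sketch of such a named wrapper, built on the convertTo template quoted in the removed convert.h; it is only a stand-in under that assumption, and the actual libnormaliz helper may carry additional overflow diagnostics:

namespace libnormaliz {

// stand-in, assuming convertTo (formerly in convert.h) remains available
template <typename FromType>
long convertToLong_sketch(const FromType& val) {
    return convertTo<long>(val);  // throws ArithmeticException if val does not fit into a long
}

}  // namespace libnormaliz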
@@ -4477,12 +4495,28 @@ // if no bool is set it does support hyperplanes and extreme rays template void Full_Cone::compute() { + + InputGenerators = Generators; // purified input -- in case we get an exception + omp_start_level = omp_get_level(); + + /*cout << "==============" << endl; + Generators.pretty_print(cout); + cout << "==============" << endl;*/ if (dim == 0) { set_zero_cone(); + deactivate_completed_tasks(); + prepare_inclusion_exclusion(); return; } + + if(using_renf()){ + assert(Truncation.size() == 0 || Grading.size() == 0); + Norm = Truncation; + if (Grading.size() > 0) + Norm = Grading; + } set_implications(); start_message(); @@ -4502,12 +4536,14 @@ check_given_grading(); // look for a grading if it is needed - find_grading(); + if(!using_renf()) + find_grading(); + if (isComputed(ConeProperty::IsPointed) && !pointed) { end_message(); return; } - if (!isComputed(ConeProperty::Grading)) + if (!isComputed(ConeProperty::Grading) && !using_renf()) disable_grading_dep_comp(); // revlex_triangulation(); was here for test @@ -4518,15 +4554,25 @@ // primal_algorithm_initialize(); support_hyperplanes(); - compute_class_group(); + InputGenerators = Generators; // purified input + if(check_semiopen_empty) + prepare_inclusion_exclusion(); + if(!using_renf()) + compute_class_group(); compute_automorphisms(); deactivate_completed_tasks(); end_message(); return; } + + if (isComputed(ConeProperty::IsPointed) && !pointed) { + end_message(); + return; + } set_degrees(); sort_gens_by_degree(true); + InputGenerators = Generators; // purified input bool polyhedron_is_polytope = inhomogeneous; if (inhomogeneous) { @@ -4548,70 +4594,17 @@ primal_algorithm(); deactivate_completed_tasks(); - if (inhomogeneous && descent_level == 0) { + if (!using_renf() && inhomogeneous && descent_level == 0) { find_module_rank(); } - compute_class_group(); - compute_automorphisms(); - deactivate_completed_tasks(); - - end_message(); -} - -#ifdef ENFNORMALIZ -template <> -void Full_Cone::compute() { - if (dim == 0) { - set_zero_cone(); - return; - } - - assert(Truncation.size() == 0 || Grading.size() == 0); - - Norm = Truncation; - if (Grading.size() > 0) - Norm = Grading; - - set_implications(); - set_degrees(); - - start_message(); - - if (!do_Hilbert_basis && !do_h_vector && !do_multiplicity && !do_deg1_elements && !do_Stanley_dec && !keep_triangulation && - !do_determinants) - assert(Generators.max_rank_submatrix_lex().size() == dim); - - minimize_support_hyperplanes(); // if they are given - if (inhomogeneous) - set_levels(); - - check_given_grading(); - - // compute_by_automorphisms(); - - if (do_only_supp_hyps_and_aux) { - support_hyperplanes(); - compute_automorphisms(); - deactivate_completed_tasks(); - end_message(); - return; - } - - if (isComputed(ConeProperty::IsPointed) && !pointed) { - end_message(); - return; - } - - sort_gens_by_degree(true); - - primal_algorithm(); + if(!using_renf()) + compute_class_group(); compute_automorphisms(); deactivate_completed_tasks(); end_message(); } -#endif // compute the degree vector of a hsop template @@ -5429,13 +5422,13 @@ vector hv(1); typename list>::const_iterator hb = Polytope.Deg1_Elements.begin(); for (; hb != Polytope.Deg1_Elements.end(); ++hb) { - size_t deg = convertTo(v_scalar_product(Grading, *hb)); + size_t deg = convertToLong(v_scalar_product(Grading, *hb)); if (deg + 1 > hv.size()) hv.resize(deg + 1); hv[deg]++; } Hilbert_Series.add(hv, vector()); - Hilbert_Series.setShift(convertTo(shift)); + Hilbert_Series.setShift(convertToLong(shift)); 
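// A minimal standalone sketch of the accumulation performed by the Deg1_Elements loop
// above: a degree histogram (the h-vector contribution) is built before the shift is
// adjusted. Plain std::vector only; the helper name and the precomputed degree list
// are hypothetical.
#include <cstddef>
#include <vector>

static std::vector<std::size_t> degree_histogram(const std::vector<std::size_t>& degrees) {
    std::vector<std::size_t> hv(1);
    for (std::size_t deg : degrees) {
        if (deg + 1 > hv.size())
            hv.resize(deg + 1);
        hv[deg]++;  // one lattice point of this degree
    }
    return hv;
}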
Hilbert_Series.adjustShift(); Hilbert_Series.simplify(); setComputed(ConeProperty::HilbertSeries); @@ -5881,6 +5874,9 @@ // an alternative to compute() for the basic tasks that need no triangulation template void Full_Cone::dualize_cone(bool print_message) { + + InputGenerators = Generators; // purified input -- in case we get an exception + omp_start_level = omp_get_level(); if (dim == 0) { @@ -5899,6 +5895,8 @@ start_message(); sort_gens_by_degree(false); + + InputGenerators = Generators; // purified input if (!isComputed(ConeProperty::SupportHyperplanes)) build_top_cone(); @@ -6396,24 +6394,22 @@ if (ExcludedFaces.nr_of_rows() == 0) return; - do_excluded_faces = do_h_vector || do_Stanley_dec; - - if (verbose && !do_excluded_faces) { - errorOutput() << endl - << "WARNING: excluded faces, but no h-vector computation or Stanley decomposition" << endl - << "Therefore excluded faces will be ignored" << endl; - } - - if (isComputed(ConeProperty::ExcludedFaces) && (isComputed(ConeProperty::InclusionExclusionData) || !do_excluded_faces)) { + do_excluded_faces = do_h_vector || do_Stanley_dec || check_semiopen_empty; + + if ((isComputed(ConeProperty::ExcludedFaces) && isComputed(ConeProperty::InclusionExclusionData)) || !do_excluded_faces) { return; } + + if(verbose) + verboseOutput() << "Computing inclusion/excluseion data" << endl; // indicates which generators lie in the excluded faces vector GensInExcl(ExcludedFaces.nr_of_rows()); + + index_covering_face = ExcludedFaces.nr_of_rows(); // if not changed: not covered by an exc luded face - for (size_t j = 0; j < ExcludedFaces.nr_of_rows(); ++j) { // now we produce these indicators - bool first_neq_0 = true; // and check whether the linear forms in ExcludedFaces - bool non_zero = false; // have the cone on one side + for (size_t j = 0; j < ExcludedFaces.nr_of_rows(); ++j) { + bool empty_semiopen = true; GensInExcl[j].resize(nr_gen); for (size_t i = 0; i < nr_gen; ++i) { Integer test = v_scalar_product(ExcludedFaces[j], Generators[i]); @@ -6421,22 +6417,21 @@ GensInExcl[j].set(i); continue; } - non_zero = true; - if (first_neq_0) { - first_neq_0 = false; - if (test < 0) { - for (size_t k = 0; k < dim; ++k) // replace linear form by its negative - ExcludedFaces[j][k] *= -1; // to get cone in positive halfspace - test *= -1; // (only for error check) - } - } - if (test < 0) { - throw FatalException("Excluded hyperplane does not define a face."); - } + empty_semiopen = false; } - if (!non_zero) { // not impossible if the hyperplane contains the vector space spanned by the cone - throw FatalException("Excluded face contains the full cone."); + if (empty_semiopen) { // not impossible if the hyperplane contains the vector space spanned by the cone + if(!check_semiopen_empty || do_h_vector || do_Stanley_dec) + throw BadInputException("An Excluded face covers the polyhedron. 
Not allowed unless ONLY checking emptyness."); + empty_semiopen = true; + index_covering_face = j; + setComputed(ConeProperty::IsEmptySemiOpen); + setComputed(ConeProperty::ExcludedFaces); + return; } + } + + if(check_semiopen_empty){ + setComputed(ConeProperty::IsEmptySemiOpen); } vector essential(ExcludedFaces.nr_of_rows(), true); @@ -6627,6 +6622,8 @@ do_pointed = false; do_all_hyperplanes = true; do_supphyps_dynamic = false; + + check_semiopen_empty = false; do_bottom_dec = false; keep_order = false; @@ -6699,7 +6696,7 @@ } } Generators.remove_duplicate_and_zero_rows(); - + nr_gen = Generators.nr_of_rows(); if (nr_gen != static_cast(static_cast(nr_gen))) { @@ -6806,6 +6803,7 @@ dim = C.dim; Generators.swap(C.Generators); + InputGenerators = Generators; nr_gen = Generators.nr_of_rows(); if (Generators.nr_of_rows() > 0) setComputed(ConeProperty::Generators); @@ -7202,14 +7200,16 @@ template const Matrix& Full_Cone::getGenerators() const { - return Generators; + return InputGenerators; } //--------------------------------------------------------------------------- template vector Full_Cone::getExtremeRays() const { - return Extreme_Rays_Ind; + vector ext = Extreme_Rays_Ind; + ext.resize(InputGenerators.nr_of_rows()); + return ext; } //--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/full_cone.h normaliz-3.8.9+ds/source/libnormaliz/full_cone.h --- normaliz-3.8.5+ds/source/libnormaliz/full_cone.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/full_cone.h 2020-09-25 14:54:40.000000000 +0000 @@ -27,7 +27,7 @@ #include #include #include -#include +#include //#include #include "libnormaliz/general.h" @@ -120,6 +120,7 @@ bool exploit_automs_mult; bool exploit_automs_vectors; bool do_automorphisms; + bool check_semiopen_empty; bool do_hsop; bool do_extreme_rays; @@ -180,6 +181,7 @@ renf_elem_class renf_multiplicity; #endif Matrix Generators; + Matrix InputGenerators; // stores purified input -- Generators can be extended set> Generator_Set; // the generators as a set (if needed) Matrix Generators_float; // floatung point approximations to the generators vector PermGens; // stores the permutation of the generators created by sorting @@ -215,6 +217,8 @@ ClassGroup; // the class group as a vector: ClassGroup[0]=its rank, then the orders of the finite cyclic summands Matrix ProjToLevel0Quot; // projection matrix onto quotient modulo level 0 sublattice + + size_t index_covering_face; //used in checking emptyness of semiopen polyhedron // ************************** Data for convex hull computations **************************** vector HypCounter; // counters used to give unique number to hyperplane diff -Nru normaliz-3.8.5+ds/source/libnormaliz/general.cpp normaliz-3.8.9+ds/source/libnormaliz/general.cpp --- normaliz-3.8.5+ds/source/libnormaliz/general.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/general.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -22,7 +22,7 @@ */ #include -#include +#include #include "libnormaliz/general.h" @@ -43,6 +43,11 @@ size_t GMP_scal_prod = 0; size_t TotDet = 0; +bool int_max_value_dual_long_computed = false; +bool int_max_value_dual_long_long_computed = false; +bool int_max_value_primary_long_computed = false; +bool int_max_value_primary_long_long_computed = false; + #ifdef NMZ_EXTENDED_TESTS bool test_arith_overflow_full_cone = false; bool test_arith_overflow_dual_mode = false; diff -Nru normaliz-3.8.5+ds/source/libnormaliz/general.h 
normaliz-3.8.9+ds/source/libnormaliz/general.h --- normaliz-3.8.5+ds/source/libnormaliz/general.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/general.h 2020-07-21 15:37:45.000000000 +0000 @@ -25,10 +25,12 @@ #define LIBNORMALIZ_GENERAL_H_ #include -#include -#include +#include +#include #include +#include + #ifndef NMZ_MAKEFILE_CLASSIC #include #endif @@ -88,6 +90,11 @@ NORMALIZ_DLL_EXPORT extern size_t GMP_mat, GMP_hyp, GMP_scal_prod; NORMALIZ_DLL_EXPORT extern size_t TotDet; +NORMALIZ_DLL_EXPORT extern bool int_max_value_dual_long_computed; +NORMALIZ_DLL_EXPORT extern bool int_max_value_dual_long_long_computed; +NORMALIZ_DLL_EXPORT extern bool int_max_value_primary_long_computed; +NORMALIZ_DLL_EXPORT extern bool int_max_value_primary_long_long_computed; + #ifdef NMZ_EXTENDED_TESTS NORMALIZ_DLL_EXPORT extern bool test_arith_overflow_full_cone, test_arith_overflow_dual_mode; NORMALIZ_DLL_EXPORT extern bool test_arith_overflow_descent, test_arith_overflow_proj_and_lift; @@ -131,8 +138,8 @@ } /* end namespace libnormaliz */ -#include #include +#include #include #include diff -Nru normaliz-3.8.5+ds/source/libnormaliz/HilbertSeries.cpp normaliz-3.8.9+ds/source/libnormaliz/HilbertSeries.cpp --- normaliz-3.8.5+ds/source/libnormaliz/HilbertSeries.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/HilbertSeries.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -34,9 +34,9 @@ #include "libnormaliz/general.h" #include "libnormaliz/HilbertSeries.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/map_operations.h" +#include "libnormaliz/list_and_map_operations.h" #include "libnormaliz/integer.h" -#include "libnormaliz/convert.h" +// #include "libnormaliz/convert.h" #include "libnormaliz/matrix.h" @@ -628,6 +628,7 @@ if (verbose) { errorOutput() << "WARNING: We skip the computation of the Hilbert-quasi-polynomial because the period " << period << " is too big!" << endl; + errorOutput() << "Rerun with NO_PERIOD_BOUND" << endl; } return; } diff -Nru normaliz-3.8.5+ds/source/libnormaliz/input.cpp normaliz-3.8.9+ds/source/libnormaliz/input.cpp --- normaliz-3.8.5+ds/source/libnormaliz/input.cpp 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/input.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -0,0 +1,1015 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. 
+ */ + +#include +#include // std::isdigit +#include // numeric_limits + +#include "libnormaliz/input.h" + +namespace libnormaliz { + +// eats up a comment, stream must start with "/*", eats everything until "*/" +void skip_comment(istream& in) { + int i = in.get(); + int j = in.get(); + if (i != '/' || j != '*') { + throw BadInputException("Bad comment start!"); + } + while (in.good()) { + in.ignore(numeric_limits::max(), '*'); // ignore everything until next '*' + i = in.get(); + if (in.good() && i == '/') + return; // successfully skipped comment + } + throw BadInputException("Incomplete comment!"); +} + +template +void save_matrix(map > >& input_map, + InputType input_type, + const vector >& M) { + // check if this type already exists + if (contains(input_map, input_type)) { + /*throw BadInputException("Multiple inputs of type \"" + type_string + + "\" are not allowed!");*/ + input_map[input_type].insert(input_map[input_type].end(), M.begin(), M.end()); + return; + } + input_map[input_type] = M; +} + +template +void save_empty_matrix(map > >& input_map, InputType input_type) { + vector > M; + save_matrix(input_map, input_type, M); +} + +template +vector > transpose_mat(const vector >& mat) { + if (mat.size() == 0 || mat[0].size() == 0) + return vector >(0); + size_t m = mat[0].size(); + size_t n = mat.size(); + vector > transpose(m, vector(n, 0)); + for (size_t i = 0; i < m; ++i) + for (size_t j = 0; j < n; ++j) + transpose[i][j] = mat[j][i]; + return transpose; +} + +template +void append_row(const vector row, map > >& input_map, Type::InputType input_type) { + vector > one_row(1, row); + save_matrix(input_map, input_type, one_row); +} + +template +void process_constraint(const string& rel, + const vector& left, + Number right, + const Number modulus, + map > >& input_map, + bool forced_hom) { + vector row = left; + bool inhomogeneous = false; + if (right != 0 || rel == "<" || rel == ">") + inhomogeneous = true; + string modified_rel = rel; + bool strict_inequality = false; + if (rel == "<") { + strict_inequality = true; + right -= 1; + modified_rel = "<="; + } + if (rel == ">") { + strict_inequality = true; + right += 1; + modified_rel = ">="; + } + if (strict_inequality && forced_hom) { + throw BadInputException("Strict inequality not allowed in hom_constraints!"); + } + if (inhomogeneous || forced_hom) + row.push_back(-right); // rhs --> lhs + if (modified_rel == "<=") { // convert <= to >= + for (size_t j = 0; j < row.size(); ++j) + row[j] = -row[j]; + modified_rel = ">="; + } + if (rel == "~") + row.push_back(modulus); + + if (inhomogeneous && !forced_hom) { + if (modified_rel == "=") { + append_row(row, input_map, Type::inhom_equations); + return; + } + if (modified_rel == ">=") { + append_row(row, input_map, Type::inhom_inequalities); + return; + } + if (modified_rel == "~") { + append_row(row, input_map, Type::inhom_congruences); + return; + } + } + else { + if (modified_rel == "=") { + append_row(row, input_map, Type::equations); + return; + } + if (modified_rel == ">=") { + append_row(row, input_map, Type::inequalities); + return; + } + if (modified_rel == "~") { + append_row(row, input_map, Type::congruences); + return; + } + } + throw BadInputException("Illegal constrint type " + rel + " !"); +} + +template +bool read_modulus(istream& in, Number& modulus) { + in >> std::ws; // gobble any leading white space + char dummy; + in >> dummy; + if (dummy != '(') + return false; + in >> modulus; + if (in.fail() || modulus == 0) + return false; + in >> std::ws; // gobble any white 
space before closing + in >> dummy; + if (dummy != ')') + return false; + return true; +} + +template +void read_symbolic_constraint(istream& in, string& rel, vector& left, Number& right, Number& modulus, bool forced_hom) { + string constraint; + + while (in.good()) { + char c; + c = in.get(); + if (in.fail()) + throw BadInputException("Symbolic constraint does not end with semicolon"); + if (c == ';') + break; + constraint += c; + } + + // remove white space + // we must take care that the removal of white space does not + // shadow syntax errors + string without_spaces; + bool digit_then_spaces = false; + bool has_content = false; + for (size_t j = 0; j < constraint.size(); ++j) { + char test = constraint[j]; + if (!isspace(test)) + has_content = true; + if (isspace(test)) + continue; + if (test == '.') { + if (j == constraint.size() - 1 || isspace(constraint[j + 1])) + throw BadInputException("Incomplete number"); + } + if (test == 'e') { + if (j == constraint.size() - 1 || isspace(constraint[j + 1])) + throw BadInputException("Incomplete number"); + if (j <= constraint.size() - 3 && (constraint[j + 1] == '+' || constraint[j + 1] == '-') && + isspace(constraint[j + 2])) + throw BadInputException("Incomplete number"); + } + if (!isdigit(test)) + digit_then_spaces = false; + else { + if (digit_then_spaces) + throw BadInputException("Incomplete number"); + // cout << "jjjj " << j << " |" << constraint[j+1] << "|" << endl; + if (j < constraint.size() - 1 && isspace(constraint[j + 1])) { + digit_then_spaces = true; + // cout << "Drin" << endl; + } + } + without_spaces += test; + } + if (!has_content) + throw BadInputException("Empty symbolic constraint"); + + // split into terms + // we separate by + and - + // except: first on lhs or rhs, between ( and ) and following e. 
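The comment above compresses the tokenization rule quite a bit: a '+' or '-' only starts a new term when it is not the leading sign, not inside parentheses, and not the sign of an exponent following 'e'. A rough standalone sketch of just that splitting step, ignoring the relation symbols handled by the real reader (split_terms is a hypothetical name):

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Hypothetical illustration of the term-splitting rule for a constraint
    // string whose white space has already been removed.
    std::vector<std::string> split_terms(const std::string& s) {
        std::vector<std::string> terms;
        std::string current;
        bool first_sign = true;    // a leading sign does not separate terms
        bool in_brackets = false;  // signs inside (...) belong to the bracketed number
        for (size_t j = 0; j < s.size(); ++j) {
            char c = s[j];
            if (c == '(')
                in_brackets = true;
            if (c == ')') {
                if (!in_brackets)
                    throw std::runtime_error("closing bracket without opening bracket");
                in_brackets = false;
            }
            if ((c == '+' || c == '-') && !first_sign && !in_brackets) {
                terms.push_back(current);  // finish the previous term;
                current.clear();           // the sign opens the next one
            }
            first_sign = false;
            if (c == 'e' && j + 1 < s.size() && (s[j + 1] == '+' || s[j + 1] == '-')) {
                current += c;              // keep an exponent sign with its number
                current += s[++j];
                continue;
            }
            current += c;
        }
        terms.push_back(current);
        return terms;
    }

Applied to a string like "3a-2b+(-1/2)c" this yields the terms "3a", "-2b" and "+(-1/2)c"; the reader in the diff additionally cuts at the relation symbol and restarts the sign logic for the right-hand side.
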
+ bool first_sign = true; + bool in_brackets = false; + bool relation_read = false; + size_t RHS_start = 0; + vector terms; + string current_term; + for (size_t j = 0; j < without_spaces.size(); ++j) { + char test = without_spaces[j]; + if (test == '(') + in_brackets = true; + if (test == ')') { + if (!in_brackets) + throw BadInputException("Closing bracket without opening bracket"); + in_brackets = false; + } + if (test == '+' || test == '-') { + if (!first_sign && !in_brackets) { + terms.push_back(current_term); + current_term.clear(); + } + } + first_sign = false; + + if (test == 'e') { + current_term += test; + if (j == without_spaces.size() - 1) + throw BadInputException("Incomplete number"); + if (without_spaces[j + 1] == '+' || without_spaces[j + 1] == '-') { + current_term += without_spaces[j + 1]; + j++; + } + continue; + } + + if (test == '=' || test == '<' || test == '>' || test == '~') { + terms.push_back(current_term); + current_term.clear(); + rel += test; + RHS_start = terms.size(); + if (relation_read) + throw BadInputException("Double relation in constraint"); + relation_read = true; + if (j == without_spaces.size() - 1) + throw BadInputException("Relation last character in constraint"); + if (without_spaces[j + 1] == '=') { + rel += without_spaces[j + 1]; + j++; + } + first_sign = true; + continue; + } + + current_term += test; + } + terms.push_back(current_term); + if (!relation_read) + throw BadInputException("No relation in constraint"); + + // for(size_t i=0;i (long)left.size()) + throw BadInputException("Index " + expo_string + " in symbolic constraint out of bounds"); + index--; + left[index] += side * sign * coeff; + } + else { // absolute term + right -= side * sign * coeff; + } + } + + // cout << "constraint " << left << rel << " " << right << endl; +} + +template +void read_constraints(istream& in, long dim, map > >& input_map, bool forced_hom) { + long nr_constraints; + in >> nr_constraints; + + if (in.fail() || nr_constraints < 0) { + throw BadInputException("Cannot read " + toString(nr_constraints) + " constraints!"); + } + + if (nr_constraints == 0) + return; + + bool symbolic = false; + + in >> std::ws; + int c = in.peek(); + if (c == 's') { + string dummy; + in >> dummy; + if (dummy != "symbolic") + throw BadInputException("Illegal keyword " + dummy + " in input!"); + symbolic = true; + } + + long hom_correction = 0; + if (forced_hom) + hom_correction = 1; + + for (long i = 0; i < nr_constraints; ++i) { + vector left(dim - hom_correction); + string rel, modulus_str; + Number right, modulus = 0; + + if (symbolic) { + read_symbolic_constraint(in, rel, left, right, modulus, forced_hom); + } + else { // ordinary constraint read here + for (long j = 0; j < dim - hom_correction; ++j) { + read_number(in, left[j]); + } + in >> rel; + read_number(in, right); + if (rel == "~") { + if (!read_modulus(in, modulus)) + // throw BadInputException("Congruence not allowed with field coefficients!"); + throw BadInputException("Error while reading modulus!"); + } + if (in.fail()) { + throw BadInputException("Error while reading constraint!"); + } + } + process_constraint(rel, left, right, modulus, input_map, forced_hom); + } +} + +template +bool read_sparse_vector(istream& in, vector& input_vec, long length) { + input_vec = vector(length, 0); + char dummy; + + while (in.good()) { + in >> std::ws; + int c = in.peek(); + if (c == ';') { + in >> dummy; // swallow ; + return true; + } + long pos; + in >> pos; + if (in.fail()) + return false; + pos--; + if (pos < 0 || pos >= 
length) + return false; + in >> std::ws; + c = in.peek(); + if (c != ':') + return false; + in >> dummy; // skip : + Number value; + read_number(in, value); + if (in.fail()) + return false; + input_vec[pos] = value; + } + + return false; +} + +template +bool read_formatted_vector(istream& in, vector& input_vec) { + input_vec.clear(); + in >> std::ws; + char dummy; + in >> dummy; // read first proper character + if (dummy != '[') + return false; + bool one_more_entry_required = false; + while (in.good()) { + in >> std::ws; + if (!one_more_entry_required && in.peek() == ']') { + in >> dummy; + return true; + } + Number number; + read_number(in, number); + if (in.fail()) + return false; + input_vec.push_back(number); + in >> std::ws; + one_more_entry_required = false; + if (in.peek() == ',' || in.peek() == ';') { // skip potential separator + in >> dummy; + one_more_entry_required = true; + } + } + return false; +} + +void read_polynomial(istream& in, string& polynomial) { + char c; + while (in.good()) { + in >> c; + if (in.fail()) + throw BadInputException("Error while reading polynomial!"); + if (c == ';') { + if (polynomial.size() == 0) + throw BadInputException("Error while reading polynomial!"); + return; + } + polynomial += c; + } +} + +template +bool read_formatted_matrix(istream& in, vector >& input_mat, bool transpose) { + input_mat.clear(); + in >> std::ws; + char dummy; + in >> dummy; // read first proper character + if (dummy != '[') + return false; + bool one_more_entry_required = false; + while (in.good()) { + in >> std::ws; + if (!one_more_entry_required && in.peek() == ']') { // closing ] found + in >> dummy; + if (transpose) + input_mat = transpose_mat(input_mat); + return true; + } + vector input_vec; + if (!read_formatted_vector(in, input_vec)) { + throw BadInputException("Error in reading input vector!"); + } + if (input_mat.size() > 0 && input_vec.size() != input_mat[0].size()) { + throw BadInputException("Rows of input matrix have unequal lengths!"); + } + input_mat.push_back(input_vec); + in >> std::ws; + one_more_entry_required = false; + if (in.peek() == ',' || in.peek() == ';') { // skip potential separator + in >> dummy; + one_more_entry_required = true; + } + } + + return false; +} + +template +void read_number_field(istream& in, renf_class& number_field) { + throw NumberFieldInputException(); +} + +#ifdef ENFNORMALIZ +template <> +void read_number_field(istream& in, renf_class& renf) { + char c; + string s; + in >> s; + if (s != "min_poly" && s != "minpoly") + throw BadInputException("Error in reading number field: expected keyword min_poly or minpoly"); + in >> ws; + c = in.peek(); + if (c != '(') + throw BadInputException("Error in reading number field: min_poly does not start with ("); + in >> c; + + string mp_string; + while (in.good()) { + c = in.peek(); + if (c == ')') { + in.get(c); + break; + } + in.get(c); + if (in.fail()) + throw BadInputException("Error in reading number field: min_poly not terminated by )"); + mp_string += c; + } + // omp_set_num_threads(1); + + string indet; + + for(auto& g:mp_string){ + if(isalpha(g)){ + indet = g; + break; + } + } + + if(indet == "e" || indet == "x") + throw BadInputException("Letters e and x not allowed for field generator"); + + in >> s; + if (s != "embedding") + throw BadInputException("Error in reading number field: expected keyword embedding"); + in >> ws; + string emb_string; + c = in.peek(); + if (c == '[') { + in >> c; + while (in.good()) { + in >> c; + if (c == ']') + break; + emb_string += c; + } + } + 
else + throw BadInputException("Error in reading number field: definition of embedding does not start with ["); + + if (c != ']') + throw BadInputException("Error in reading number field: definition of embedding does not end with ]"); + + if (in.fail()) + throw BadInputException("Could not read number field!"); + + renf = renf_class(mp_string, indet, emb_string); + renf.gen_name = indet; // temporary fix for bug in renfxx.h + + renf.set_istream(in); +} +#endif + +void read_num_param(istream& in, map& num_param_input, NumParam::Param numpar, const string& type_string) { + long value; + in >> value; + if (in.fail()) + throw BadInputException("Error in reading " + type_string); + num_param_input[numpar] = value; +} + +template +map > > readNormalizInput(istream& in, + OptionsHandler& options, + map& num_param_input, + string& polynomial, + renf_class& number_field) { + string type_string; + long i, j; + long nr_rows, nr_columns, nr_rows_or_columns; + InputType input_type; + Number number; + ConeProperty::Enum cp; + NumParam::Param numpar; + set num_par_already_set; + bool we_have_a_polynomial = false; + + map > > input_map; + + in >> std::ws; // eat up any leading white spaces + int c = in.peek(); + if (c == EOF) { + throw BadInputException("Empty input file!"); + } + bool new_input_syntax = !std::isdigit(c); + + if (new_input_syntax) { + long dim; + while (in.peek() == '/') { + skip_comment(in); + in >> std::ws; + } + in >> type_string; + if (!in.good() || type_string != "amb_space") { + throw BadInputException("First entry must be \"amb_space\"!"); + } + bool dim_known = false; + in >> std::ws; + c = in.peek(); + if (c == 'a') { + string dummy; + in >> dummy; + if (dummy != "auto") { + throw BadInputException("Bad amb_space value!"); + } + } + else { + in >> dim; + if (!in.good() || dim < 0) { + throw BadInputException("Bad amb_space value!"); + } + dim_known = true; + } + while (in.good()) { // main loop + + bool transpose = false; + in >> std::ws; // eat up any leading white spaces + c = in.peek(); + if (c == EOF) + break; + if (c == '/') { + skip_comment(in); + } + else { + in >> type_string; + if (in.fail()) { + throw BadInputException("Could not read type string!"); + } + if (std::isdigit(c)) { + throw BadInputException("Unexpected number " + type_string + " when expecting a type!"); + } + if (isConeProperty(cp, type_string)) { + options.activateInputFileConeProperty(cp); + continue; + } + if (isNumParam(numpar, type_string)) { + auto ns = num_par_already_set.find(numpar); + if (ns != num_par_already_set.end()) + throw BadInputException("Numerical parameter " + type_string + " set twice"); + read_num_param(in, num_param_input, numpar, type_string); + num_par_already_set.insert(numpar); + continue; + } + if (type_string == "LongLong") { + options.activateInputFileLongLong(); + continue; + } + if (type_string == "NoExtRaysOutput") { + options.activateNoExtRaysOutput(); + continue; + } + if (type_string == "NoMatricesOutput") { + options.activateNoMatricesOutput(); + continue; + } + if (type_string == "NoSuppHypsOutput") { + options.activateNoSuppHypsOutput(); + continue; + } + if (type_string == "number_field") { +#ifndef ENFNORMALIZ + throw BadInputException("number_field only allowed for Normaliz with e-antic"); +#else + read_number_field(in, number_field); +#endif + continue; + } + if (type_string == "total_degree") { + if (!dim_known) { + throw BadInputException("Ambient space must be known for " + type_string + "!"); + } + input_type = Type::grading; + save_matrix(input_map, 
input_type, + vector >(1, vector(dim + type_nr_columns_correction(input_type), 1))); + continue; + } + if (type_string == "nonnegative") { + if (!dim_known) { + throw BadInputException("Ambient space must be known for " + type_string + "!"); + } + input_type = Type::signs; + save_matrix(input_map, input_type, + vector >(1, vector(dim + type_nr_columns_correction(input_type), 1))); + continue; + } + if (type_string == "constraints") { + if (!dim_known) { + throw BadInputException("Ambient space must be known for " + type_string + "!"); + } + read_constraints(in, dim, input_map, false); + continue; + } + if (type_string == "hom_constraints") { + if (!dim_known) { + throw BadInputException("Ambient space must be known for " + type_string + "!"); + } + read_constraints(in, dim, input_map, true); + continue; + } + if (type_string == "polynomial") { + if (we_have_a_polynomial) + throw BadInputException("Only one polynomial allowed"); + read_polynomial(in, polynomial); + we_have_a_polynomial = true; + continue; + } + + input_type = to_type(type_string); + if (dim_known) + nr_columns = dim + type_nr_columns_correction(input_type); + + if (type_is_vector(input_type)) { + nr_rows_or_columns = nr_rows = 1; + in >> std::ws; // eat up any leading white spaces + c = in.peek(); + if (c == 'u') { // must be unit vector + string vec_kind; + in >> vec_kind; + if (vec_kind != "unit_vector") { + throw BadInputException("Error while reading " + type_string + ": unit_vector expected!"); + } + + long pos = 0; + in >> pos; + if (in.fail()) { + throw BadInputException("Error while reading " + type_string + " as a unit_vector!"); + } + + if (!dim_known) { + throw BadInputException("Ambient space must be known for unit vector " + type_string + "!"); + } + + vector > e_i = vector >(1, vector(nr_columns, 0)); + if (pos < 1 || pos > static_cast(e_i[0].size())) { + throw BadInputException("Error while reading " + type_string + " as a unit_vector " + toString(pos) + + "!"); + } + pos--; // in input file counting starts from 1 + e_i[0].at(pos) = 1; + save_matrix(input_map, input_type, e_i); + continue; + } // end unit vector + + if (c == 's') { // must be "sparse" + string vec_kind; + in >> vec_kind; + if (vec_kind != "sparse") { + throw BadInputException("Error while reading " + type_string + ": sparse vector expected!"); + } + + if (!dim_known) { + throw BadInputException("Ambient space must be known for sparse vector " + type_string + "!"); + } + + vector sparse_vec; + nr_columns = dim + type_nr_columns_correction(input_type); + bool success = read_sparse_vector(in, sparse_vec, nr_columns); + if (!success) { + throw BadInputException("Error while reading " + type_string + " as a sparse vector!"); + } + save_matrix(input_map, input_type, vector >(1, sparse_vec)); + continue; + } + + if (c == '[') { // must be formatted vector + vector formatted_vec; + bool success = read_formatted_vector(in, formatted_vec); + if (!dim_known) { + dim = formatted_vec.size() - type_nr_columns_correction(input_type); + dim_known = true; + nr_columns = dim + type_nr_columns_correction(input_type); + } + if (!success || (long)formatted_vec.size() != nr_columns) { + throw BadInputException("Error while reading " + type_string + " as a formatted vector!"); + } + save_matrix(input_map, input_type, vector >(1, formatted_vec)); + continue; + } // end formatted vector + } + else { // end vector, it is a matrix. 
Plain vector read as a one row matrix later on + in >> std::ws; + c = in.peek(); + + if (c != '[' && !std::isdigit(c)) { // must be transpose + string transpose_str; + in >> transpose_str; + if (transpose_str != "transpose") { + throw BadInputException("Illegal keyword " + transpose_str + " following matrix type!"); + } + transpose = true; + in >> std::ws; + c = in.peek(); + } + if (c == '[') { // it is a formatted matrix + vector > formatted_mat; + bool success = read_formatted_matrix(in, formatted_mat, transpose); + if (!success) { + throw BadInputException("Error while reading formatted matrix " + type_string + "!"); + } + if (formatted_mat.size() == 0) { // empty matrix + input_type = to_type(type_string); + save_empty_matrix(input_map, input_type); + continue; + } + if (!dim_known) { + dim = formatted_mat[0].size() - type_nr_columns_correction(input_type); + dim_known = true; + nr_columns = dim + type_nr_columns_correction(input_type); + } + + if ((long)formatted_mat[0].size() != nr_columns) { + throw BadInputException("Error while reading formatted matrix " + type_string + "!"); + } + + save_matrix(input_map, input_type, formatted_mat); + continue; + } // only plain matrix left + + in >> nr_rows_or_columns; // is number of columns if transposed + nr_rows = nr_rows_or_columns; // most of the time + } + + if (!dim_known) { + throw BadInputException("Ambient space must be known for plain matrix or vector " + type_string + "!"); + } + + if (transpose) + swap(nr_rows, nr_columns); + + if (in.fail() || nr_rows_or_columns < 0) { + throw BadInputException("Error while reading " + type_string + " (a " + toString(nr_rows) + "x" + + toString(nr_columns) + " matrix) !"); + } + if (nr_rows == 0) { + input_type = to_type(type_string); + save_empty_matrix(input_map, input_type); + continue; + } + + vector > M(nr_rows); + in >> std::ws; + c = in.peek(); + if (c == 's') { // must be sparse + string sparse_test; + in >> sparse_test; + if (sparse_test != "sparse") { + throw BadInputException("Error while reading " + type_string + ": sparse matrix expected!"); + } + for (long i = 0; i < nr_rows; ++i) { + bool success = read_sparse_vector(in, M[i], nr_columns); + if (!success) { + throw BadInputException("Error while reading " + type_string + ": corrupted sparse matrix"); + } + } + } + else { // dense matrix + for (i = 0; i < nr_rows; i++) { + M[i].resize(nr_columns); + for (j = 0; j < nr_columns; j++) { + read_number(in, M[i][j]); + // cout << M[i][j] << endl; + } + } + } + if (transpose) + M = transpose_mat(M); + save_matrix(input_map, input_type, M); + } + if (in.fail()) { + throw BadInputException("Error while reading " + type_string + " (a " + toString(nr_rows) + "x" + + toString(nr_columns) + " matrix) !"); + } + } + } + else { + // old input syntax + while (in.good()) { + in >> nr_rows; + if (in.fail()) + break; + in >> nr_columns; + if ((nr_rows < 0) || (nr_columns < 0)) { + throw BadInputException("Error while reading a " + toString(nr_rows) + "x" + toString(nr_columns) + " matrix !"); + } + vector > M(nr_rows, vector(nr_columns)); + for (i = 0; i < nr_rows; i++) { + for (j = 0; j < nr_columns; j++) { + read_number(in, M[i][j]); + } + } + + in >> type_string; + + if (in.fail()) { + throw BadInputException("Error while reading a " + toString(nr_rows) + "x" + toString(nr_columns) + " matrix!"); + } + + input_type = to_type(type_string); + + // check if this type already exists + save_matrix(input_map, input_type, M); + } + } + return input_map; +} + +template map > > readNormalizInput(istream& in, 
+ OptionsHandler& options, + map& num_param_input, + string& polynomial, + renf_class& number_field); + +#ifdef ENFNORMALIZ +template map > > readNormalizInput(istream& in, + OptionsHandler& options, + map& num_param_input, + string& polynomial, + renf_class& number_field); +#endif + +} // namespace diff -Nru normaliz-3.8.5+ds/source/libnormaliz/input.h normaliz-3.8.9+ds/source/libnormaliz/input.h --- normaliz-3.8.5+ds/source/libnormaliz/input.h 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/input.h 2020-07-21 15:37:45.000000000 +0000 @@ -0,0 +1,47 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +#include +#include // std::isdigit +#include // numeric_limits + +#include "libnormaliz/options.h" +#include "libnormaliz/input_type.h" +#include "libnormaliz/list_and_map_operations.h" +#include "libnormaliz/cone_property.h" + +#ifndef NORMALIZ_INPUT_H +#define NORMALIZ_INPUT_H + + +namespace libnormaliz { + +template +map > > readNormalizInput(istream& in, + OptionsHandler& options, + map& num_param_input, + string& polynomial, + renf_class& number_field); +} // namespace + +#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/input_type.cpp normaliz-3.8.9+ds/source/libnormaliz/input_type.cpp --- normaliz-3.8.5+ds/source/libnormaliz/input_type.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/input_type.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,236 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#include -#include - -#include "libnormaliz/input_type.h" - -namespace libnormaliz { - -InputType to_type(const std::string& type_string) { - if (type_string == "0" || type_string == "1" || type_string == "2" || type_string == "3" || type_string == "4" || - type_string == "5" || type_string == "6" || type_string == "hyperplanes" || type_string == "10") { - throw BadInputException("Error: deprecated type \"" + type_string + "\", please use new type string!"); - } - - if (type_string == "0" || type_string == "integral_closure") { - return Type::integral_closure; - } - if (type_string == "polyhedron") { - return Type::polyhedron; - } - if (type_string == "1" || type_string == "normalization") { - return Type::normalization; - } - if (type_string == "2" || type_string == "polytope") { - return Type::polytope; - } - if (type_string == "3" || type_string == "rees_algebra") { - return Type::rees_algebra; - } - if (type_string == "4" || type_string == "hyperplanes" || type_string == "inequalities") { - return Type::inequalities; - } - if (type_string == "strict_inequalities") { - return Type::strict_inequalities; - } - if (type_string == "strict_signs") { - return Type::strict_signs; - } - if (type_string == "inhom_inequalities") { - return Type::inhom_inequalities; - } - if (type_string == "dehomogenization") { - return Type::dehomogenization; - } - if (type_string == "5" || type_string == "equations") { - return Type::equations; - } - if (type_string == "inhom_equations") { - return Type::inhom_equations; - } - if (type_string == "6" || type_string == "congruences") { - return Type::congruences; - } - if (type_string == "inhom_congruences") { - return Type::inhom_congruences; - } - if (type_string == "signs") { - return Type::signs; - } - if (type_string == "10" || type_string == "lattice_ideal") { - return Type::lattice_ideal; - } - if (type_string == "grading") { - return Type::grading; - } - if (type_string == "excluded_faces") { - return Type::excluded_faces; - } - if (type_string == "lattice") { - return Type::lattice; - } - if (type_string == "saturation") { - return Type::saturation; - } - if (type_string == "cone") { - return Type::cone; - } - if (type_string == "offset") { - return Type::offset; - } - if (type_string == "vertices") { - return Type::vertices; - } - if (type_string == "support_hyperplanes") { - return Type::support_hyperplanes; - } - if (type_string == "cone_and_lattice") { - return Type::cone_and_lattice; - } - if (type_string == "subspace") { - return Type::subspace; - } - if (type_string == "open_facets") { - return Type::open_facets; - } - if (type_string == "projection_coordinates") { - return Type::projection_coordinates; - } - - if (type_string == "hilbert_basis_rec_cone") { - return Type::hilbert_basis_rec_cone; - } - - if (type_string == "extreme_rays") { - return Type::extreme_rays; - } - - if (type_string == "maximal_subspace") { - return Type::maximal_subspace; - } - - if (type_string == "generated_lattice") { - return Type::generated_lattice; - } - - - if (type_string == "scale") { - return Type::scale; - } - - if (type_string == "add_cone") { - return Type::add_cone; - } - - if (type_string == "add_subspace") { - return Type::add_subspace; - } - - if (type_string == "add_vertices") { - return Type::add_vertices; - } - - if (type_string == "add_inequalities") { - return Type::add_inequalities; - } - - if (type_string == "add_equations") { - return Type::add_equations; - } - - if (type_string == "add_inhom_inequalities") { - return 
Type::add_inhom_inequalities; - } - - if (type_string == "add_inhom_equations") { - return Type::add_inhom_equations; - } - - throw BadInputException("Unknown type \"" + type_string + "\"!"); - return Type::integral_closure; -} - -long type_nr_columns_correction(InputType t) { - if (t == Type::polytope || t == Type::rees_algebra) - return -1; - if (t == Type::congruences || t == Type::vertices || t == Type::polyhedron || t == Type::inhom_inequalities || - t == Type::inhom_equations || t == Type::hilbert_basis_rec_cone || t == Type::add_inhom_inequalities || - t == Type::add_vertices|| t == Type::add_inhom_equations) - return 1; - if (t == Type::inhom_congruences) - return 2; - return 0; -} - -/* returns true if the input of this type is a vector */ -bool type_is_vector(InputType type) { - if (type == Type::grading || type == Type::signs || type == Type::strict_signs || type == Type::dehomogenization || - type == Type::offset || type == Type::open_facets || type == Type::projection_coordinates || type == Type::scale) { - return true; - } - return false; -} - -NumParam::Param to_numpar(const std::string& type_string) { - if (type_string == "expansion_degree") - return NumParam::expansion_degree; - if (type_string == "nr_coeff_quasipol") - return NumParam::nr_coeff_quasipol; - if (type_string == "face_codim_bound") - return NumParam::face_codim_bound; - if (type_string == "autom_codim_bound_vectors") - return NumParam::autom_codim_bound_vectors; - if (type_string == "autom_codim_bound_mult") - return NumParam::autom_codim_bound_mult; - - return NumParam::not_a_num_param; -} - -std::string numpar_to_string(const NumParam::Param& numpar) { - if (numpar == NumParam::expansion_degree) - return "expansion_degree"; - if (numpar == NumParam::nr_coeff_quasipol) - return "nr_coeff_quasipol"; - if (numpar == NumParam::face_codim_bound) - return "face_codim_bound"; - if (numpar == NumParam::autom_codim_bound_vectors) - return "autom_codim_bound_vectors"; - if (numpar == NumParam::autom_codim_bound_mult) - return "autom_codim_bound_mult"; - if (numpar == NumParam::autom_codim_bound_vectors) - return "autom_codim_bound_vectors"; - if (numpar == NumParam::not_a_num_param) - return "not_a_num_param"; - assert(false); -} - -bool isNumParam(NumParam::Param& numpar, const std::string& type_string) { - numpar = to_numpar(type_string); - if (numpar == NumParam::not_a_num_param) - return false; - return true; -} - -} /* end namespace libnormaliz */ diff -Nru normaliz-3.8.5+ds/source/libnormaliz/input_type.h normaliz-3.8.9+ds/source/libnormaliz/input_type.h --- normaliz-3.8.5+ds/source/libnormaliz/input_type.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/input_type.h 2020-08-29 07:43:26.000000000 +0000 @@ -43,11 +43,13 @@ cone_and_lattice, lattice, saturation, + rational_lattice, // // inhomogeneous generators // vertices, offset, + rational_offset, // // homogeneous constraints // @@ -55,6 +57,7 @@ signs, equations, congruences, + excluded_faces, // // inhomogeneous constraints // @@ -63,6 +66,7 @@ strict_inequalities, strict_signs, inhom_congruences, + inhom_excluded_faces, // // linearforms // @@ -72,7 +76,6 @@ // special open_facets, projection_coordinates, - excluded_faces, lattice_ideal, // // precomputed data @@ -132,6 +135,221 @@ NumParam::Param to_numpar(const std::string& type_string); std::string numpar_to_string(const NumParam::Param& numpar); +inline InputType to_type(const std::string& type_string) { + if (type_string == "0" || type_string == "1" || type_string == "2" 
|| type_string == "3" || type_string == "4" || + type_string == "5" || type_string == "6" || type_string == "hyperplanes" || type_string == "10") { + throw BadInputException("Error: deprecated type \"" + type_string + "\", please use new type string!"); + } + + if (type_string == "0" || type_string == "integral_closure") { + return Type::integral_closure; + } + if (type_string == "polyhedron") { + return Type::polyhedron; + } + if (type_string == "1" || type_string == "normalization") { + return Type::normalization; + } + if (type_string == "2" || type_string == "polytope") { + return Type::polytope; + } + if (type_string == "3" || type_string == "rees_algebra") { + return Type::rees_algebra; + } + if (type_string == "4" || type_string == "hyperplanes" || type_string == "inequalities") { + return Type::inequalities; + } + if (type_string == "strict_inequalities") { + return Type::strict_inequalities; + } + if (type_string == "strict_signs") { + return Type::strict_signs; + } + if (type_string == "inhom_inequalities") { + return Type::inhom_inequalities; + } + if (type_string == "dehomogenization") { + return Type::dehomogenization; + } + if (type_string == "5" || type_string == "equations") { + return Type::equations; + } + if (type_string == "inhom_equations") { + return Type::inhom_equations; + } + if (type_string == "6" || type_string == "congruences") { + return Type::congruences; + } + if (type_string == "inhom_congruences") { + return Type::inhom_congruences; + } + if (type_string == "signs") { + return Type::signs; + } + if (type_string == "10" || type_string == "lattice_ideal") { + return Type::lattice_ideal; + } + if (type_string == "grading") { + return Type::grading; + } + if (type_string == "excluded_faces") { + return Type::excluded_faces; + } + if (type_string == "inhom_excluded_faces") { + return Type::inhom_excluded_faces; + } + if (type_string == "lattice") { + return Type::lattice; + } + if (type_string == "rational_lattice") { + return Type::rational_lattice; + } + if (type_string == "saturation") { + return Type::saturation; + } + if (type_string == "cone") { + return Type::cone; + } + if (type_string == "offset") { + return Type::offset; + } + if (type_string == "rational_offset") { + return Type::rational_offset; + } + if (type_string == "vertices") { + return Type::vertices; + } + if (type_string == "support_hyperplanes") { + return Type::support_hyperplanes; + } + if (type_string == "cone_and_lattice") { + return Type::cone_and_lattice; + } + if (type_string == "subspace") { + return Type::subspace; + } + if (type_string == "open_facets") { + return Type::open_facets; + } + if (type_string == "projection_coordinates") { + return Type::projection_coordinates; + } + + if (type_string == "hilbert_basis_rec_cone") { + return Type::hilbert_basis_rec_cone; + } + + if (type_string == "extreme_rays") { + return Type::extreme_rays; + } + + if (type_string == "maximal_subspace") { + return Type::maximal_subspace; + } + + if (type_string == "generated_lattice") { + return Type::generated_lattice; + } + + + if (type_string == "scale") { + return Type::scale; + } + + if (type_string == "add_cone") { + return Type::add_cone; + } + + if (type_string == "add_subspace") { + return Type::add_subspace; + } + + if (type_string == "add_vertices") { + return Type::add_vertices; + } + + if (type_string == "add_inequalities") { + return Type::add_inequalities; + } + + if (type_string == "add_equations") { + return Type::add_equations; + } + + if (type_string == "add_inhom_inequalities") { 
+ return Type::add_inhom_inequalities; + } + + if (type_string == "add_inhom_equations") { + return Type::add_inhom_equations; + } + + throw BadInputException("Unknown type \"" + type_string + "\"!"); + return Type::integral_closure; +} + +inline long type_nr_columns_correction(InputType t) { + if (t == Type::polytope || t == Type::rees_algebra) + return -1; + if (t == Type::congruences || t == Type::vertices || t == Type::polyhedron || t == Type::inhom_inequalities || + t == Type::inhom_equations || t == Type::hilbert_basis_rec_cone || t == Type::add_inhom_inequalities || + t == Type::add_vertices|| t == Type::add_inhom_equations || t == Type::inhom_excluded_faces) + return 1; + if (t == Type::inhom_congruences) + return 2; + return 0; +} + +/* returns true if the input of this type is a vector */ +inline bool type_is_vector(InputType type) { + if (type == Type::grading || type == Type::signs || type == Type::strict_signs || type == Type::dehomogenization || + type == Type::offset || type == Type::open_facets || type == Type::projection_coordinates || type == Type::scale || + type == Type::rational_offset) { + return true; + } + return false; +} + +inline NumParam::Param to_numpar(const std::string& type_string) { + if (type_string == "expansion_degree") + return NumParam::expansion_degree; + if (type_string == "nr_coeff_quasipol") + return NumParam::nr_coeff_quasipol; + if (type_string == "face_codim_bound") + return NumParam::face_codim_bound; + if (type_string == "autom_codim_bound_vectors") + return NumParam::autom_codim_bound_vectors; + if (type_string == "autom_codim_bound_mult") + return NumParam::autom_codim_bound_mult; + + return NumParam::not_a_num_param; +} + +inline std::string numpar_to_string(const NumParam::Param& numpar) { + if (numpar == NumParam::expansion_degree) + return "expansion_degree"; + if (numpar == NumParam::nr_coeff_quasipol) + return "nr_coeff_quasipol"; + if (numpar == NumParam::face_codim_bound) + return "face_codim_bound"; + if (numpar == NumParam::autom_codim_bound_vectors) + return "autom_codim_bound_vectors"; + if (numpar == NumParam::autom_codim_bound_mult) + return "autom_codim_bound_mult"; + if (numpar == NumParam::autom_codim_bound_vectors) + return "autom_codim_bound_vectors"; + if (numpar == NumParam::not_a_num_param) + return "not_a_num_param"; + assert(false); +} + +inline bool isNumParam(NumParam::Param& numpar, const std::string& type_string) { + numpar = to_numpar(type_string); + if (numpar == NumParam::not_a_num_param) + return false; + return true; +} + } /* end namespace libnormaliz */ #endif /* LIBNORMALIZ_H_ */ diff -Nru normaliz-3.8.5+ds/source/libnormaliz/integer.cpp normaliz-3.8.9+ds/source/libnormaliz/integer.cpp --- normaliz-3.8.5+ds/source/libnormaliz/integer.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/integer.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,675 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -//--------------------------------------------------------------------------- - -#include -#include -#include - -#include "libnormaliz/integer.h" -#include "libnormaliz/convert.h" -#include "libnormaliz/vector_operations.h" -#include "libnormaliz/reduction.h" - -//--------------------------------------------------------------------------- - -namespace libnormaliz { -using namespace std; - -bool try_convert(mpz_class& ret, const mpq_class& val) { - assert(false); // must never be used - return false; -} - -bool try_convert(mpq_class& ret, const mpz_class& val) { - assert(false); // must never be used - return false; -} - -#ifdef ENFNORMALIZ -bool try_convert(renf_elem_class& ret, const mpz_class& val) { - ret = val; - return true; -} - -bool try_convert(mpz_class& ret, const renf_elem_class& val) { - renf_elem_class help = val; - if (!help.is_integer()) - throw ArithmeticException("field element cannot be converted to integer"); - ret = help.get_num(); - return true; -} - -bool try_convert(renf_elem_class& ret, const long long& val) { - ret = convertTo(val); - return true; -} - -bool try_convert(long long& ret, const renf_elem_class& val) { - mpz_class bridge; - try_convert(bridge, val); - return try_convert(ret, bridge); -} - -bool try_convert(renf_elem_class& ret, const long& val) { - ret = val; - return true; -} - -bool try_convert(long& ret, const renf_elem_class& val) { - mpz_class bridge; - try_convert(bridge, val); - return try_convert(ret, bridge); -} - -bool try_convert(mpq_class& ret, const renf_elem_class& val) { - nmz_float ret_double = val.get_d(); - ret = mpq_class(ret_double); - return true; -} - -bool try_convert(nmz_float& ret, const renf_elem_class& val) { - ret = val.get_d(); - return true; -} -#endif - -bool try_convert(long& ret, const long long& val) { - if (fits_long_range(val)) { - ret = val; - return true; - } - return false; -} - -bool try_convert(long& ret, const mpz_class& val) { - if (!val.fits_slong_p()) { - return false; - } - ret = val.get_si(); - return true; -} - -bool try_convert(long long& ret, const mpz_class& val) { - if (val.fits_slong_p()) { - ret = val.get_si(); - return true; - } - if (sizeof(long long) == sizeof(long)) { - return false; - } - mpz_class quot; - ret = mpz_fdiv_q_ui(quot.get_mpz_t(), val.get_mpz_t(), LONG_MAX); // returns remainder - if (!quot.fits_slong_p()) { - return false; - } - ret += ((long long)quot.get_si()) * ((long long)LONG_MAX); - return true; -} - -bool try_convert(mpz_class& ret, const long long& val) { - if (fits_long_range(val)) { - ret = mpz_class(long(val)); - } - else { - ret = mpz_class(long(val % LONG_MAX)) + mpz_class(LONG_MAX) * mpz_class(long(val / LONG_MAX)); - } - return true; -} - -bool try_convert(float& ret, const mpz_class& val) { - if (!val.fits_slong_p()) - return false; - long dummy = convertTo(val); - ret = (float)dummy; - return true; -} - -bool fits_long_range(long long a) { - return sizeof(long long) == sizeof(long) || (a <= LONG_MAX && a >= LONG_MIN); -} - -bool try_convert(nmz_float& ret, const long& val) { - ret = (nmz_float)val; - return 
true; -} - -bool try_convert(nmz_float& ret, const mpz_class& val) { - ret = val.get_d(); - return true; -} - -bool try_convert(mpz_class& ret, const nmz_float& val) { - ret = mpz_class(val); - return true; -} - -bool try_convert(nmz_float& ret, const long long& val) { - ret = (nmz_float)val; - return true; -} - -bool try_convert(long& ret, const nmz_float& val) { - mpz_class bridge; - if (!try_convert(bridge, val)) - return false; - return try_convert(ret, bridge); -} - -bool try_convert(long long& ret, const nmz_float& val) { - mpz_class bridge; - if (!try_convert(bridge, val)) - return false; - return try_convert(ret, bridge); -} -//--------------------------------------------------------------------------- - -template -Integer gcd(const Integer& a, const Integer& b) { - if (a == 0) { - return Iabs(b); - } - if (b == 0) { - return Iabs(a); - } - Integer q0, q1, r; - q0 = Iabs(a); - r = Iabs(b); - do { - q1 = r; - r = q0 % q1; - q0 = q1; - } while (r != 0); - return q1; -} - -template <> -nmz_float gcd(const nmz_float& a, const nmz_float& b) { - if (a == 0 && b == 0) - return 0; - return 1.0; -} - -template <> -mpz_class gcd(const mpz_class& a, const mpz_class& b) { - mpz_class g; - mpz_gcd(g.get_mpz_t(), a.get_mpz_t(), b.get_mpz_t()); - return g; -} - -#ifdef ENFNORMALIZ -template <> -renf_elem_class gcd(const renf_elem_class& a, const renf_elem_class& b) { - if (a == 0 && b == 0) - return 0; - return 1; -} -#endif - -template long gcd(const long& a, const long& b); -template long long gcd(const long long& a, const long long& b); - -//--------------------------------------------------------------------------- - -template -Integer lcm(const Integer& a, const Integer& b) { - if ((a == 0) || (b == 0)) { - return 0; - } - else - return Iabs(a * b / gcd(a, b)); -} - -template <> -mpz_class lcm(const mpz_class& a, const mpz_class& b) { - mpz_class g; - mpz_lcm(g.get_mpz_t(), a.get_mpz_t(), b.get_mpz_t()); - return g; -} - -template long lcm(const long& a, const long& b); -template long long lcm(const long long& a, const long long& b); - -#ifdef ENFNORMALIZ -template <> -renf_elem_class lcm(const renf_elem_class& a, const renf_elem_class& b) { - return 1; -} -#endif - -//--------------------------------------------------------------------------- - -template -Integer int_max_value_dual() { - Integer k = sizeof(Integer) * 8 - 2; // number of bytes convetred to number of bits - Integer test = 1; - test = test << k; // 2^k - return test; -} - -bool int_max_value_dual_long_computed = false; - -template <> -long int_max_value_dual() { - static long max_value; - - if (int_max_value_dual_long_computed) - return max_value; - - long k = sizeof(long) * 8 - 2; // number of bytes convetred to number of bits - long test = 1; - test = test << k; // 2^k - // test=0; // 10000; - max_value = test; - int_max_value_dual_long_computed = true; - return test; -} - -bool int_max_value_dual_long_long_computed = false; - -template <> -long long int_max_value_dual() { - static long long max_value; - - if (int_max_value_dual_long_long_computed) - return max_value; - - long long k = sizeof(long long) * 8 - 2; // number of bytes convetred to number of bits - long long test = 1; - test = test << k; // 2^k - // test=0; // 10000; - max_value = test; - int_max_value_dual_long_long_computed = true; - return test; -} - -//--------------------------------------------------------------------------- - -template <> -mpz_class int_max_value_dual() { - assert(false); - return 0; -} - 
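The deleted definitions above all follow one pattern: compute a conservative overflow threshold of 2^(bit width - 2) once per machine-integer type and cache it behind a bool flag; integer.cpp is dropped in 3.8.9 and the *_computed flags reappear as exported globals in general.cpp and general.h earlier in this diff. A minimal sketch of the caching idea for built-in integer types, with a hypothetical name:

    // Hypothetical illustration of the cached overflow bound: 2^(bits - 2)
    // leaves two safety bits, and the flag avoids redoing the shift on every
    // call. Intended for built-in integer types only (the mpz_class variant
    // above just asserts).
    template <typename Integer>
    Integer overflow_bound_dual() {
        static bool computed = false;
        static Integer bound = 0;
        if (!computed) {
            const int k = int(sizeof(Integer)) * 8 - 2;  // bit width minus 2 safety bits
            bound = Integer(1) << k;                     // 2^k
            computed = true;
        }
        return bound;
    }

The library keeps these flags in exported globals (int_max_value_dual_long_computed and friends) rather than in function-local statics; the sketch only illustrates the caching pattern.
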
-//--------------------------------------------------------------------------- - -template -Integer int_max_value_primary() { - Integer k = sizeof(Integer) * 8 - 12; // number of bytes convetred to number of bits - Integer test = 1; - test = test << k; // 2^k - // test=0; // 10000; - return test; -} - -bool int_max_value_primary_long_computed = false; - -template <> -long int_max_value_primary() { - static long max_value; - - if (int_max_value_primary_long_computed) - return max_value; - - long k = sizeof(long) * 8 - 12; // number of bytes convetred to number of bits - long test = 1; - test = test << k; // 2^k - // test=0; // 10000; - int_max_value_primary_long_computed = true; - max_value = test; - return test; -} - -bool int_max_value_primary_long_long_computed = false; - -template <> -long long int_max_value_primary() { - static long long max_value; - - if (int_max_value_primary_long_long_computed) - return max_value; - - long long k = sizeof(long long) * 8 - 12; // number of bytes convetred to number of bits - long long test = 1; - test = test << k; // 2^k -#ifdef NMZ_EXTENDED_TESTS - if(test_linear_algebra_GMP) - test=0; -#endif - max_value = test; - int_max_value_primary_long_long_computed = true; - return test; -} - -//--------------------------------------------------------------------------- - -template <> -mpz_class int_max_value_primary() { - assert(false); - return 0; -} - -#ifdef ENFNORMALIZ -template <> -renf_elem_class int_max_value_primary() { - assert(false); - return 0; -} -#endif - -//--------------------------------------------------------------------------- - -template -void check_range_list(const CandidateList& ll) { - check_range_list(ll.Candidates); -} - -template void check_range_list(const CandidateList&); -template void check_range_list(const CandidateList&); -template void check_range_list(const CandidateList&); - -//--------------------------------------------------------------------------- - -template -void check_range_list(const std::list >& ll) { - if (using_GMP()) - return; - - Integer test = int_max_value_dual(); - // cout << "test " << test << endl; - - for (const auto& v : ll) { - for (size_t i = 0; i < v.values.size(); ++i) - if (Iabs(v.values[i]) >= test) { - // cout << v; - // cout << "i " << i << " " << Iabs(v[i]) << endl; - throw ArithmeticException("Vector entry out of range. 
Imminent danger of arithmetic overflow."); - } - } -} - -//--------------------------------------------------------------------------- - -mpq_class dec_fraction_to_mpq(string s) { - size_t skip = 0; // skip leading spaces - for (; skip < s.size(); ++skip) { - if (!isspace(s[skip])) - break; - } - s = s.substr(skip); - - mpz_class sign = 1; - if (s[0] == '+') - s = s.substr(1); - else if (s[0] == '-') { - s = s.substr(1); - sign = -1; - } - - if (s[0] == '+' || s[0] == '-') - throw BadInputException("Error in decimal fraction " + s); - - string int_string, frac_string, exp_string; - size_t frac_part_length = 0; - size_t pos_point = s.find("."); - size_t pos_E = s.find("e"); - if (pos_point != string::npos) { - int_string = s.substr(0, pos_point); - if (pos_E != string::npos) { - frac_part_length = pos_E - (pos_point + 1); - } - else - frac_part_length = s.size() - (pos_point + 1); - frac_string = s.substr(pos_point + 1, frac_part_length); - if (frac_string[0] == '+' || frac_string[0] == '-') - throw BadInputException("Error in decimal fraction " + s); - } - else - int_string = s.substr(0, pos_E); - if (pos_E != string::npos) - exp_string = s.substr(pos_E + 1, s.size() - (pos_E + 1)); - - /* cout << "int " << int_string << endl; - cout << "frac " << frac_string << endl; - cout << "exp " << exp_string << endl; */ - - // remove leading 0 and + - if (int_string.size() > 0 && int_string[0] == '+') - int_string = int_string.substr(1); - while (int_string.size() > 0 && int_string[0] == '0') - int_string = int_string.substr(1); - while (frac_string.size() > 0 && frac_string[0] == '0') - frac_string = frac_string.substr(1); - if (exp_string.size() > 0 && exp_string[0] == '+') - exp_string = exp_string.substr(1); - while (exp_string.size() > 0 && exp_string[0] == '0') - exp_string = exp_string.substr(1); - - mpq_class int_part, frac_part, exp_part; - if (!int_string.empty()) - int_part = mpz_class(int_string); - - if (pos_E == 0) - int_part = 1; - - // cout << "int_part " << int_part << endl; - - mpz_class den = 1; - if (!frac_string.empty()) { - frac_part = mpz_class(frac_string); - for (size_t i = 0; i < frac_part_length; ++i) - den *= 10; - } - // cout << "frac_part " << frac_part << endl; - mpq_class result = int_part; - if (frac_part != 0) - result += frac_part / den; - if (!exp_string.empty()) { - mpz_class expo(exp_string); // we take mpz_class because it has better error checking - // long expo=stol(exp_string); - mpz_class abs_expo = Iabs(expo); - mpz_class factor = 1; - for (long i = 0; i < abs_expo; ++i) - factor *= 10; - if (expo >= 0) - result *= factor; - else - result /= factor; - } - /* cout <<" result " << sign*result << endl; - cout << "==========" << endl; */ - return sign * result; -} - -//---------------------------------------------------------------------- -// the next function produce an integer quotient and determine whether -// there is a remainder - -bool int_quotient(long& Quot, const long& Num, const long& Den) { - Quot = Iabs(Num) / Iabs(Den); - return Quot * Iabs(Den) != Iabs(Num); -} - -bool int_quotient(long long& Quot, const long long& Num, const long long& Den) { - Quot = Iabs(Num) / Iabs(Den); - return Quot * Iabs(Den) != Iabs(Num); -} - -bool int_quotient(mpz_class& Quot, const mpz_class& Num, const mpz_class& Den) { - Quot = Iabs(Num) / Iabs(Den); - return Quot * Iabs(Den) != Iabs(Num); -} - -bool int_quotient(long long& Quot, const mpz_class& Num, const mpz_class& Den) { - mpz_class mpz_Quot = (Iabs(Num) / Iabs(Den)); - convert(Quot, mpz_Quot); - return mpz_Quot 
* Iabs(Den) != Iabs(Num); -} - -template -bool int_quotient(IntegerRet& Quot, const nmz_float& Num, const nmz_float& Den) { - nmz_float FloatQuot = Iabs(Num) / Iabs(Den); // cout << "FF " << FloatQuot << endl; - nmz_float IntQuot = trunc(FloatQuot + nmz_epsilon); // cout << "II " << IntQuot << endl; - Quot = convertTo(IntQuot); // cout << "QQ " << Quot << endl; - return FloatQuot - IntQuot > nmz_epsilon; -} - -template bool int_quotient(long&, const nmz_float&, const nmz_float&); -template bool int_quotient(long long&, const nmz_float&, const nmz_float&); -template bool int_quotient(mpz_class&, const nmz_float&, const nmz_float&); -template bool int_quotient(nmz_float&, const nmz_float&, const nmz_float&); - -template -IntegerRet floor_quot(const IntegerVal Num, IntegerVal Den) { - IntegerRet Quot; - bool frac = int_quotient(Quot, Num, Den); - if ((Num >= 0 && Den >= 0) || (Num < 0 && Den < 0)) { - return Quot; - } - else { - if (frac) { - return -Quot - 1; - } - return -Quot; - } -} - -template long floor_quot(nmz_float, nmz_float); -template long long floor_quot(nmz_float, nmz_float); -template mpz_class floor_quot(nmz_float, nmz_float); -template long floor_quot(long, long); -template long long floor_quot(long long, long long); -template long long floor_quot(long, long); -template mpz_class floor_quot(mpz_class, mpz_class); -template long long floor_quot(mpz_class, mpz_class); - -template -IntegerRet ceil_quot(const IntegerVal Num, IntegerVal Den) { - IntegerRet Quot; - bool frac = int_quotient(Quot, Num, Den); - if ((Num >= 0 && Den >= 0) || (Num < 0 && Den < 0)) { - if (frac) - return Quot + 1; - return Quot; - } - else { - return -Quot; - } -} - -template long ceil_quot(nmz_float, nmz_float); -template long long ceil_quot(nmz_float, nmz_float); -template mpz_class ceil_quot(nmz_float, nmz_float); -template long ceil_quot(long, long); -template long long ceil_quot(long long, long long); -template long long ceil_quot(long, long); -template mpz_class ceil_quot(mpz_class, mpz_class); -template long long ceil_quot(mpz_class, mpz_class); - -#ifdef ENFNORMALIZ -template <> -mpz_class floor_quot(const renf_elem_class Num, renf_elem_class Den) { - return floor(Num / Den); -} - -template <> -mpz_class ceil_quot(const renf_elem_class Num, renf_elem_class Den) { - return ceil(Num / Den); -} -#endif - -//---------------------------------------------------------------------- - -mpz_class floor(const mpq_class& q) { - mpz_class num = q.get_num(); - mpz_class den = q.get_den(); - mpz_class ent = num / den; - if (num < 0 && den * ent != num) - ent--; - return ent; -} - -mpz_class ceil(const mpq_class& q) { - mpz_class num = q.get_num(); - mpz_class den = q.get_den(); - mpz_class ent = num / den; - if (num > 0 && den * ent != num) - ent++; - return ent; -} - -mpz_class round(const mpq_class& q) { - mpq_class work; - if (q >= 0) { - work = q - mpq_class(1, 2); - return ceil(work); - } - work = q + mpq_class(1, 2); - return floor(work); -} - -template -mpz_class nmz_factorial(Integer n) { - assert(n >= 0); - mpz_class f = 1; - long nlong = convertTo(n); - for (long i = 1; i <= nlong; ++i) - f *= i; - return f; -} - -template mpz_class nmz_factorial(long); -template mpz_class nmz_factorial(long long); -template mpz_class nmz_factorial(mpz_class); - -template -mpz_class nmz_binomial(Integer n, Integer k) { - if (k > n) - return 0; - return nmz_factorial(n) / nmz_factorial(k); -} - -template -mpz_class nmz_binomial(Integer n, Integer k); - -template mpz_class nmz_binomial(long, long); -template mpz_class 
nmz_binomial(long long, long long); -template mpz_class nmz_binomial(mpz_class, mpz_class); - -nmz_float mpq_to_nmz_float(const mpq_class& val) { - mpz_class bound = 1; - for (size_t i = 0; i < 60; ++i) - bound *= 10; - mpz_class gmp_num = val.get_num(), gmp_den = val.get_den(); - while (Iabs(gmp_num) > bound && Iabs(gmp_den) > bound) { - gmp_num /= 10; - gmp_den /= 10; - } - nmz_float num, den; - convert(num, gmp_num); - convert(den, gmp_den); - return num / den; -} - -} // end namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/integer.h normaliz-3.8.9+ds/source/libnormaliz/integer.h --- normaliz-3.8.5+ds/source/libnormaliz/integer.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/integer.h 2020-09-25 14:54:40.000000000 +0000 @@ -27,7 +27,8 @@ #include #include #include -#include +#include +#include #include @@ -40,6 +41,7 @@ namespace libnormaliz { using namespace std; + //--------------------------------------------------------------------------- // Basic functions //--------------------------------------------------------------------------- @@ -429,6 +431,7 @@ } } +// To be used in input.cpp inline void string2coeff(mpq_class& coeff, istream& in, const string& s) { // in here superfluous parameter stringstream sin(s); @@ -436,6 +439,24 @@ // coeff=mpq_class(s); } +// To be used from other sources +inline void string2coeff(mpq_class& coeff, const string& s) { + + // cout << "SSSSSS " << s << endl; + + const string numeric = "+-0123456789/.e "; // must allow blank + for(auto& c: s){ + size_t pos = numeric.find(c); + if(pos == string::npos) + throw BadInputException("Illegal character in numerical string"); + } + + + stringstream sin(s); + coeff = mpq_read(sin); + // coeff=mpq_class(s); +} + inline void read_number(istream& in, mpq_class& number) { number = mpq_read(in); } @@ -476,7 +497,7 @@ in >> ws; c = in.peek(); - if (c != '(') { // rational number + if (c != '(' && c != '\'' && c != '\"') { // rational number mpq_class rat = mpq_read(in); number = renf_elem_class(rat); return; @@ -490,7 +511,7 @@ bool skip = false; while (in.good()) { c = in.peek(); - if (c == ')') { + if (c == ')' || c == '\'' || c == '\"') { in >> c; break; } @@ -506,6 +527,662 @@ } #endif +// formerly conver.h +// conversion for integers, throws ArithmeticException if conversion fails +template +inline void convert(ToType& ret, const FromType& val) { + if (!try_convert(ret, val)) { + throw ArithmeticException(val); + } +} + +// conversion of vectors +template +inline void convert(vector& ret_vect, const vector& from_vect) { + size_t s = from_vect.size(); + ret_vect.resize(s); + for (size_t i = 0; i < s; ++i) + convert(ret_vect[i], from_vect[i]); +} + +// general conversion with return, throws ArithmeticException if conversion fails +template +ToType convertTo(const FromType& val) { + ToType copy; + convert(copy, val); + return copy; +} + + +inline bool try_convert(mpz_class& ret, const mpq_class& val) { + assert(false); // must never be used + return false; +} + +inline bool try_convert(mpq_class& ret, const mpz_class& val) { + assert(false); // must never be used + return false; +} + +#ifdef ENFNORMALIZ +inline bool try_convert(renf_elem_class& ret, const mpz_class& val) { + ret = val; + return true; +} + +inline bool try_convert(mpz_class& ret, const renf_elem_class& val) { + renf_elem_class help = val; + if (!help.is_integer()) + throw ArithmeticException("field element cannot be converted to integer"); + ret = help.get_num(); + return true; +} + +inline bool 
try_convert(renf_elem_class& ret, const long long& val) { + ret = convertTo(val); + return true; +} + +inline bool try_convert(long long& ret, const renf_elem_class& val) { + mpz_class bridge; + try_convert(bridge, val); + return try_convert(ret, bridge); +} + +inline bool try_convert(renf_elem_class& ret, const long& val) { + ret = val; + return true; +} + +inline bool try_convert(long& ret, const renf_elem_class& val) { + mpz_class bridge; + try_convert(bridge, val); + return try_convert(ret, bridge); +} + +inline bool try_convert(mpq_class& ret, const renf_elem_class& val) { + nmz_float ret_double = val.get_d(); + ret = mpq_class(ret_double); + return true; +} + +inline bool try_convert(nmz_float& ret, const renf_elem_class& val) { + ret = val.get_d(); + return true; +} +#endif + +inline bool try_convert(long& ret, const long long& val) { + if (fits_long_range(val)) { + ret = val; + return true; + } + return false; +} + +inline bool try_convert(long& ret, const mpz_class& val) { + if (!val.fits_slong_p()) { + return false; + } + ret = val.get_si(); + return true; +} + +inline bool try_convert(long long& ret, const mpz_class& val) { + if (val.fits_slong_p()) { + ret = val.get_si(); + return true; + } + if (sizeof(long long) == sizeof(long)) { + return false; + } + mpz_class quot; + ret = mpz_fdiv_q_ui(quot.get_mpz_t(), val.get_mpz_t(), LONG_MAX); // returns remainder + if (!quot.fits_slong_p()) { + return false; + } + ret += ((long long)quot.get_si()) * ((long long)LONG_MAX); + return true; +} + +inline bool try_convert(mpz_class& ret, const long long& val) { + if (fits_long_range(val)) { + ret = mpz_class(long(val)); + } + else { + ret = mpz_class(long(val % LONG_MAX)) + mpz_class(LONG_MAX) * mpz_class(long(val / LONG_MAX)); + } + return true; +} + +inline bool try_convert(float& ret, const mpz_class& val) { + if (!val.fits_slong_p()) + return false; + long dummy = convertTo(val); + ret = (float)dummy; + return true; +} + +inline bool fits_long_range(long long a) { + return sizeof(long long) == sizeof(long) || (a <= LONG_MAX && a >= LONG_MIN); +} + +inline bool try_convert(nmz_float& ret, const long& val) { + ret = (nmz_float)val; + return true; +} + +inline bool try_convert(nmz_float& ret, const mpz_class& val) { + ret = val.get_d(); + return true; +} + +inline bool try_convert(mpz_class& ret, const nmz_float& val) { + ret = mpz_class(val); + return true; +} + +inline bool try_convert(nmz_float& ret, const long long& val) { + ret = (nmz_float)val; + return true; +} + +inline bool try_convert(long& ret, const nmz_float& val) { + mpz_class bridge; + if (!try_convert(bridge, val)) + return false; + return try_convert(ret, bridge); +} + +inline bool try_convert(long long& ret, const nmz_float& val) { + mpz_class bridge; + if (!try_convert(bridge, val)) + return false; + return try_convert(ret, bridge); +} +//--------------------------------------------------------------------------- + +template +Integer gcd(const Integer& a, const Integer& b) { + if (a == 0) { + return Iabs(b); + } + if (b == 0) { + return Iabs(a); + } + Integer q0, q1, r; + q0 = Iabs(a); + r = Iabs(b); + do { + q1 = r; + r = q0 % q1; + q0 = q1; + } while (r != 0); + return q1; +} + +template <> +inline nmz_float gcd(const nmz_float& a, const nmz_float& b) { + if (a == 0 && b == 0) + return 0; + return 1.0; +} + +template <> +inline mpz_class gcd(const mpz_class& a, const mpz_class& b) { + mpz_class g; + mpz_gcd(g.get_mpz_t(), a.get_mpz_t(), b.get_mpz_t()); + return g; +} + +#ifdef ENFNORMALIZ +template <> +inline 
renf_elem_class gcd(const renf_elem_class& a, const renf_elem_class& b) { + if (a == 0 && b == 0) + return 0; + return 1; +} +#endif + +//--------------------------------------------------------------------------- + +template +Integer lcm(const Integer& a, const Integer& b) { + if ((a == 0) || (b == 0)) { + return 0; + } + else + return Iabs(a * b / gcd(a, b)); +} + +template <> +inline mpz_class lcm(const mpz_class& a, const mpz_class& b) { + mpz_class g; + mpz_lcm(g.get_mpz_t(), a.get_mpz_t(), b.get_mpz_t()); + return g; +} + +#ifdef ENFNORMALIZ +template <> +inline renf_elem_class lcm(const renf_elem_class& a, const renf_elem_class& b) { + return 1; +} +#endif + +//--------------------------------------------------------------------------- + +template +Integer int_max_value_dual() { + Integer k = sizeof(Integer) * 8 - 2; // number of bytes convetred to number of bits + Integer test = 1; + test = test << k; // 2^k + return test; +} + +// bool int_max_value_dual_long_computed = false; + +template <> +inline long int_max_value_dual() { + static long max_value; + + if (int_max_value_dual_long_computed) + return max_value; + + long k = sizeof(long) * 8 - 2; // number of bytes convetred to number of bits + long test = 1; + test = test << k; // 2^k + // test=0; // 10000; + max_value = test; + int_max_value_dual_long_computed = true; + return test; +} + +// bool int_max_value_dual_long_long_computed = false; + +template <> +inline long long int_max_value_dual() { + static long long max_value; + + if (int_max_value_dual_long_long_computed) + return max_value; + + long long k = sizeof(long long) * 8 - 2; // number of bytes convetred to number of bits + long long test = 1; + test = test << k; // 2^k + // test=0; // 10000; + max_value = test; + int_max_value_dual_long_long_computed = true; + return test; +} + +//--------------------------------------------------------------------------- + +template <> +inline mpz_class int_max_value_dual() { + assert(false); + return 0; +} + +//--------------------------------------------------------------------------- + +template +Integer int_max_value_primary() { + Integer k = sizeof(Integer) * 8 - 12; // number of bytes convetred to number of bits + Integer test = 1; + test = test << k; // 2^k + // test=0; // 10000; + return test; +} + +// bool int_max_value_primary_long_computed = false; + +template <> +inline long int_max_value_primary() { + static long max_value; + + if (int_max_value_primary_long_computed) + return max_value; + + long k = sizeof(long) * 8 - 12; // number of bytes convetred to number of bits + long test = 1; + test = test << k; // 2^k + // test=0; // 10000; + int_max_value_primary_long_computed = true; + max_value = test; + return test; +} + +// bool int_max_value_primary_long_long_computed = false; + +template <> +inline long long int_max_value_primary() { + static long long max_value; + + if (int_max_value_primary_long_long_computed) + return max_value; + + long long k = sizeof(long long) * 8 - 12; // number of bytes convetred to number of bits + long long test = 1; + test = test << k; // 2^k +#ifdef NMZ_EXTENDED_TESTS + if(test_linear_algebra_GMP) + test=0; +#endif + max_value = test; + int_max_value_primary_long_long_computed = true; + return test; +} + +//--------------------------------------------------------------------------- + +template <> +inline mpz_class int_max_value_primary() { + assert(false); + return 0; +} + +#ifdef ENFNORMALIZ +template <> +inline renf_elem_class int_max_value_primary() { + assert(false); + return 0; +} 
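For orientation on the two bounds used by the overflow guards just above: sizeof() yields bytes, so sizeof(Integer) * 8 is the bit width, and the "dual" bound is 2^(bits-2) while the "primary" bound is 2^(bits-12); check_range_list() below rejects vectors whose entries reach the dual bound. A minimal sketch of the resulting values, assuming a platform where long long is 64 bits wide:

    #include <cassert>

    int main() {
        // Mirrors int_max_value_dual<long long>() and int_max_value_primary<long long>()
        // from the hunk above; purely illustrative.
        long long dual    = 1LL << (sizeof(long long) * 8 - 2);   // 2^62
        long long primary = 1LL << (sizeof(long long) * 8 - 12);  // 2^52
        assert(dual    == (1LL << 62));
        assert(primary == (1LL << 52));
        return 0;
    }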
+#endif + +//--------------------------------------------------------------------------- + +template +void check_range_list(const CandidateList& ll) { + check_range_list(ll.Candidates); +} + +//--------------------------------------------------------------------------- + +template +void check_range_list(const std::list >& ll) { + if (using_GMP()) + return; + + Integer test = int_max_value_dual(); + // cout << "test " << test << endl; + + for (const auto& v : ll) { + for (size_t i = 0; i < v.values.size(); ++i) + if (Iabs(v.values[i]) >= test) { + // cout << v; + // cout << "i " << i << " " << Iabs(v[i]) << endl; + throw ArithmeticException("Vector entry out of range. Imminent danger of arithmetic overflow."); + } + } +} + +//--------------------------------------------------------------------------- + +inline mpq_class dec_fraction_to_mpq(string s) { + size_t skip = 0; // skip leading spaces + for (; skip < s.size(); ++skip) { + if (!isspace(s[skip])) + break; + } + s = s.substr(skip); + + mpz_class sign = 1; + if (s[0] == '+') + s = s.substr(1); + else if (s[0] == '-') { + s = s.substr(1); + sign = -1; + } + + if (s[0] == '+' || s[0] == '-') + throw BadInputException("Error in decimal fraction " + s); + + string int_string, frac_string, exp_string; + size_t frac_part_length = 0; + size_t pos_point = s.find("."); + size_t pos_E = s.find("e"); + if (pos_point != string::npos) { + int_string = s.substr(0, pos_point); + if (pos_E != string::npos) { + frac_part_length = pos_E - (pos_point + 1); + } + else + frac_part_length = s.size() - (pos_point + 1); + frac_string = s.substr(pos_point + 1, frac_part_length); + if (frac_string[0] == '+' || frac_string[0] == '-') + throw BadInputException("Error in decimal fraction " + s); + } + else + int_string = s.substr(0, pos_E); + if (pos_E != string::npos) + exp_string = s.substr(pos_E + 1, s.size() - (pos_E + 1)); + + /* cout << "int " << int_string << endl; + cout << "frac " << frac_string << endl; + cout << "exp " << exp_string << endl; */ + + // remove leading 0 and + + if (int_string.size() > 0 && int_string[0] == '+') + int_string = int_string.substr(1); + while (int_string.size() > 0 && int_string[0] == '0') + int_string = int_string.substr(1); + while (frac_string.size() > 0 && frac_string[0] == '0') + frac_string = frac_string.substr(1); + if (exp_string.size() > 0 && exp_string[0] == '+') + exp_string = exp_string.substr(1); + bool exponent_could_be_zero=false; + while (exp_string.size() > 0 && exp_string[0] == '0'){ + exponent_could_be_zero = true; + exp_string = exp_string.substr(1); + } + + if(pos_E != string::npos && exp_string == "" && !exponent_could_be_zero) + throw BadInputException("No exponent following character e in floating point number"); + + mpq_class int_part, frac_part, exp_part; + if (!int_string.empty()) + int_part = mpz_class(int_string); + + if (pos_E == 0) + int_part = 1; + + // cout << "int_part " << int_part << endl; + + mpz_class den = 1; + if (!frac_string.empty()) { + frac_part = mpz_class(frac_string); + for (size_t i = 0; i < frac_part_length; ++i) + den *= 10; + } + // cout << "frac_part " << frac_part << endl; + mpq_class result = int_part; + if (frac_part != 0) + result += frac_part / den; + if (!exp_string.empty()) { + mpz_class expo(exp_string); // we take mpz_class because it has better error checking + // long expo=stol(exp_string); + mpz_class abs_expo = Iabs(expo); + mpz_class factor = 1; + for (long i = 0; i < abs_expo; ++i) + factor *= 10; + if (expo >= 0) + result *= factor; + else + result /= factor; 
+ } + /* cout <<" result " << sign*result << endl; + cout << "==========" << endl; */ + return sign * result; +} + +//---------------------------------------------------------------------- +// the next function produce an integer quotient and determine whether +// there is a remainder + +inline bool int_quotient(long& Quot, const long& Num, const long& Den) { + Quot = Iabs(Num) / Iabs(Den); + return Quot * Iabs(Den) != Iabs(Num); +} + +inline bool int_quotient(long long& Quot, const long long& Num, const long long& Den) { + Quot = Iabs(Num) / Iabs(Den); + return Quot * Iabs(Den) != Iabs(Num); +} + +inline bool int_quotient(mpz_class& Quot, const mpz_class& Num, const mpz_class& Den) { + Quot = Iabs(Num) / Iabs(Den); + return Quot * Iabs(Den) != Iabs(Num); +} + +inline bool int_quotient(long long& Quot, const mpz_class& Num, const mpz_class& Den) { + mpz_class mpz_Quot = (Iabs(Num) / Iabs(Den)); + convert(Quot, mpz_Quot); + return mpz_Quot * Iabs(Den) != Iabs(Num); +} + +template +inline bool int_quotient(IntegerRet& Quot, const nmz_float& Num, const nmz_float& Den) { + nmz_float FloatQuot = Iabs(Num) / Iabs(Den); // cout << "FF " << FloatQuot << endl; + nmz_float IntQuot = trunc(FloatQuot + nmz_epsilon); // cout << "II " << IntQuot << endl; + Quot = convertTo(IntQuot); // cout << "QQ " << Quot << endl; + return FloatQuot - IntQuot > nmz_epsilon; +} + + +template +IntegerRet floor_quot(const IntegerVal Num, IntegerVal Den) { + IntegerRet Quot; + bool frac = int_quotient(Quot, Num, Den); + if ((Num >= 0 && Den >= 0) || (Num < 0 && Den < 0)) { + return Quot; + } + else { + if (frac) { + return -Quot - 1; + } + return -Quot; + } +} + +template +IntegerRet ceil_quot(const IntegerVal Num, IntegerVal Den) { + IntegerRet Quot; + bool frac = int_quotient(Quot, Num, Den); + if ((Num >= 0 && Den >= 0) || (Num < 0 && Den < 0)) { + if (frac) + return Quot + 1; + return Quot; + } + else { + return -Quot; + } +} + +#ifdef ENFNORMALIZ +template <> +inline mpz_class floor_quot(const renf_elem_class Num, renf_elem_class Den) { + return floor(Num / Den); +} + +template <> +inline mpz_class ceil_quot(const renf_elem_class Num, renf_elem_class Den) { + return ceil(Num / Den); +} +#endif + +//---------------------------------------------------------------------- + +inline mpz_class floor(const mpq_class& q) { + mpz_class num = q.get_num(); + mpz_class den = q.get_den(); + mpz_class ent = num / den; + if (num < 0 && den * ent != num) + ent--; + return ent; +} + +inline mpz_class ceil(const mpq_class& q) { + mpz_class num = q.get_num(); + mpz_class den = q.get_den(); + mpz_class ent = num / den; + if (num > 0 && den * ent != num) + ent++; + return ent; +} + +inline mpz_class round(const mpq_class& q) { + mpq_class work; + if (q >= 0) { + work = q - mpq_class(1, 2); + return ceil(work); + } + work = q + mpq_class(1, 2); + return floor(work); +} + +template +mpz_class nmz_factorial(Integer n) { + assert(n >= 0); + mpz_class f = 1; + long nlong = convertTo(n); + for (long i = 1; i <= nlong; ++i) + f *= i; + return f; +} + + +template +mpz_class nmz_binomial(Integer n, Integer k) { + if (k > n) + return 0; + return nmz_factorial(n) / nmz_factorial(k); +} + + +inline nmz_float mpq_to_nmz_float(const mpq_class& val) { + mpz_class bound = 1; + for (size_t i = 0; i < 60; ++i) + bound *= 10; + mpz_class gmp_num = val.get_num(), gmp_den = val.get_den(); + while (Iabs(gmp_num) > bound && Iabs(gmp_den) > bound) { + gmp_num /= 10; + gmp_den /= 10; + } + nmz_float num, den; + convert(num, gmp_num); + convert(den, gmp_den); + 
return num / den; +} + +template +long convertToLong(const Integer& val){ + + long ret; + try{ + ret = convertTo(val); + } + catch (const ArithmeticException& e){ + throw LongException(val); + } + + return ret; + +} + +template +long convertToLongLong(const Integer& val){ + + long ret; + try{ + ret = convertTo(val); + } + catch (const ArithmeticException& e){ + throw LongLongException(val); + } + + return ret; + +} } // namespace libnormaliz //--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/libnormaliz.h normaliz-3.8.9+ds/source/libnormaliz/libnormaliz.h --- normaliz-3.8.5+ds/source/libnormaliz/libnormaliz.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/libnormaliz.h 2020-07-21 15:37:45.000000000 +0000 @@ -30,9 +30,11 @@ #include "libnormaliz/automorph.h" #include "libnormaliz/cone.h" #include "libnormaliz/cone_property.h" -#include "libnormaliz/convert.h" +#include "libnormaliz/dynamic_bitset.h" +// #include "libnormaliz/convert.h" #include "libnormaliz/HilbertSeries.h" -#include "libnormaliz/map_operations.h" +// #include "libnormaliz/map_operations.h" +#include "libnormaliz/list_and_map_operations.h" #include "libnormaliz/matrix.h" #include "libnormaliz/my_omp.h" // #include "libnormaliz/nmz_integrate.h" @@ -42,4 +44,4 @@ #include "libnormaliz/vector_operations.h" #include "libnormaliz/version.h" -#endif \ No newline at end of file +#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/linear_algebra.cpp normaliz-3.8.9+ds/source/libnormaliz/linear_algebra.cpp --- normaliz-3.8.5+ds/source/libnormaliz/linear_algebra.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/linear_algebra.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,35 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include "libnormaliz/integer.cpp" -#include "libnormaliz/vector_operations.cpp" -#include "libnormaliz/matrix.cpp" -#include "libnormaliz/sublattice_representation.cpp" - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/list_and_map_operations.h normaliz-3.8.9+ds/source/libnormaliz/list_and_map_operations.h --- normaliz-3.8.5+ds/source/libnormaliz/list_and_map_operations.h 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/list_and_map_operations.h 2020-07-21 15:37:45.000000000 +0000 @@ -0,0 +1,181 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +//--------------------------------------------------------------------------- +#ifndef LIBNORMALIZ_LIST_OPERATIONS_H +#define LIBNORMALIZ_LIST_OPERATIONS_H + +//--------------------------------------------------------------------------- + +#include +#include +#include +#include + +#include "libnormaliz/general.h" +#include "libnormaliz/matrix.h" + +namespace libnormaliz { +using std::list; +using std::vector; + +//--------------------------------------------------------------------------- +// Data access +//--------------------------------------------------------------------------- + +template +std::ostream& operator<<(std::ostream& out, const list& l) { + for (const auto& i : l) { + out << i << " "; + } + out << std::endl; + return out; +} + +//--------------------------------------------------------------------------- +// List operations +//--------------------------------------------------------------------------- + +template +vector l_multiplication(const list >& l, const vector& v) { + int s = l.size(); + vector p(s); + s = 0; + for (const auto& i : l) { + p[s++] = v_scalar_product(*i, v); // maybe we loose time here? 
+ } + return p; +} + +//--------------------------------------------------------------------------- + +template +list > l_list_x_matrix(const list >& l, const Matrix& M) { + list > result; + vector p; + for (const auto& i : l) { + p = M.VxM(i); + result.push_back(p); + } + return result; +} +//--------------------------------------------------------------------------- + +template +void l_cut(list >& l, int size) { + for (auto& i : l) { + i.resize(size); + } +} + +/* +template +void l_cut_front(list >& l, int size); +// cuts all the vectors in l to a given size, maintaining the back +*/ + +//--------------------------------------------------------------------------- + +template +void random_order(list& LL) { + vector::iterator> list_order; + size_t nrLL = LL.size(); + list_order.reserve(nrLL); + auto p = LL.begin(); + for (size_t k = 0; k < nrLL; ++k, ++p) { + list_order.push_back(p); + } + for (size_t k = 0; k < 10 * nrLL; ++k) { + swap(list_order[rand() % nrLL], list_order[rand() % nrLL]); + } + list new_order; + for (size_t k = 0; k < nrLL; ++k) { + new_order.push_back(*list_order[k]); + } + LL.clear(); + LL.splice(LL.begin(), new_order); +} + +//--------------------------------------------------------------------------- + +template +void random_order(list& LL, typename list::iterator from, typename list::iterator to) { + list MM; + MM.splice(MM.begin(), LL, from, to); + random_order(MM); + LL.splice(LL.begin(), MM); +} + +// formerly map_operations.h + +template +std::ostream& operator<<(std::ostream& out, const map& M) { + for (const auto& it : M) { + out << it.first << ": " << it.second << " "; + } + out << std::endl; + return out; +} + +//--------------------------------------------------------------------------- + +template +bool contains(const set& m, const key& k) { + return (m.find(k) != m.end()); +} + +//--------------------------------------------------------------------------- + +template +bool contains(const map& m, const key& k) { + return (m.find(k) != m.end()); +} + +//--------------------------------------------------------------------------- + +template +map count_in_map(const vector& v) { + map m; + T size = v.size(); + for (T i = 0; i < size; ++i) { + m[v[i]]++; + } + return m; +} + +template +vector to_vector(const map& M) { + vector v; + for (const auto& it : M) { + for (T i = 0; i < it.second; i++) { + v.push_back(it.first); + } + } + return v; +} + +} // namespace libnormaliz + +//--------------------------------------------------------------------------- +#endif +//--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/list_operations.cpp normaliz-3.8.9+ds/source/libnormaliz/list_operations.cpp --- normaliz-3.8.5+ds/source/libnormaliz/list_operations.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/list_operations.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,88 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -//--------------------------------------------------------------------------- - -#include -#include - -#include "libnormaliz/integer.h" -#include "libnormaliz/vector_operations.h" -#include "libnormaliz/matrix.h" -#include "libnormaliz/simplex.h" -#include "libnormaliz/list_operations.h" - -//--------------------------------------------------------------------------- - -namespace libnormaliz { -using namespace std; - -//--------------------------------------------------------------------------- - -template -vector l_multiplication(const list >& l, const vector& v) { - int s = l.size(); - vector p(s); - s = 0; - for (const auto& i : l) { - p[s++] = v_scalar_product(*i, v); // maybe we loose time here? - } - return p; -} - -//--------------------------------------------------------------------------- - -template -list > l_list_x_matrix(const list >& l, const Matrix& M) { - list > result; - vector p; - for (const auto& i : l) { - p = M.VxM(i); - result.push_back(p); - } - return result; -} -//--------------------------------------------------------------------------- - -template -void l_cut(list >& l, int size) { - for (auto& i : l) { - i.resize(size); - } -} - -//--------------------------------------------------------------------------- - -/* -template -void l_cut_front(list >& l, int size) { - vector tmp; - for (auto i = l.begin(); i != l.end();) { - tmp = v_cut_front(*i, size); - i = l.erase(i); // important to decrease memory consumption - l.insert(i, tmp); - } -} -*/ - -} // namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/list_operations.h normaliz-3.8.9+ds/source/libnormaliz/list_operations.h --- normaliz-3.8.5+ds/source/libnormaliz/list_operations.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/list_operations.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,111 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -//--------------------------------------------------------------------------- -#ifndef LIBNORMALIZ_LIST_OPERATIONS_H -#define LIBNORMALIZ_LIST_OPERATIONS_H - -//--------------------------------------------------------------------------- - -#include -#include -#include - -#include "libnormaliz/general.h" - -namespace libnormaliz { -using std::list; -using std::vector; - -//--------------------------------------------------------------------------- -// Data access -//--------------------------------------------------------------------------- - -template -std::ostream& operator<<(std::ostream& out, const list& l) { - for (const auto& i : l) { - out << i << " "; - } - out << std::endl; - return out; -} - -//--------------------------------------------------------------------------- -// List operations -//--------------------------------------------------------------------------- - -template -vector l_multiplication(const list >& l, const vector& v); -// the list shall contain only vectors of size=v.size(). Returns a vector -// containing all the scalar products (we see l as as matrix and return l*v). -template -list > l_list_x_matrix(const list >& l, const Matrix& M); -// the list shall contain only vectors of size=M.nr_of_rows(). Returns a list -// containing the product (we see l as as matrix and return l*M). -template -void l_cut(list >& l, int size); -// cuts all the vectors in l to a given size. - -/* -template -void l_cut_front(list >& l, int size); -// cuts all the vectors in l to a given size, maintaining the back -*/ - -//--------------------------------------------------------------------------- - -template -void random_order(list& LL) { - vector::iterator> list_order; - size_t nrLL = LL.size(); - list_order.reserve(nrLL); - auto p = LL.begin(); - for (size_t k = 0; k < nrLL; ++k, ++p) { - list_order.push_back(p); - } - for (size_t k = 0; k < 10 * nrLL; ++k) { - swap(list_order[rand() % nrLL], list_order[rand() % nrLL]); - } - list new_order; - for (size_t k = 0; k < nrLL; ++k) { - new_order.push_back(*list_order[k]); - } - LL.clear(); - LL.splice(LL.begin(), new_order); -} - -//--------------------------------------------------------------------------- - -template -void random_order(list& LL, typename list::iterator from, typename list::iterator to) { - list MM; - MM.splice(MM.begin(), LL, from, to); - random_order(MM); - LL.splice(LL.begin(), MM); -} - -} // namespace libnormaliz - -//--------------------------------------------------------------------------- -#endif -//--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/Makefile.classic normaliz-3.8.9+ds/source/libnormaliz/Makefile.classic --- normaliz-3.8.5+ds/source/libnormaliz/Makefile.classic 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/Makefile.classic 2020-08-29 07:43:26.000000000 +0000 @@ -3,11 +3,10 @@ ## include ../Makefile.configuration -INSTALLHDRS = cone.h cone_property.h convert.h general.h HilbertSeries.h integer.h input_type.h matrix.h my_omp.h normaliz_exception.h sublattice_representation.h vector_operations.h version.h automorph.h libnormaliz.h libnormaliz/map_operations.h +INSTALLHDRS = cone.h cone_property.h general.h HilbertSeries.h integer.h input_type.h matrix.h my_omp.h normaliz_exception.h sublattice_representation.h vector_operations.h version.h automorph.h libnormaliz.h list_and_map_operations.h LIBSOURCES = $(wildcard *.cpp) LIBHEADERS = $(wildcard *.h) -#NMZINTSOURCES = nmz_integrate.h 
nmz_polynomial.cpp nmz_integrate.cpp nmz_integral.cpp ## -I .. necessary since we include files libnormaliz/*.h and *.cpp # CXXFLAGS += -I .. @@ -18,30 +17,68 @@ offload_handler.o: $(LIBHEADERS) offload_handler.cpp $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c offload_handler.cpp -o $@ + +automorph.o: $(LIBHEADERS) automorph.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c automorph.cpp -o $@ + +reduction.o: $(LIBHEADERS) reduction.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c reduction.cpp -o $@ + +cone_dual_mode.o: $(LIBHEADERS) cone_dual_mode.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c cone_dual_mode.cpp -o $@ + +project_and_lift.o: $(LIBHEADERS) project_and_lift.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c project_and_lift.cpp -o $@ + +face_lattice.o: $(LIBHEADERS) face_lattice.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c face_lattice.cpp -o $@ + +descent.o: $(LIBHEADERS) descent.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c descent.cpp -o $@ + +HilbertSeries.o: $(LIBHEADERS) HilbertSeries.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c HilbertSeries.cpp -o $@ + +nmz_integral.o: $(LIBHEADERS) nmz_integral.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c nmz_integral.cpp -o $@ -enumeration.o: $(LIBHEADERS) enumeration.cpp nmz_polynomial.cpp nmz_integral.cpp HilbertSeries.cpp - $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c enumeration.cpp -o $@ - -other_algorithms.o: $(LIBHEADERS) other_algorithms.cpp cone_dual_mode.cpp reduction.cpp project_and_lift.cpp descent.cpp automorph.cpp nmz_nauty.cpp dynamic_bitset.cpp - $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c other_algorithms.cpp -o $@ - -linear_algebra.o: $(LIBHEADERS) sublattice_representation.cpp vector_operations.cpp matrix.cpp integer.cpp - $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c linear_algebra.cpp -o $@ +sublattice_representation.o: $(LIBHEADERS) sublattice_representation.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c sublattice_representation.cpp -o $@ + +matrix.o: $(LIBHEADERS) matrix.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c matrix.cpp -o $@ nmz_nauty.o: $(LIBHEADERS) nmz_nauty.cpp $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c nmz_nauty.cpp -o $@ + +cone.o: $(LIBHEADERS) cone.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c cone.cpp -o $@ + +cone_property.o: $(LIBHEADERS) cone_property.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c cone_property.cpp -o $@ + +general.o: $(LIBHEADERS) general.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c general.cpp -o $@ + +collection.o: $(LIBHEADERS) collection.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c collection.cpp -o $@ -cone_and_control.o: $(LIBHEADERS) cone_and_control.cpp cone.cpp cone_property.cpp input_type.cpp list_operations.cpp general.cpp collection.cpp - $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c cone_and_control.cpp -o $@ - -primal.o: $(LIBHEADERS) primal.cpp full_cone.cpp simplex.cpp bottom.cpp - $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c primal.cpp -o $@ +full_cone.o: $(LIBHEADERS) full_cone.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c full_cone.cpp -o $@ + +simplex.o: $(LIBHEADERS) simplex.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c simplex.cpp -o $@ output.o: $(LIBHEADERS) output.cpp $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c output.cpp -o $@ + +input.o: $(LIBHEADERS) input.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c input.cpp -o $@ -libnormaliz.a: enumeration.o offload_handler.o other_algorithms.o linear_algebra.o primal.o cone_and_control.o output.o nmz_nauty.o +options.o: $(LIBHEADERS) options.cpp + $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c options.cpp -o $@ +libnormaliz.a: nmz_integral.o HilbertSeries.o offload_handler.o automorph.o reduction.o cone_dual_mode.o project_and_lift.o descent.o sublattice_representation.o matrix.o 
full_cone.o simplex.o cone.o cone_property.o general.o collection.o output.o nmz_nauty.o input.o options.o face_lattice.o ar -cr $@ $^ .PHONY : install diff -Nru normaliz-3.8.5+ds/source/libnormaliz/map_operations.h normaliz-3.8.9+ds/source/libnormaliz/map_operations.h --- normaliz-3.8.5+ds/source/libnormaliz/map_operations.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/map_operations.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,89 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -//--------------------------------------------------------------------------- - -#ifndef LIBNORMALIZ_MAP_OPERATIONS_H -#define LIBNORMALIZ_MAP_OPERATIONS_H - -//--------------------------------------------------------------------------- - -#include -#include -#include - -namespace libnormaliz { -using std::map; -using std::vector; - -template -std::ostream& operator<<(std::ostream& out, const map& M) { - for (const auto& it : M) { - out << it.first << ": " << it.second << " "; - } - out << std::endl; - return out; -} - -//--------------------------------------------------------------------------- - -template -bool contains(const set& m, const key& k) { - return (m.find(k) != m.end()); -} - -//--------------------------------------------------------------------------- - -template -bool contains(const map& m, const key& k) { - return (m.find(k) != m.end()); -} - -//--------------------------------------------------------------------------- - -template -map count_in_map(const vector& v) { - map m; - T size = v.size(); - for (T i = 0; i < size; ++i) { - m[v[i]]++; - } - return m; -} - -template -vector to_vector(const map& M) { - vector v; - for (const auto& it : M) { - for (T i = 0; i < it.second; i++) { - v.push_back(it.first); - } - } - return v; -} - -} // namespace libnormaliz - -//--------------------------------------------------------------------------- -#endif -//--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/matrix.cpp normaliz-3.8.9+ds/source/libnormaliz/matrix.cpp --- normaliz-3.8.5+ds/source/libnormaliz/matrix.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/matrix.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include "libnormaliz/matrix.h" @@ -3528,7 +3528,7 @@ perm.resize(key.size()); i = 0; for (const auto& it : rowList) { - perm[i] = convertTo(it[nc + 1]); + perm[i] = convertToLong(it[nc + 
1]); i++; } return perm; @@ -4253,8 +4253,7 @@ } for (size_t k = 0; k < bin_exp.size(); ++k) { - if (bin_exp[k]) - Layers[k][i][j] = true; + Layers[k][i][j] = bin_exp[k]; } } @@ -4390,6 +4389,7 @@ Matrix VM(nr_rows,nr_columns); for(size_t i = 0;i < nr_rows; ++i){ for(size_t j = 0; j < nr_columns; ++j){ + cout << "EEEEEE " << val_entry(i,j) << endl; VM[i][j]=values[val_entry(i,j)]; } } @@ -4510,6 +4510,6 @@ } template void maximal_subsets(const vector >&, vector&); -template void maximal_subsets(const vector&, dynamic_bitset&); +// template void maximal_subsets(const vector&, dynamic_bitset&); } // namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/matrix.h normaliz-3.8.9+ds/source/libnormaliz/matrix.h --- normaliz-3.8.5+ds/source/libnormaliz/matrix.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/matrix.h 2020-07-21 15:37:45.000000000 +0000 @@ -30,11 +30,11 @@ #include #include #include -#include +#include #include #include -#include +// #include #include #include "libnormaliz/dynamic_bitset.h" // #include @@ -644,7 +644,7 @@ /* cout << "MMMMM " << i << " " << j << " " << M[i][j] << endl; cout << i << "---" << G[i]; cout << j << "---" << G[j];*/ - if (isnan(M[i][j])) { + if (std::isnan(M[i][j])) { T = Tinv = Matrix(U.nr_of_rows()); return U; } diff -Nru normaliz-3.8.5+ds/source/libnormaliz/nmz_integral.cpp normaliz-3.8.9+ds/source/libnormaliz/nmz_integral.cpp --- normaliz-3.8.5+ds/source/libnormaliz/nmz_integral.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/nmz_integral.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -29,14 +29,17 @@ #include "libnormaliz/nmz_integrate.h" #include "libnormaliz/cone.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/map_operations.h" +// #include "libnormaliz/map_operations.h" #include "libnormaliz/dynamic_bitset.h" +#include "libnormaliz/list_and_map_operations.h" using namespace CoCoA; #include "../libnormaliz/my_omp.h" namespace libnormaliz { + +bool verbose_INT; BigRat IntegralUnitSimpl(const RingElem& F, const SparsePolyRing& P, @@ -141,7 +144,7 @@ size_t i, j; size_t nrows, ncols; - nrows = C.getNrGenerators(); + nrows = C.getNrTriangulationGenerators(); ncols = C.getEmbeddingDim(); gens.resize(nrows); for (i = 0; i < nrows; ++i) @@ -149,7 +152,7 @@ for (i = 0; i < nrows; i++) { for (j = 0; j < ncols; j++) { - convert(gens[i], C.getGenerators()[i]); + convert(gens[i], C.getTriangulationGenerators()[i]); } if (check_ascending) { long degree, prevDegree = 1; diff -Nru normaliz-3.8.5+ds/source/libnormaliz/nmz_integrate.h normaliz-3.8.9+ds/source/libnormaliz/nmz_integrate.h --- normaliz-3.8.5+ds/source/libnormaliz/nmz_integrate.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/nmz_integrate.h 2020-07-21 15:37:45.000000000 +0000 @@ -26,7 +26,6 @@ #ifdef NMZ_COCOA #include "CoCoA/library.H" -using namespace CoCoA; #include #include @@ -39,6 +38,8 @@ #include "libnormaliz/matrix.h" namespace libnormaliz { + +using namespace CoCoA; using namespace std; @@ -127,6 +128,903 @@ return (fact); } +ourFactorization::ourFactorization(const vector& myFactors, + const vector& myMultiplicities, + const RingElem& myRemainingFactor) { + this->myFactors = myFactors; + this->myMultiplicities = myMultiplicities; + this->myRemainingFactor = myRemainingFactor; +} + +/* +ourFactorization::ourFactorization(const factorization& FF) { + ourFactorization(FF.myFactors(), FF.myMultiplicities(), FF.myRemainingFactor()); +} + +RingElem binomial(const RingElem& f, 
long k) +// computes binomial coefficient (f choose k) +{ + const SparsePolyRing& P = owner(f); + RingElem g(P); + g = 1; + for (int i = 0; i < k; i++) + g *= (f - i) / (i + 1); + return (g); +} +*/ + +RingElem ascFact(const RingElem& f, long k) +// computes (f+1)*...*(f+k) +{ + const SparsePolyRing& P = owner(f); + RingElem g(P); + g = 1; + for (int i = 0; i < k; i++) + g *= (f + i + 1); + return (g); +} + +/* +RingElem descFact(const RingElem& f, long k) +// computes f*(f-1)*...*(f-k+1) +{ + const SparsePolyRing& P = owner(f); + RingElem g(P); + g = 1; + for (int i = 0; i < k; i++) + g *= (f - i); + return (g); +} +*/ + +bool compareLength(const RingElem& p, const RingElem& q) { + return (NumTerms(p) > NumTerms(q)); +} + +vector ourCoeffs(const RingElem& F, const long j) { + // our version of expanding a poly nomial wrt to indeterminate j + // The return value is the vector of coefficients of x[j]^i + vector c; + const SparsePolyRing& P = owner(F); + RingElem x = indets(P)[j]; + if (F == 0) { + c.push_back(zero(P)); + return (c); + } + + vector v(NumIndets(P)); + long k, cs; + + SparsePolyIter i = BeginIter(F); + for (; !IsEnded(i); ++i) { + exponents(v, PP(i)); + k = v[j]; + cs = c.size(); + if (k > cs - 1) + c.resize(k + 1, zero(P)); + v[j] = 0; + // c[k]+=monomial(P,coeff(i),v); + PushBack(c[k], coeff(i), v); + } + return (c); +} + +RingElem mySubstitution(const RingElem& F, const vector& w) { + const SparsePolyRing& R = owner(F); + RingElem G(zero(R)); + RingElem H(one(R)); + vector v(NumIndets(R)); + vector Z(NumIndets(R)); + + SparsePolyIter i = BeginIter(F); + for (; !IsEnded(i); ++i) { + exponents(v, PP(i)); + H = zero(R); + PushBack(H, coeff(i), Z); + for (size_t j = 0; j < v.size(); ++j) + H *= power(w[j], v[j]); + G += H; + } + return G; +} + +vector MxV(const vector >& M, vector V) { + // matrix*vector + vector P(M.size()); + for (size_t i = 0; i < M.size(); ++i) { + long s = 0; + for (size_t j = 0; j < V.size(); ++j) + s += M[i][j] * V[j]; + P[i] = s; + } + return (P); +} + +vector VxM(const vector& V, const vector >& M) { + // vector*matrix + const SparsePolyRing& R = owner(V[0]); + RingElem s(zero(R)); + vector P(M[0].size(), zero(R)); + for (size_t j = 0; j < M[0].size(); ++j) { + s = 0; + for (size_t i = 0; i < M.size(); ++i) + s += V[i] * M[i][j]; + P[j] = s; + } + return (P); +} + +/* +RingElem affineLinearSubstitution(const RingElem& F,const vector >& A, + const vector& b, const long& denom){ +// NOT IN USE + size_t i; + const SparsePolyRing& R=owner(F); + size_t m=A.size(); + // long n=A[0].size(); + vector v(m,zero(R)); + RingElem g(zero(R)); + + for(i=0;i w=VxM(v,A); + vector w1(w.size()+1,zero(R)); + w1[0]=indets(R)[0]; + for(i=1;i shiftVars(const vector& v, const vector& key) { + // selects components of v and reorders them according to key + vector w(v.size(), 0); + for (size_t i = 0; i < key.size(); ++i) { + w[i] = v[key[i]]; + } + return (w); +} + +void makeLocalDegreesAndKey(const dynamic_bitset& indicator, + const vector& degrees, + vector& localDeg, + vector& key) { + localDeg.clear(); + key.clear(); + key.push_back(0); + for (size_t i = 0; i < indicator.size(); ++i) + if (indicator.test(i)) + key.push_back(i + 1); + for (size_t i = 0; i < key.size() - 1; ++i) + localDeg.push_back(degrees[key[i + 1] - 1]); +} + +void makeStartEnd(const vector& localDeg, vector& St, vector& End) { + vector denom = degrees2denom(localDeg); // first we must find the blocks of equal degree + if (denom.size() == 0) + return; + St.push_back(1); + for (size_t i = 0; i < 
denom.size(); ++i) + if (denom[i] != 0) { + End.push_back(St[St.size() - 1] + denom[i] - 1); + if (i < denom.size() - 1) + St.push_back(End[End.size() - 1] + 1); + } + /* if(St.size()!=End.size()){ + for (size_t i=0;i orderExposInner(vector& vin, const vector& St, vector& End) { + vector v = vin; + long p, s, pend, pst; + bool ordered; + if (St.size() != End.size()) { + verboseOutput() << St.size() << " " << End.size() << " " << vin.size() << endl; + verboseOutput() << St[0] << endl; + for (size_t i = 0; i < vin.size(); ++i) { + verboseOutput() << vin[i] << " "; + } + verboseOutput() << endl; + assert(false); + } + for (size_t j = 0; j < St.size(); ++j) { // now we go over the blocks + pst = St[j]; + pend = End[j]; + while (1) { + ordered = true; + for (p = pst; p < pend; ++p) { + if (v[p] < v[p + 1]) { + ordered = false; + s = v[p]; + v[p] = v[p + 1]; + v[p + 1] = s; + } + } + if (ordered) + break; + pend--; + } + } + return (v); +} + +RingElem orderExpos(const RingElem& F, const vector& degrees, const dynamic_bitset& indicator, bool compactify) { + // orders the exponent vectors v of the terms of F + // the exponents v[i] and v[j], i < j, are swapped if + // (1) degrees[i]==degrees[j] and (2) v[i] < v[j] + // so that the exponents are descending in each degree block + // the ordered exponent vectors are inserted into a map + // and their coefficients are added + // at the end the polynomial is rebuilt from the map + // If compactify==true, the exponents will be shifted to the left in order to keep the correspondence + // of variables to degrees + // compactification not used at present (occurs only in restrictToFaces) + + const SparsePolyRing& P = owner(F); + vector v(NumIndets(P)); + vector key, localDeg; + key.reserve(v.size() + 1); + localDeg.reserve(degrees.size() + 1); + + if (compactify) { + makeLocalDegreesAndKey(indicator, degrees, localDeg, key); + } + else { + localDeg = degrees; + } + + vector St, End; + makeStartEnd(localDeg, St, End); + + // now the main job + + map, RingElem> orderedMons; // will take the ordered exponent vectors + + SparsePolyIter mon = BeginIter(F); // go over the given polynomial + for (; !IsEnded(mon); ++mon) { + exponents(v, PP(mon)); // this function gives the exponent vector back as v + if (compactify) + v = shiftVars(v, key); + v = orderExposInner(v, St, End); + auto ord_mon = orderedMons.find(v); // insert into map or add coefficient + if (ord_mon != orderedMons.end()) { + ord_mon->second += coeff(mon); + } + else { + orderedMons.insert(pair, RingElem>(v, coeff(mon))); + } + } + + // now we must reconstruct the polynomial + // we use that the natural order of vectors in C++ STL is inverse + // to lex. Therefore push_front + + RingElem r(zero(P)); + // JAA verboseOutput() << "Loop start " << orderedMons.size() << endl; + // JAA size_t counter=0; + for (const auto& ord_mon : orderedMons) { + // JAA verboseOutput() << counter++ << ord_mon.first << endl; + // JAA try { + PushFront(r, ord_mon.second, ord_mon.first); + // JAA } + // JAA catch(const std::exception& exc){verboseOutput() << "Caught exception: " << exc.what() << endl;} + } + // JAA verboseOutput() << "Loop end" << endl; + return (r); +} + +void restrictToFaces(const RingElem& G, + RingElem& GOrder, + vector& GRest, + const vector degrees, + const vector& inExSimplData) { + // Computesd the restrictions of G to the faces in inclusion-exclusion. 
+ // All terms are simultaneously compactified and exponentwise ordered + // Polynomials returned in GRest + // Ordering is also applied to G itself, returned in GOrder + // Note: degrees are given for the full simplex. Therefore "local" degreees must be made + // (depend only on face and not on offset, but generation here is cheap) + + const SparsePolyRing& P = owner(G); + + vector v(NumIndets(P)); + vector w(NumIndets(P)); + vector localDeg; + localDeg.reserve(v.size()); + size_t dim = NumIndets(P) - 1; + + // first we make the facewise data that are needed for the compactification and otrdering + // of exponent vectors + vector > St(inExSimplData.size()), End(inExSimplData.size()), key(inExSimplData.size()); + vector active; + for (size_t i = 0; i < inExSimplData.size(); ++i) + if (!inExSimplData[i].done) { + active.push_back(i); + makeLocalDegreesAndKey(inExSimplData[i].GenInFace, degrees, localDeg, key[i]); + makeStartEnd(localDeg, St[i], End[i]); + } + + // now the same for the full simplex (localDeg=degrees) + dynamic_bitset fullSimpl(dim); + fullSimpl.set(); + vector StSimpl, EndSimpl; + makeStartEnd(degrees, StSimpl, EndSimpl); + + vector, RingElem> > orderedMons(inExSimplData.size()); // will take the ordered exponent vectors + map, RingElem> orderedMonsSimpl; + + dynamic_bitset indicator(dim); + + // now we go over the terms of G + SparsePolyIter term = BeginIter(G); + // PPMonoid TT = PPM(owner(G)); + for (; !IsEnded(term); ++term) { + // PPMonoidElem mon(PP(term)); + exponents(v, PP(term)); + w = v; + indicator.reset(); + for (size_t j = 0; j < dim; ++j) + if (v[j + 1] != 0) // we must add 1 since the 0-th indeterminate is irrelevant here + indicator.set(j); + for (size_t i = 0; i < active.size(); ++i) { + int j = active[i]; + if (indicator.is_subset_of(inExSimplData[j].GenInFace)) { + w = shiftVars(v, key[j]); + w = orderExposInner(w, St[j], End[j]); + // w=shiftVars(v,key[j]); + auto ord_mon = orderedMons[j].find(w); // insert into map or add coefficient + if (ord_mon != orderedMons[j].end()) { + ord_mon->second += coeff(term); + } + else { + orderedMons[j].insert(pair, RingElem>(w, coeff(term))); + } + } + } // for i + + v = orderExposInner(v, StSimpl, EndSimpl); + auto ord_mon = orderedMonsSimpl.find(v); // insert into map or add coefficient + if (ord_mon != orderedMonsSimpl.end()) { + ord_mon->second += coeff(term); + } + else { + orderedMonsSimpl.insert(pair, RingElem>(v, coeff(term))); + } + } // loop over term + + // now we must make the resulting polynomials from the maps + + for (size_t i = 0; i < active.size(); ++i) { + int j = active[i]; + for (const auto& ord_mon : orderedMons[j]) { + PushFront(GRest[j], ord_mon.second, ord_mon.first); + } + // verboseOutput() << "GRest[j] " << j << " " << NumTerms(GRest[j]) << endl; + } + + for (const auto& ord_mon : orderedMonsSimpl) { + PushFront(GOrder, ord_mon.second, ord_mon.first); + } +} + +long nrActiveFaces = 0; +long nrActiveFacesOld = 0; + +void all_contained_faces(const RingElem& G, + RingElem& GOrder, + const vector& degrees, + dynamic_bitset& indicator, + long Deg, + vector& inExSimplData, + deque, RingElem> >& facePolysThread) { + const SparsePolyRing& R = owner(G); + vector GRest; + // size_t dim=indicator.size(); + for (size_t i = 0; i < inExSimplData.size(); ++i) { + GRest.push_back(zero(R)); + + if (!indicator.is_subset_of(inExSimplData[i].GenInFace)) + inExSimplData[i].done = true; // done if face cannot contribute to result for this offset + else + inExSimplData[i].done = false; // not done otherwise + } + 
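// [Editor's sketch, not part of the patch] The done/not-done decision just above is
// a subset test on bitsets: a face can only contribute if every nonzero coordinate
// of the offset lies in the face. The patch uses Normaliz's dynamic_bitset; with
// std::bitset and an assumed fixed dimension the same test reads:
#include <bitset>
#include <cstddef>

constexpr std::size_t DIM = 64;  // assumed dimension, for illustration only

bool isSubsetOf(const std::bitset<DIM>& indicator, const std::bitset<DIM>& genInFace) {
    return (indicator & ~genInFace).none();  // no bit of indicator outside genInFace
}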
restrictToFaces(G, GOrder, GRest, degrees, inExSimplData); + + for (size_t j = 0; j < inExSimplData.size(); ++j) { + if (inExSimplData[j].done) + continue; +#pragma omp atomic + nrActiveFaces++; + // verboseOutput() << "Push back " << NumTerms(GRest[j]); + GRest[j] = power(indets(R)[0], Deg) * inExSimplData[j].mult * + GRest[j]; // shift by degree of offset amd multiply by mult of face + facePolysThread.push_back(pair, RingElem>(inExSimplData[j].degrees, GRest[j])); + // verboseOutput() << " Now " << facePolysThread.size() << endl; + } +} + +RingElem affineLinearSubstitutionFL(const ourFactorization& FF, + const vector >& A, + const vector& b, + const long& denom, + const SparsePolyRing& R, + const vector& degrees, + const BigInt& lcmDets, + vector& inExSimplData, + deque, RingElem> >& facePolysThread) { + // applies linar substitution y --> lcmDets*A(y+b/denom) to all factors in FF + // and returns the product of the modified factorsafter ordering the exponent vectors + + size_t i; + size_t m = A.size(); + size_t dim = m; // TO DO: eliminate this duplication + vector v(m, zero(R)); + RingElem g(zero(R)); + + for (i = 0; i < m; i++) { + g = b[i] * (lcmDets / denom); + v[i] = g + lcmDets * indets(R)[i + 1]; + } + vector w = VxM(v, A); + vector w1(w.size() + 1, zero(R)); + w1[0] = RingElem(R, lcmDets); + for (i = 1; i < w1.size(); ++i) + w1[i] = w[i - 1]; + + // RingHom phi=PolyAlgebraHom(R,R,w1); + + RingElem G1(zero(R)); + list sortedFactors; + for (i = 0; i < FF.myFactors.size(); ++i) { + // G1=phi(FF.myFactors[i]); + G1 = mySubstitution(FF.myFactors[i], w1); + for (int nn = 0; nn < FF.myMultiplicities[i]; ++nn) + sortedFactors.push_back(G1); + } + + sortedFactors.sort(compareLength); + + RingElem G(one(R)); + + for (const auto& sf : sortedFactors) + G *= sf; + + if (inExSimplData.size() == 0) { // not really necesary, but a slight shortcut + dynamic_bitset dummyInd; + return (orderExpos(G, degrees, dummyInd, false)); + } + + // if(inExSimplData.size()!=0){ + long Deg = 0; + dynamic_bitset indicator(dim); // indicates the non-zero components of b + indicator.reset(); + for (size_t i = 0; i < dim; ++i) + if (b[i] != 0) { + indicator.set(i); + Deg += degrees[i] * b[i]; + } + Deg /= denom; + RingElem Gorder(zero(R)); + all_contained_faces(G, Gorder, degrees, indicator, Deg, inExSimplData, facePolysThread); + return (Gorder); + // } +} + +vector homogComps(const RingElem& F) { + // returns the vector of homogeneous components of F + // w.r.t. 
standard grading + + const SparsePolyRing& P = owner(F); + long dim = NumIndets(P); + vector v(dim); + vector c(deg(F) + 1, zero(P)); + long j, k; + + // TODO there is a leading_term() function coming in cocoalib + // TODO maybe there will be even a "splice_leading_term" + SparsePolyIter i = BeginIter(F); + for (; !IsEnded(i); ++i) { + exponents(v, PP(i)); + k = 0; + for (j = 0; j < dim; j++) + k += v[j]; + PushBack(c[k], coeff(i), v); + } + return (c); +} + +RingElem homogenize(const RingElem& F) { + // homogenizes F wrt the zeroth variable and returns the + // homogenized polynomial + + SparsePolyRing P = owner(F); + int d = deg(F); + vector c(d + 1, zero(P)); + c = homogComps(F); + RingElem h(zero(P)); + for (int i = 0; i <= d; ++i) + h += c[i] * power(indets(P)[0], d - i); + return (h); +} + +RingElem makeZZCoeff(const RingElem& F, const SparsePolyRing& RZZ) { + // F is a polynomial over RingQQ with integral coefficients + // This function converts it into a polynomial over RingZZ + + SparsePolyIter mon = BeginIter(F); // go over the given polynomial + RingElem G(zero(RZZ)); + for (; !IsEnded(mon); ++mon) { + PushBack(G, num(coeff(mon)), PP(mon)); + } + return (G); +} + +RingElem makeQQCoeff(const RingElem& F, const SparsePolyRing& R) { + // F is a polynomial over RingZZ + // This function converts it into a polynomial over RingQQ + SparsePolyIter mon = BeginIter(F); // go over the given polynomial + RingElem G(zero(R)); + for (; !IsEnded(mon); ++mon) { + PushBack(G, RingElem(RingQQ(), coeff(mon)), PP(mon)); + } + return (G); +} + +RingElem processInputPolynomial(const string& poly_as_string, + const SparsePolyRing& R, + const SparsePolyRing& RZZ, + vector& resPrimeFactors, + vector& resPrimeFactorsNonhom, + vector& resMultiplicities, + RingElem& remainingFactor, + bool& homogeneous, + const bool& do_leadCoeff) { + // "res" stands for "result" + // resPrimeFactors are homogenized, the "nonhom" come from the original polynomial + + long i, j; + string dummy = poly_as_string; + size_t semicolon=dummy.find(';'); + if(semicolon != string::npos){ + dummy[semicolon]=' '; + } + RingElem the_only_dactor = ReadExpr(R, dummy); // there is only one + vector factorsRead; + factorsRead.push_back(the_only_dactor); + vector multiplicities; + + vector primeFactors; // for use in this routine + vector primeFactorsNonhom; // return results will go into the "res" parameters for output + + if (verbose_INT) + verboseOutput() << "Polynomial read" << endl; + + homogeneous = true; + for (auto& G : factorsRead) { + // we factor the polynomials read and make them integral this way they + // must further be homogenized and converted to polynomials with ZZ + // coefficients (instead of inegral QQ) The homogenization is necessary + // to allow substitutions over ZZ + if (deg(G) == 0) { + remainingFactor *= G; // constants go into remainingFactor + continue; // this extra treatment would not be necessary + } + + // homogeneous=(G==LF(G)); + vector compsG = homogComps(G); + // we test for homogeneity. In case do_leadCoeff==true, polynomial + // is replaced by highest homogeneous component + if (G != compsG[compsG.size() - 1]) { + homogeneous = false; + if (verbose_INT && do_leadCoeff) + verboseOutput() << "Polynomial is inhomogeneous. Replacing it by highest hom. comp." 
<< endl; + if (do_leadCoeff) { + G = compsG[compsG.size() - 1]; + } + } + + factorization FF = factor(G); // now the factorization and transfer to integer coefficients + for (j = 0; j < (long)FF.myFactors().size(); ++j) { + primeFactorsNonhom.push_back(FF.myFactors()[j]); // these are the factors of the polynomial to be integrated + primeFactors.push_back(makeZZCoeff(homogenize(FF.myFactors()[j]), RZZ)); // the homogenized factors with ZZ coeff + multiplicities.push_back(FF.myMultiplicities()[j]); // homogenized for substitution ! + } + remainingFactor *= FF.myRemainingFactor(); + } + + // it remains to collect multiple factors that come from different input factors + for (i = 0; i < (long)primeFactors.size(); ++i) { + if (primeFactors[i] == 0) + continue; + for (j = i + 1; j < (long)primeFactors.size(); ++j) { + if (primeFactors[j] != 0 && primeFactors[i] == primeFactors[j]) { + primeFactors[j] = 0; + multiplicities[i]++; + } + } + } + + // now everything is transferred to the return parameters + for (i = 0; i < (long)primeFactors.size(); ++i) { + if (primeFactors[i] != 0) { + resPrimeFactorsNonhom.push_back(primeFactorsNonhom[i]); + resPrimeFactors.push_back(primeFactors[i]); + resMultiplicities.push_back(multiplicities[i]); + } + } + + RingElem F(one(R)); // the polynomial to be integrated with QQ coefficients + for (const auto& G : factorsRead) + F *= G; + + return F; +} + +CyclRatFunct genFunct(const vector >& GFP, const RingElem& F, const vector& degrees) +// writes \sum_{x\in\ZZ_+^n} f(x,t) T^x +// under the specialization T_i --> t^g_i +// as a rational function in t +{ + const SparsePolyRing& P = owner(F); + RingElem t = indets(P)[0]; + + CyclRatFunct s(F); // F/1 + + CyclRatFunct g(zero(P)), h(zero(P)); + + long nd = degrees.size(); + long i, k, mg; + vector c; + + for (k = 1; k <= nd; k++) { + c = ourCoeffs(s.num, k); // we split the numerator according + // to powers of var k + mg = c.size(); // max degree+1 in var k + + h.set2(zero(P)); + for (i = 0; i < mg; i++) // now we replace the powers of var k + { // by the corrseponding rational function, + // multiply, and sum the products + + h.num = (1 - power(t, degrees[k - 1])) * h.num + GFP[degrees[k - 1]][i].num * c[i]; + h.denom = GFP[degrees[k - 1]][i].denom; + } + s.num = h.num; + s.denom = prodDenom(s.denom, h.denom); + } + return (s); +} + +vector power2ascFact(const SparsePolyRing& P, const long& k) +// computes the representation of the power x^n as the linear combination +// of (x+1)_n,...,(x+1)_0 +// return value is the vector of coefficients (they belong to ZZ) +{ + RingElem t = indets(P)[0]; + const vector ONE(NumIndets(P)); + RingElem f(P), g(P), h(P); + f = power(t, k); + long m; + vector c(k + 1, zero(P)); + while (f != 0) { + m = deg(f); + h = monomial(P, LC(f), ONE); + c[m] = h; + f -= h * ascFact(t, m); + } + return (c); +} + +CyclRatFunct genFunctPower1(const SparsePolyRing& P, long k, long n) +// computes the generating function for +// \sum_j j^n (t^k)^j +{ + vector a = power2ascFact(P, n); + RingElem b(P); + vector u; + CyclRatFunct g(zero(P)), h(zero(P)); + long i, s = a.size(); + for (i = 0; i < s; ++i) { + u = makeDenom(k, i + 1); + b = a[i] * factorial(i); + g.set2(b, u); + h.addCRF(g); + } + return (h); +} + +void CyclRatFunct::extendDenom(const vector& target) +// extends the denominator to target +// by multiplying the numrerator with the remaining factor +{ + RingElem t = indets(owner(num))[0]; + long i, ns = target.size(), nf = denom.size(); + for (i = 1; i < ns; ++i) { + if (i > nf - 1) + num *= 
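// [Editor's sketch, not part of the patch] A concrete instance of what
// power2ascFact computes: with ascFact(t,k) = (t+1)*...*(t+k) one has
//     t^2 = ascFact(t,2) - 3*ascFact(t,1) + 1*ascFact(t,0),
// i.e. the coefficient vector (c[0], c[1], c[2]) = (1, -3, 1). The check below
// verifies the identity numerically for a few integer values of t.
#include <cassert>

long ascFactLong(long t, long k) {       // (t+1)*...*(t+k), empty product = 1
    long g = 1;
    for (long i = 1; i <= k; ++i)
        g *= t + i;
    return g;
}

int main() {
    for (long t = -5; t <= 5; ++t)
        assert(t * t == ascFactLong(t, 2) - 3 * ascFactLong(t, 1) + ascFactLong(t, 0));
    return 0;
}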
power(1 - power(t, i), (target[i])); + else if (target[i] > denom[i]) + num *= power(1 - power(t, i), (target[i] - denom[i])); + } + denom = target; +} + +vector lcmDenom(const vector& df, const vector& dg) { + // computes the lcm of ztwo denominators as used in CyclRatFunct + // (1-t^i and 1-t^j, i != j, are considered as coprime) + size_t nf = df.size(), ng = dg.size(), i; + size_t n = max(nf, ng); + vector dh = df; + dh.resize(n); + for (i = 1; i < n; ++i) + if (i < ng && dh[i] < dg[i]) + dh[i] = dg[i]; + return (dh); +} + +vector prodDenom(const vector& df, const vector& dg) { + // as above, but computes the profduct + size_t nf = df.size(), ng = dg.size(), i; + size_t n = max(nf, ng); + vector dh = df; + dh.resize(n); + for (i = 1; i < n; ++i) + if (i < ng) + dh[i] += dg[i]; + return (dh); +} + +vector degrees2denom(const vector& d) { + // converts a vector of degrees to a "denominator" + // listing at position i the multiplicity of i in d + long m = 0; + size_t i; + if (d.size() == 0) + return vector(0); + for (i = 0; i < d.size(); ++i) + m = max(m, d[i]); + vector e(m + 1); + for (i = 0; i < d.size(); ++i) + e[d[i]]++; + return (e); +} + +vector denom2degrees(const vector& d) { + // the converse operation + vector denomDeg; + for (size_t i = 0; i < d.size(); ++i) + for (long j = 0; j < d[i]; ++j) + denomDeg.push_back(i); + return (denomDeg); +} + +RingElem denom2poly(const SparsePolyRing& P, const vector& d) { + // converts a denominator into a real polynomial + // the variable for the denominator is x[0] + RingElem t = indets(P)[0]; + RingElem f(one(P)); + for (size_t i = 1; i < d.size(); ++i) + f *= power(1 - power(t, i), d[i]); + return (f); +} + +vector makeDenom(long k, long n) +// makes the denominator (1-t^k)^n +{ + vector d(k + 1); + d[k] = n; + return (d); +} + +void CyclRatFunct::addCRF(const CyclRatFunct& r) { + // adds r to *this, r is preserved in its given form + CyclRatFunct s(zero(owner(num))); + const vector lcmden(lcmDenom(denom, r.denom)); + s = r; + s.extendDenom(lcmden); + extendDenom(lcmden); + num += s.num; +} + +/* +void CyclRatFunct::multCRF(const CyclRatFunct& r) { + // nmultiplies *this by r + num *= r.num; + denom = prodDenom(denom, r.denom); +} +*/ + +void CyclRatFunct::showCRF() { + if (!verbose_INT) + return; + + verboseOutput() << num << endl; + for (size_t i = 1; i < denom.size(); ++i) + verboseOutput() << denom[i] << " "; + verboseOutput() << endl; +} + +void CyclRatFunct::showCoprimeCRF() { + // shows *this also with coprime numerator and denominator + // makes only sense if only x[0] appears in the numerator (not checked) + + if (!verbose_INT) + return; + + verboseOutput() << "--------------------------------------------" << endl << endl; + verboseOutput() << "Given form" << endl << endl; + showCRF(); + verboseOutput() << endl; + const SparsePolyRing& R = owner(num); + SparsePolyRing P = NewPolyRing_DMPI(RingQQ(), symbols("t")); + vector Im(NumIndets(R), zero(P)); + Im[0] = indets(P)[0]; + RingHom phi = PolyAlgebraHom(R, P, Im); + RingElem f(phi(num)); + RingElem g(denom2poly(P, denom)); + RingElem h = CoCoA::gcd(f, g); + f /= h; + g /= h; + verboseOutput() << "Coprime numerator (for denom with remaining factor 1)" << endl << endl; + factorization gf = factor(g); + verboseOutput() << f / gf.myRemainingFactor() << endl << endl << "Factorization of denominator" << endl << endl; + size_t nf = gf.myFactors().size(); + for (size_t i = 0; i < nf; ++i) + verboseOutput() << gf.myFactors()[i] << " mult " << gf.myMultiplicities()[i] << endl; + 
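// [Editor's sketch, not part of the patch] CyclRatFunct stores a denominator
// prod_i (1 - t^i)^{d[i]} simply as the exponent vector d (index i = cyclotomic
// block, entry d[i] = multiplicity); distinct factors 1 - t^i are treated as
// coprime. Under that convention lcmDenom is a componentwise maximum and prodDenom
// a componentwise sum, as in this stand-alone sketch (hypothetical names):
#include <algorithm>
#include <cstddef>
#include <vector>

// d, e: denominators in the exponent-vector encoding described above
std::vector<long> lcmDenomSketch(std::vector<long> d, const std::vector<long>& e) {
    if (e.size() > d.size())
        d.resize(e.size(), 0);
    for (std::size_t i = 0; i < e.size(); ++i)
        d[i] = std::max(d[i], e[i]);           // max of multiplicities
    return d;
}

std::vector<long> prodDenomSketch(std::vector<long> d, const std::vector<long>& e) {
    if (e.size() > d.size())
        d.resize(e.size(), 0);
    for (std::size_t i = 0; i < e.size(); ++i)
        d[i] += e[i];                          // sum of multiplicities
    return d;
}
// Example: (1-t)^2 (1-t^3)  ->  d = {0, 2, 0, 1};   (1-t)(1-t^2)  ->  e = {0, 1, 1};
// lcm = {0, 2, 1, 1},  product = {0, 3, 1, 1}.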
verboseOutput() << "--------------------------------------------" << endl; +} + +void CyclRatFunct::simplifyCRF() { + // cancels factors 1-t^i from the denominator that appear there explicitly + // (and not just as factors of 1-t^j for some j) + + const SparsePolyRing& R = owner(num); + long nd = denom.size(); + for (long i = 1; i < nd; i++) { + while (denom[i] > 0) { + if (!IsDivisible(num, 1 - power(indets(R)[0], i))) + break; + num /= 1 - power(indets(R)[0], i); + denom[i]--; + } + } +} + +void CyclRatFunct::set2(const RingElem& f, const vector& d) { + num = f; + denom = d; +} + +void CyclRatFunct::set2(const RingElem& f) { + num = f; + denom.resize(1, 0); +} + +CyclRatFunct::CyclRatFunct(const RingElem& c) + : num(c) +// constructor starting from a RingElem +// initialization necessary because RingElem has no default +// constructor +{ + denom.resize(1, 0); +} + +CyclRatFunct::CyclRatFunct(const RingElem& c, const vector& d) : num(c), denom(d) { +} + } // end namespace libnormaliz #endif // NMZ_COCOA diff -Nru normaliz-3.8.5+ds/source/libnormaliz/nmz_nauty.cpp normaliz-3.8.9+ds/source/libnormaliz/nmz_nauty.cpp --- normaliz-3.8.5+ds/source/libnormaliz/nmz_nauty.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/nmz_nauty.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -135,13 +135,7 @@ for(size_t j=0; j< VV.size(); ++j){ long old_index=Values[VV[j]]; new_index[old_index]=j; - } - - - for (i = 0; i < mm; ++i) { - for (j = 0; j < nn; ++j) - MM.insert(MVal[i][j], i, j); - } + } for (i = 0; i < mm; ++i) { for (j = 0; j < nn; ++j){ @@ -220,6 +214,7 @@ } MM.set_values(VV); + } template @@ -400,7 +395,7 @@ if(stats.grpsize2 != 0){ mpz_class power_mpz = mpz_class(stats.grpsize2); - long power = convertTo(power_mpz); + long power = convertToLong(power_mpz); for(long i = 0; i< power; ++i) result.order *= 10; } @@ -532,7 +527,7 @@ result.order = mpz_class(stats.grpsize1); if(stats.grpsize2 != 0){ mpz_class power_mpz = mpz_class(stats.grpsize2); - long power = convertTo(power_mpz); + long power = convertToLong(power_mpz); for(long i = 0; i< power; ++i) result.order *= 10; } diff -Nru normaliz-3.8.5+ds/source/libnormaliz/nmz_polynomial.cpp normaliz-3.8.9+ds/source/libnormaliz/nmz_polynomial.cpp --- normaliz-3.8.5+ds/source/libnormaliz/nmz_polynomial.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/nmz_polynomial.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,937 +0,0 @@ -#ifdef NMZ_COCOA -/* - * Copyright (C) 2012-2014 Winfried Bruns, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#include -#include -#include - -#include "libnormaliz/nmz_integrate.h" -#include "libnormaliz/dynamic_bitset.h" - -using namespace CoCoA; - -#include "../libnormaliz/my_omp.h" - -using namespace std; - -namespace libnormaliz { - -ourFactorization::ourFactorization(const vector& myFactors, - const vector& myMultiplicities, - const RingElem& myRemainingFactor) { - this->myFactors = myFactors; - this->myMultiplicities = myMultiplicities; - this->myRemainingFactor = myRemainingFactor; -} - -/* -ourFactorization::ourFactorization(const factorization& FF) { - ourFactorization(FF.myFactors(), FF.myMultiplicities(), FF.myRemainingFactor()); -} - -RingElem binomial(const RingElem& f, long k) -// computes binomial coefficient (f choose k) -{ - const SparsePolyRing& P = owner(f); - RingElem g(P); - g = 1; - for (int i = 0; i < k; i++) - g *= (f - i) / (i + 1); - return (g); -} -*/ - -RingElem ascFact(const RingElem& f, long k) -// computes (f+1)*...*(f+k) -{ - const SparsePolyRing& P = owner(f); - RingElem g(P); - g = 1; - for (int i = 0; i < k; i++) - g *= (f + i + 1); - return (g); -} - -/* -RingElem descFact(const RingElem& f, long k) -// computes f*(f-1)*...*(f-k+1) -{ - const SparsePolyRing& P = owner(f); - RingElem g(P); - g = 1; - for (int i = 0; i < k; i++) - g *= (f - i); - return (g); -} -*/ - -bool compareLength(const RingElem& p, const RingElem& q) { - return (NumTerms(p) > NumTerms(q)); -} - -vector ourCoeffs(const RingElem& F, const long j) { - // our version of expanding a poly nomial wrt to indeterminate j - // The return value is the vector of coefficients of x[j]^i - vector c; - const SparsePolyRing& P = owner(F); - RingElem x = indets(P)[j]; - if (F == 0) { - c.push_back(zero(P)); - return (c); - } - - vector v(NumIndets(P)); - long k, cs; - - SparsePolyIter i = BeginIter(F); - for (; !IsEnded(i); ++i) { - exponents(v, PP(i)); - k = v[j]; - cs = c.size(); - if (k > cs - 1) - c.resize(k + 1, zero(P)); - v[j] = 0; - // c[k]+=monomial(P,coeff(i),v); - PushBack(c[k], coeff(i), v); - } - return (c); -} - -RingElem mySubstitution(const RingElem& F, const vector& w) { - const SparsePolyRing& R = owner(F); - RingElem G(zero(R)); - RingElem H(one(R)); - vector v(NumIndets(R)); - vector Z(NumIndets(R)); - - SparsePolyIter i = BeginIter(F); - for (; !IsEnded(i); ++i) { - exponents(v, PP(i)); - H = zero(R); - PushBack(H, coeff(i), Z); - for (size_t j = 0; j < v.size(); ++j) - H *= power(w[j], v[j]); - G += H; - } - return G; -} - -vector MxV(const vector >& M, vector V) { - // matrix*vector - vector P(M.size()); - for (size_t i = 0; i < M.size(); ++i) { - long s = 0; - for (size_t j = 0; j < V.size(); ++j) - s += M[i][j] * V[j]; - P[i] = s; - } - return (P); -} - -vector VxM(const vector& V, const vector >& M) { - // vector*matrix - const SparsePolyRing& R = owner(V[0]); - RingElem s(zero(R)); - vector P(M[0].size(), zero(R)); - for (size_t j = 0; j < M[0].size(); ++j) { - s = 0; - for (size_t i = 0; i < M.size(); ++i) - s += V[i] * M[i][j]; - P[j] = s; - } - return (P); -} - -/* -RingElem affineLinearSubstitution(const RingElem& F,const vector >& A, - const vector& b, const long& denom){ -// NOT IN USE - size_t i; - const SparsePolyRing& R=owner(F); - size_t m=A.size(); - // long n=A[0].size(); - vector v(m,zero(R)); - RingElem g(zero(R)); - - for(i=0;i w=VxM(v,A); - vector w1(w.size()+1,zero(R)); - w1[0]=indets(R)[0]; - for(i=1;i shiftVars(const vector& v, const vector& key) { - // selects components of v and reorders them according to key - vector w(v.size(), 0); - for (size_t i = 
0; i < key.size(); ++i) { - w[i] = v[key[i]]; - } - return (w); -} - -void makeLocalDegreesAndKey(const dynamic_bitset& indicator, - const vector& degrees, - vector& localDeg, - vector& key) { - localDeg.clear(); - key.clear(); - key.push_back(0); - for (size_t i = 0; i < indicator.size(); ++i) - if (indicator.test(i)) - key.push_back(i + 1); - for (size_t i = 0; i < key.size() - 1; ++i) - localDeg.push_back(degrees[key[i + 1] - 1]); -} - -void makeStartEnd(const vector& localDeg, vector& St, vector& End) { - vector denom = degrees2denom(localDeg); // first we must find the blocks of equal degree - if (denom.size() == 0) - return; - St.push_back(1); - for (size_t i = 0; i < denom.size(); ++i) - if (denom[i] != 0) { - End.push_back(St[St.size() - 1] + denom[i] - 1); - if (i < denom.size() - 1) - St.push_back(End[End.size() - 1] + 1); - } - /* if(St.size()!=End.size()){ - for (size_t i=0;i orderExposInner(vector& vin, const vector& St, vector& End) { - vector v = vin; - long p, s, pend, pst; - bool ordered; - if (St.size() != End.size()) { - verboseOutput() << St.size() << " " << End.size() << " " << vin.size() << endl; - verboseOutput() << St[0] << endl; - for (size_t i = 0; i < vin.size(); ++i) { - verboseOutput() << vin[i] << " "; - } - verboseOutput() << endl; - assert(false); - } - for (size_t j = 0; j < St.size(); ++j) { // now we go over the blocks - pst = St[j]; - pend = End[j]; - while (1) { - ordered = true; - for (p = pst; p < pend; ++p) { - if (v[p] < v[p + 1]) { - ordered = false; - s = v[p]; - v[p] = v[p + 1]; - v[p + 1] = s; - } - } - if (ordered) - break; - pend--; - } - } - return (v); -} - -RingElem orderExpos(const RingElem& F, const vector& degrees, const dynamic_bitset& indicator, bool compactify) { - // orders the exponent vectors v of the terms of F - // the exponents v[i] and v[j], i < j, are swapped if - // (1) degrees[i]==degrees[j] and (2) v[i] < v[j] - // so that the exponents are descending in each degree block - // the ordered exponent vectors are inserted into a map - // and their coefficients are added - // at the end the polynomial is rebuilt from the map - // If compactify==true, the exponents will be shifted to the left in order to keep the correspondence - // of variables to degrees - // compactification not used at present (occurs only in restrictToFaces) - - const SparsePolyRing& P = owner(F); - vector v(NumIndets(P)); - vector key, localDeg; - key.reserve(v.size() + 1); - localDeg.reserve(degrees.size() + 1); - - if (compactify) { - makeLocalDegreesAndKey(indicator, degrees, localDeg, key); - } - else { - localDeg = degrees; - } - - vector St, End; - makeStartEnd(localDeg, St, End); - - // now the main job - - map, RingElem> orderedMons; // will take the ordered exponent vectors - - SparsePolyIter mon = BeginIter(F); // go over the given polynomial - for (; !IsEnded(mon); ++mon) { - exponents(v, PP(mon)); // this function gives the exponent vector back as v - if (compactify) - v = shiftVars(v, key); - v = orderExposInner(v, St, End); - auto ord_mon = orderedMons.find(v); // insert into map or add coefficient - if (ord_mon != orderedMons.end()) { - ord_mon->second += coeff(mon); - } - else { - orderedMons.insert(pair, RingElem>(v, coeff(mon))); - } - } - - // now we must reconstruct the polynomial - // we use that the natural order of vectors in C++ STL is inverse - // to lex. 
Therefore push_front - - RingElem r(zero(P)); - // JAA verboseOutput() << "Loop start " << orderedMons.size() << endl; - // JAA size_t counter=0; - for (const auto& ord_mon : orderedMons) { - // JAA verboseOutput() << counter++ << ord_mon.first << endl; - // JAA try { - PushFront(r, ord_mon.second, ord_mon.first); - // JAA } - // JAA catch(const std::exception& exc){verboseOutput() << "Caught exception: " << exc.what() << endl;} - } - // JAA verboseOutput() << "Loop end" << endl; - return (r); -} - -void restrictToFaces(const RingElem& G, - RingElem& GOrder, - vector& GRest, - const vector degrees, - const vector& inExSimplData) { - // Computesd the restrictions of G to the faces in inclusion-exclusion. - // All terms are simultaneously compactified and exponentwise ordered - // Polynomials returned in GRest - // Ordering is also applied to G itself, returned in GOrder - // Note: degrees are given for the full simplex. Therefore "local" degreees must be made - // (depend only on face and not on offset, but generation here is cheap) - - const SparsePolyRing& P = owner(G); - - vector v(NumIndets(P)); - vector w(NumIndets(P)); - vector localDeg; - localDeg.reserve(v.size()); - size_t dim = NumIndets(P) - 1; - - // first we make the facewise data that are needed for the compactification and otrdering - // of exponent vectors - vector > St(inExSimplData.size()), End(inExSimplData.size()), key(inExSimplData.size()); - vector active; - for (size_t i = 0; i < inExSimplData.size(); ++i) - if (!inExSimplData[i].done) { - active.push_back(i); - makeLocalDegreesAndKey(inExSimplData[i].GenInFace, degrees, localDeg, key[i]); - makeStartEnd(localDeg, St[i], End[i]); - } - - // now the same for the full simplex (localDeg=degrees) - dynamic_bitset fullSimpl(dim); - fullSimpl.set(); - vector StSimpl, EndSimpl; - makeStartEnd(degrees, StSimpl, EndSimpl); - - vector, RingElem> > orderedMons(inExSimplData.size()); // will take the ordered exponent vectors - map, RingElem> orderedMonsSimpl; - - dynamic_bitset indicator(dim); - - // now we go over the terms of G - SparsePolyIter term = BeginIter(G); - // PPMonoid TT = PPM(owner(G)); - for (; !IsEnded(term); ++term) { - // PPMonoidElem mon(PP(term)); - exponents(v, PP(term)); - w = v; - indicator.reset(); - for (size_t j = 0; j < dim; ++j) - if (v[j + 1] != 0) // we must add 1 since the 0-th indeterminate is irrelevant here - indicator.set(j); - for (size_t i = 0; i < active.size(); ++i) { - int j = active[i]; - if (indicator.is_subset_of(inExSimplData[j].GenInFace)) { - w = shiftVars(v, key[j]); - w = orderExposInner(w, St[j], End[j]); - // w=shiftVars(v,key[j]); - auto ord_mon = orderedMons[j].find(w); // insert into map or add coefficient - if (ord_mon != orderedMons[j].end()) { - ord_mon->second += coeff(term); - } - else { - orderedMons[j].insert(pair, RingElem>(w, coeff(term))); - } - } - } // for i - - v = orderExposInner(v, StSimpl, EndSimpl); - auto ord_mon = orderedMonsSimpl.find(v); // insert into map or add coefficient - if (ord_mon != orderedMonsSimpl.end()) { - ord_mon->second += coeff(term); - } - else { - orderedMonsSimpl.insert(pair, RingElem>(v, coeff(term))); - } - } // loop over term - - // now we must make the resulting polynomials from the maps - - for (size_t i = 0; i < active.size(); ++i) { - int j = active[i]; - for (const auto& ord_mon : orderedMons[j]) { - PushFront(GRest[j], ord_mon.second, ord_mon.first); - } - // verboseOutput() << "GRest[j] " << j << " " << NumTerms(GRest[j]) << endl; - } - - for (const auto& ord_mon : 
orderedMonsSimpl) { - PushFront(GOrder, ord_mon.second, ord_mon.first); - } -} - -long nrActiveFaces = 0; -long nrActiveFacesOld = 0; - -void all_contained_faces(const RingElem& G, - RingElem& GOrder, - const vector& degrees, - dynamic_bitset& indicator, - long Deg, - vector& inExSimplData, - deque, RingElem> >& facePolysThread) { - const SparsePolyRing& R = owner(G); - vector GRest; - // size_t dim=indicator.size(); - for (size_t i = 0; i < inExSimplData.size(); ++i) { - GRest.push_back(zero(R)); - - if (!indicator.is_subset_of(inExSimplData[i].GenInFace)) - inExSimplData[i].done = true; // done if face cannot contribute to result for this offset - else - inExSimplData[i].done = false; // not done otherwise - } - restrictToFaces(G, GOrder, GRest, degrees, inExSimplData); - - for (size_t j = 0; j < inExSimplData.size(); ++j) { - if (inExSimplData[j].done) - continue; -#pragma omp atomic - nrActiveFaces++; - // verboseOutput() << "Push back " << NumTerms(GRest[j]); - GRest[j] = power(indets(R)[0], Deg) * inExSimplData[j].mult * - GRest[j]; // shift by degree of offset amd multiply by mult of face - facePolysThread.push_back(pair, RingElem>(inExSimplData[j].degrees, GRest[j])); - // verboseOutput() << " Now " << facePolysThread.size() << endl; - } -} - -RingElem affineLinearSubstitutionFL(const ourFactorization& FF, - const vector >& A, - const vector& b, - const long& denom, - const SparsePolyRing& R, - const vector& degrees, - const BigInt& lcmDets, - vector& inExSimplData, - deque, RingElem> >& facePolysThread) { - // applies linar substitution y --> lcmDets*A(y+b/denom) to all factors in FF - // and returns the product of the modified factorsafter ordering the exponent vectors - - size_t i; - size_t m = A.size(); - size_t dim = m; // TO DO: eliminate this duplication - vector v(m, zero(R)); - RingElem g(zero(R)); - - for (i = 0; i < m; i++) { - g = b[i] * (lcmDets / denom); - v[i] = g + lcmDets * indets(R)[i + 1]; - } - vector w = VxM(v, A); - vector w1(w.size() + 1, zero(R)); - w1[0] = RingElem(R, lcmDets); - for (i = 1; i < w1.size(); ++i) - w1[i] = w[i - 1]; - - // RingHom phi=PolyAlgebraHom(R,R,w1); - - RingElem G1(zero(R)); - list sortedFactors; - for (i = 0; i < FF.myFactors.size(); ++i) { - // G1=phi(FF.myFactors[i]); - G1 = mySubstitution(FF.myFactors[i], w1); - for (int nn = 0; nn < FF.myMultiplicities[i]; ++nn) - sortedFactors.push_back(G1); - } - - sortedFactors.sort(compareLength); - - RingElem G(one(R)); - - for (const auto& sf : sortedFactors) - G *= sf; - - if (inExSimplData.size() == 0) { // not really necesary, but a slight shortcut - dynamic_bitset dummyInd; - return (orderExpos(G, degrees, dummyInd, false)); - } - - // if(inExSimplData.size()!=0){ - long Deg = 0; - dynamic_bitset indicator(dim); // indicates the non-zero components of b - indicator.reset(); - for (size_t i = 0; i < dim; ++i) - if (b[i] != 0) { - indicator.set(i); - Deg += degrees[i] * b[i]; - } - Deg /= denom; - RingElem Gorder(zero(R)); - all_contained_faces(G, Gorder, degrees, indicator, Deg, inExSimplData, facePolysThread); - return (Gorder); - // } -} - -vector homogComps(const RingElem& F) { - // returns the vector of homogeneous components of F - // w.r.t. 
standard grading - - const SparsePolyRing& P = owner(F); - long dim = NumIndets(P); - vector v(dim); - vector c(deg(F) + 1, zero(P)); - long j, k; - - // TODO there is a leading_term() function coming in cocoalib - // TODO maybe there will be even a "splice_leading_term" - SparsePolyIter i = BeginIter(F); - for (; !IsEnded(i); ++i) { - exponents(v, PP(i)); - k = 0; - for (j = 0; j < dim; j++) - k += v[j]; - PushBack(c[k], coeff(i), v); - } - return (c); -} - -RingElem homogenize(const RingElem& F) { - // homogenizes F wrt the zeroth variable and returns the - // homogenized polynomial - - SparsePolyRing P = owner(F); - int d = deg(F); - vector c(d + 1, zero(P)); - c = homogComps(F); - RingElem h(zero(P)); - for (int i = 0; i <= d; ++i) - h += c[i] * power(indets(P)[0], d - i); - return (h); -} - -RingElem makeZZCoeff(const RingElem& F, const SparsePolyRing& RZZ) { - // F is a polynomial over RingQQ with integral coefficients - // This function converts it into a polynomial over RingZZ - - SparsePolyIter mon = BeginIter(F); // go over the given polynomial - RingElem G(zero(RZZ)); - for (; !IsEnded(mon); ++mon) { - PushBack(G, num(coeff(mon)), PP(mon)); - } - return (G); -} - -RingElem makeQQCoeff(const RingElem& F, const SparsePolyRing& R) { - // F is a polynomial over RingZZ - // This function converts it into a polynomial over RingQQ - SparsePolyIter mon = BeginIter(F); // go over the given polynomial - RingElem G(zero(R)); - for (; !IsEnded(mon); ++mon) { - PushBack(G, RingElem(RingQQ(), coeff(mon)), PP(mon)); - } - return (G); -} - -RingElem processInputPolynomial(const string& poly_as_string, - const SparsePolyRing& R, - const SparsePolyRing& RZZ, - vector& resPrimeFactors, - vector& resPrimeFactorsNonhom, - vector& resMultiplicities, - RingElem& remainingFactor, - bool& homogeneous, - const bool& do_leadCoeff) { - // "res" stands for "result" - // resPrimeFactors are homogenized, the "nonhom" come from the original polynomial - - long i, j; - string dummy = poly_as_string; - size_t semicolon=dummy.find(';'); - if(semicolon != string::npos){ - dummy[semicolon]=' '; - } - RingElem the_only_dactor = ReadExpr(R, dummy); // there is only one - vector factorsRead; - factorsRead.push_back(the_only_dactor); - vector multiplicities; - - vector primeFactors; // for use in this routine - vector primeFactorsNonhom; // return results will go into the "res" parameters for output - - if (verbose_INT) - verboseOutput() << "Polynomial read" << endl; - - homogeneous = true; - for (auto& G : factorsRead) { - // we factor the polynomials read and make them integral this way they - // must further be homogenized and converted to polynomials with ZZ - // coefficients (instead of inegral QQ) The homogenization is necessary - // to allow substitutions over ZZ - if (deg(G) == 0) { - remainingFactor *= G; // constants go into remainingFactor - continue; // this extra treatment would not be necessary - } - - // homogeneous=(G==LF(G)); - vector compsG = homogComps(G); - // we test for homogeneity. In case do_leadCoeff==true, polynomial - // is replaced by highest homogeneous component - if (G != compsG[compsG.size() - 1]) { - homogeneous = false; - if (verbose_INT && do_leadCoeff) - verboseOutput() << "Polynomial is inhomogeneous. Replacing it by highest hom. comp." 
<< endl; - if (do_leadCoeff) { - G = compsG[compsG.size() - 1]; - } - } - - factorization FF = factor(G); // now the factorization and transfer to integer coefficients - for (j = 0; j < (long)FF.myFactors().size(); ++j) { - primeFactorsNonhom.push_back(FF.myFactors()[j]); // these are the factors of the polynomial to be integrated - primeFactors.push_back(makeZZCoeff(homogenize(FF.myFactors()[j]), RZZ)); // the homogenized factors with ZZ coeff - multiplicities.push_back(FF.myMultiplicities()[j]); // homogenized for substitution ! - } - remainingFactor *= FF.myRemainingFactor(); - } - - // it remains to collect multiple factors that come from different input factors - for (i = 0; i < (long)primeFactors.size(); ++i) { - if (primeFactors[i] == 0) - continue; - for (j = i + 1; j < (long)primeFactors.size(); ++j) { - if (primeFactors[j] != 0 && primeFactors[i] == primeFactors[j]) { - primeFactors[j] = 0; - multiplicities[i]++; - } - } - } - - // now everything is transferred to the return parameters - for (i = 0; i < (long)primeFactors.size(); ++i) { - if (primeFactors[i] != 0) { - resPrimeFactorsNonhom.push_back(primeFactorsNonhom[i]); - resPrimeFactors.push_back(primeFactors[i]); - resMultiplicities.push_back(multiplicities[i]); - } - } - - RingElem F(one(R)); // the polynomial to be integrated with QQ coefficients - for (const auto& G : factorsRead) - F *= G; - - return F; -} - -CyclRatFunct genFunct(const vector >& GFP, const RingElem& F, const vector& degrees) -// writes \sum_{x\in\ZZ_+^n} f(x,t) T^x -// under the specialization T_i --> t^g_i -// as a rational function in t -{ - const SparsePolyRing& P = owner(F); - RingElem t = indets(P)[0]; - - CyclRatFunct s(F); // F/1 - - CyclRatFunct g(zero(P)), h(zero(P)); - - long nd = degrees.size(); - long i, k, mg; - vector c; - - for (k = 1; k <= nd; k++) { - c = ourCoeffs(s.num, k); // we split the numerator according - // to powers of var k - mg = c.size(); // max degree+1 in var k - - h.set2(zero(P)); - for (i = 0; i < mg; i++) // now we replace the powers of var k - { // by the corrseponding rational function, - // multiply, and sum the products - - h.num = (1 - power(t, degrees[k - 1])) * h.num + GFP[degrees[k - 1]][i].num * c[i]; - h.denom = GFP[degrees[k - 1]][i].denom; - } - s.num = h.num; - s.denom = prodDenom(s.denom, h.denom); - } - return (s); -} - -vector power2ascFact(const SparsePolyRing& P, const long& k) -// computes the representation of the power x^n as the linear combination -// of (x+1)_n,...,(x+1)_0 -// return value is the vector of coefficients (they belong to ZZ) -{ - RingElem t = indets(P)[0]; - const vector ONE(NumIndets(P)); - RingElem f(P), g(P), h(P); - f = power(t, k); - long m; - vector c(k + 1, zero(P)); - while (f != 0) { - m = deg(f); - h = monomial(P, LC(f), ONE); - c[m] = h; - f -= h * ascFact(t, m); - } - return (c); -} - -CyclRatFunct genFunctPower1(const SparsePolyRing& P, long k, long n) -// computes the generating function for -// \sum_j j^n (t^k)^j -{ - vector a = power2ascFact(P, n); - RingElem b(P); - vector u; - CyclRatFunct g(zero(P)), h(zero(P)); - long i, s = a.size(); - for (i = 0; i < s; ++i) { - u = makeDenom(k, i + 1); - b = a[i] * factorial(i); - g.set2(b, u); - h.addCRF(g); - } - return (h); -} - -void CyclRatFunct::extendDenom(const vector& target) -// extends the denominator to target -// by multiplying the numrerator with the remaining factor -{ - RingElem t = indets(owner(num))[0]; - long i, ns = target.size(), nf = denom.size(); - for (i = 1; i < ns; ++i) { - if (i > nf - 1) - num *= 
power(1 - power(t, i), (target[i])); - else if (target[i] > denom[i]) - num *= power(1 - power(t, i), (target[i] - denom[i])); - } - denom = target; -} - -vector lcmDenom(const vector& df, const vector& dg) { - // computes the lcm of ztwo denominators as used in CyclRatFunct - // (1-t^i and 1-t^j, i != j, are considered as coprime) - size_t nf = df.size(), ng = dg.size(), i; - size_t n = max(nf, ng); - vector dh = df; - dh.resize(n); - for (i = 1; i < n; ++i) - if (i < ng && dh[i] < dg[i]) - dh[i] = dg[i]; - return (dh); -} - -vector prodDenom(const vector& df, const vector& dg) { - // as above, but computes the profduct - size_t nf = df.size(), ng = dg.size(), i; - size_t n = max(nf, ng); - vector dh = df; - dh.resize(n); - for (i = 1; i < n; ++i) - if (i < ng) - dh[i] += dg[i]; - return (dh); -} - -vector degrees2denom(const vector& d) { - // converts a vector of degrees to a "denominator" - // listing at position i the multiplicity of i in d - long m = 0; - size_t i; - if (d.size() == 0) - return vector(0); - for (i = 0; i < d.size(); ++i) - m = max(m, d[i]); - vector e(m + 1); - for (i = 0; i < d.size(); ++i) - e[d[i]]++; - return (e); -} - -vector denom2degrees(const vector& d) { - // the converse operation - vector denomDeg; - for (size_t i = 0; i < d.size(); ++i) - for (long j = 0; j < d[i]; ++j) - denomDeg.push_back(i); - return (denomDeg); -} - -RingElem denom2poly(const SparsePolyRing& P, const vector& d) { - // converts a denominator into a real polynomial - // the variable for the denominator is x[0] - RingElem t = indets(P)[0]; - RingElem f(one(P)); - for (size_t i = 1; i < d.size(); ++i) - f *= power(1 - power(t, i), d[i]); - return (f); -} - -vector makeDenom(long k, long n) -// makes the denominator (1-t^k)^n -{ - vector d(k + 1); - d[k] = n; - return (d); -} - -void CyclRatFunct::addCRF(const CyclRatFunct& r) { - // adds r to *this, r is preserved in its given form - CyclRatFunct s(zero(owner(num))); - const vector lcmden(lcmDenom(denom, r.denom)); - s = r; - s.extendDenom(lcmden); - extendDenom(lcmden); - num += s.num; -} - -/* -void CyclRatFunct::multCRF(const CyclRatFunct& r) { - // nmultiplies *this by r - num *= r.num; - denom = prodDenom(denom, r.denom); -} -*/ - -void CyclRatFunct::showCRF() { - if (!verbose_INT) - return; - - verboseOutput() << num << endl; - for (size_t i = 1; i < denom.size(); ++i) - verboseOutput() << denom[i] << " "; - verboseOutput() << endl; -} - -void CyclRatFunct::showCoprimeCRF() { - // shows *this also with coprime numerator and denominator - // makes only sense if only x[0] appears in the numerator (not checked) - - if (!verbose_INT) - return; - - verboseOutput() << "--------------------------------------------" << endl << endl; - verboseOutput() << "Given form" << endl << endl; - showCRF(); - verboseOutput() << endl; - const SparsePolyRing& R = owner(num); - SparsePolyRing P = NewPolyRing_DMPI(RingQQ(), symbols("t")); - vector Im(NumIndets(R), zero(P)); - Im[0] = indets(P)[0]; - RingHom phi = PolyAlgebraHom(R, P, Im); - RingElem f(phi(num)); - RingElem g(denom2poly(P, denom)); - RingElem h = CoCoA::gcd(f, g); - f /= h; - g /= h; - verboseOutput() << "Coprime numerator (for denom with remaining factor 1)" << endl << endl; - factorization gf = factor(g); - verboseOutput() << f / gf.myRemainingFactor() << endl << endl << "Factorization of denominator" << endl << endl; - size_t nf = gf.myFactors().size(); - for (size_t i = 0; i < nf; ++i) - verboseOutput() << gf.myFactors()[i] << " mult " << gf.myMultiplicities()[i] << endl; - 
verboseOutput() << "--------------------------------------------" << endl; -} - -void CyclRatFunct::simplifyCRF() { - // cancels factors 1-t^i from the denominator that appear there explicitly - // (and not just as factors of 1-t^j for some j) - - const SparsePolyRing& R = owner(num); - long nd = denom.size(); - for (long i = 1; i < nd; i++) { - while (denom[i] > 0) { - if (!IsDivisible(num, 1 - power(indets(R)[0], i))) - break; - num /= 1 - power(indets(R)[0], i); - denom[i]--; - } - } -} - -void CyclRatFunct::set2(const RingElem& f, const vector& d) { - num = f; - denom = d; -} - -void CyclRatFunct::set2(const RingElem& f) { - num = f; - denom.resize(1, 0); -} - -CyclRatFunct::CyclRatFunct(const RingElem& c) - : num(c) -// constructor starting from a RingElem -// initialization necessary because RingElem has no default -// constructor -{ - denom.resize(1, 0); -} - -CyclRatFunct::CyclRatFunct(const RingElem& c, const vector& d) : num(c), denom(d) { -} - -} // namespace libnormaliz -#endif // NMZ_COCOA diff -Nru normaliz-3.8.5+ds/source/libnormaliz/normaliz_exception.h normaliz-3.8.9+ds/source/libnormaliz/normaliz_exception.h --- normaliz-3.8.5+ds/source/libnormaliz/normaliz_exception.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/normaliz_exception.h 2020-07-21 15:37:45.000000000 +0000 @@ -170,6 +170,52 @@ } }; +class LongException : public NormalizException { + public: + + ~LongException() noexcept { + } + + template + LongException(const Integer& convert_number) { + std::stringstream stream; + stream << "Could not convert " << convert_number << "to Long.\n"; + stream << "The number would break an absolute size barrier."; + msg = stream.str(); + } + + virtual const char* what() const noexcept { + return msg.c_str(); + } + + private: + std::string msg; + +}; + +class LongLongException : public NormalizException { + public: + + ~LongLongException() noexcept { + } + + template + LongLongException(const Integer& convert_number) { + std::stringstream stream; + stream << "Could not convert " << convert_number << "to Long long.\n"; + stream << "The number would break an absolute size barrier."; + msg = stream.str(); + } + + virtual const char* what() const noexcept { + return msg.c_str(); + } + + private: + std::string msg; + +}; + class PredictionErrorException : public NormalizException { public: virtual const char* what() const noexcept { diff -Nru normaliz-3.8.5+ds/source/libnormaliz/offload_handler.cpp normaliz-3.8.9+ds/source/libnormaliz/offload_handler.cpp --- normaliz-3.8.5+ds/source/libnormaliz/offload_handler.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/offload_handler.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -5,7 +5,7 @@ #include // offload system header #include "libnormaliz/matrix.h" #include "libnormaliz/full_cone.h" -#include "libnormaliz/list_operations.h" +#include "libnormaliz/list_and_map_operations.h" #include "libnormaliz/vector_operations.h" #include "libnormaliz/my_omp.h" #include "libnormaliz/HilbertSeries.h" diff -Nru normaliz-3.8.5+ds/source/libnormaliz/options.cpp normaliz-3.8.9+ds/source/libnormaliz/options.cpp --- normaliz-3.8.5+ds/source/libnormaliz/options.cpp 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/options.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -0,0 +1,386 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU 
General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +#include "libnormaliz/options.h" + +#include +#include + +namespace libnormaliz { +using std::cout; +using std::endl; +using std::ifstream; + +static void printCopying() { + cout << "Copyright (C) 2007-2019 The Normaliz Team, University of Osnabrueck." << endl + << "This program comes with ABSOLUTELY NO WARRANTY; This is free software," << endl + << "and you are welcome to redistribute it under certain conditions;" << endl + << "See COPYING for details." << endl; +} + +static void printVersion() { + cout << "Normaliz " << string(STRINGIFY(NMZ_VERSION)) << endl; + printCopying(); +} + +static string pureName(const string& fullName) { + // extracts the pure filename + + string slash = "/"; +#ifdef _WIN32 // for 32 and 64 bit windows + slash = "\\"; +#endif + size_t found = fullName.rfind(slash); + if (found == std::string::npos) + return (fullName); + found++; + size_t length = fullName.size() - found; + + // cout << "**************************** " << fullName.substr(found,length) << endl; + // exit(1); + return (fullName.substr(found, length)); +} + +void OptionsHandler::setProjectName(const string& s) { + if (project_name_set) { + cerr << "Error: Second project name " << s << " in command line!" << endl; + exit(1); + } + project_name = s; + // check if we can read the .in file + string name_in = project_name + ".in"; + const char* file_in = name_in.c_str(); + ifstream in2; + in2.open(file_in, ifstream::in); + if (in2.is_open() == false) { + // check if user added ".in" and ignore it in this case + string suffix(".in"); + size_t found = project_name.rfind(suffix); + if (found != string::npos) { + project_name.erase(found); + } + } + else { + in2.close(); + } + project_name_set = true; +} + +bool OptionsHandler::handle_commandline(int argc, char* argv[]) { + vector LongOptions; + string ShortOptions; // all options concatenated (including -) + // read command line options + for (int i = 1; i < argc; i++) { + if (argv[i][0] == '-') { + if (argv[i][1] != '\0') { + if (argv[i][1] != 'x'){ + if (argv[i][1] == '-') { + string LO = argv[i]; + LO.erase(0, 2); + LongOptions.push_back(LO); + } + else + ShortOptions = ShortOptions + argv[i]; + } + else if (argv[i][2] == '=') { +#ifdef _OPENMP + string Threads = argv[i]; + Threads.erase(0, 3); + if ((istringstream(Threads) >> nr_threads) && nr_threads >= 0) { + set_thread_limit(nr_threads); + // omp_set_num_threads(nr_threads); -- now in cone.cpp + } + else { + cerr << "Error: Invalid option string " << argv[i] << endl; + exit(1); + } +#else + cerr << "Warning: Compiled without OpenMP support, option " << argv[i] << " ignored." 
<< endl; +#endif + } + else { + cerr << "Error: Invalid option string " << argv[i] << endl; + exit(1); + } + } + } + else { + setProjectName(argv[i]); + } + } + return handle_options(LongOptions, ShortOptions); +} + + + +void OptionsHandler::setOutputDirName(const string& s) { + output_dir = s; + char slash = '/'; +#ifdef _WIN32 // for 32 and 64 bit windows + slash = '\\'; +#endif + if (output_dir[output_dir.size() - 1] != slash) + output_dir += slash; + output_dir_set = true; +} + +bool OptionsHandler::handle_options(vector& LongOptions, string& ShortOptions) { + // Analyzing short command line options + for (size_t i = 1; i < ShortOptions.size(); i++) { + switch (ShortOptions[i]) { + case '-': + break; + case 'c': + verbose = true; + break; + case 'f': + write_extra_files = true; + break; + case 'a': + write_all_files = true; + break; + case 'T': + to_compute.set(ConeProperty::Triangulation); + // to_compute.set(ConeProperty::Multiplicity); + break; + case 'F': + to_compute.set(ConeProperty::Descent); + break; + case 's': + to_compute.set(ConeProperty::SupportHyperplanes); + break; + case 'S': + to_compute.set(ConeProperty::Sublattice); + break; + case 't': + to_compute.set(ConeProperty::TriangulationSize); + break; + case 'v': + to_compute.set(ConeProperty::Multiplicity); + break; + case 'V': + to_compute.set(ConeProperty::Volume); + break; + case 'n': + to_compute.set(ConeProperty::HilbertBasis); + to_compute.set(ConeProperty::Multiplicity); + break; + case 'N': + to_compute.set(ConeProperty::HilbertBasis); + break; + case 'w': + to_compute.set(ConeProperty::WitnessNotIntegrallyClosed); + break; + case '1': + to_compute.set(ConeProperty::Deg1Elements); + break; + case 'q': + to_compute.set(ConeProperty::HilbertSeries); + break; + case 'p': + to_compute.set(ConeProperty::HilbertSeries); + to_compute.set(ConeProperty::Deg1Elements); + break; + case 'h': + to_compute.set(ConeProperty::HilbertBasis); + to_compute.set(ConeProperty::HilbertSeries); + break; + case 'y': + to_compute.set(ConeProperty::StanleyDec); + break; + case 'd': + to_compute.set(ConeProperty::DualMode); + break; + case 'r': + to_compute.set(ConeProperty::Approximate); + break; + case 'e': // check for arithmetic overflow + // test_arithmetic_overflow=true; + cerr << "WARNING: deprecated option -e is ignored." 
<< endl; + break; + case 'B': // use Big Integer + to_compute.set(ConeProperty::BigInt); // use_Big_Integer=true; + break; + case 'b': // use the bottom decomposition for the triangulation + to_compute.set(ConeProperty::BottomDecomposition); + break; + case 'C': // compute the class group + to_compute.set(ConeProperty::ClassGroup); + break; + case 'k': // keep the order of the generators in Full_Cone + to_compute.set(ConeProperty::KeepOrder); + break; + case 'o': // suppress bottom decomposition in Full_Cone + to_compute.set(ConeProperty::NoBottomDec); + break; + case 'M': // compute minimal system of generators of integral closure + // as a module over original monoid + to_compute.set(ConeProperty::ModuleGeneratorsOverOriginalMonoid); + break; + case '?': // print help text and exit + return true; + break; + case 'x': // should be separated from other options + cerr << "Error: Option -x= has to be separated from other options" << endl; + exit(1); + break; + case 'I': + to_compute.set(ConeProperty::Integral); + break; + case 'L': + to_compute.set(ConeProperty::VirtualMultiplicity); + break; + case 'E': + to_compute.set(ConeProperty::WeightedEhrhartSeries); + break; + case 'i': + ignoreInFileOpt = true; + break; + case 'H': + to_compute.set(ConeProperty::IntegerHull); + break; + case 'D': + to_compute.set(ConeProperty::ConeDecomposition); + break; + case 'P': + to_compute.set(ConeProperty::PrimalMode); + break; + case 'Y': + to_compute.set(ConeProperty::Symmetrize); + break; + case 'X': + to_compute.set(ConeProperty::NoSymmetrization); + break; + case 'G': + to_compute.set(ConeProperty::IsGorenstein); + break; + case 'j': + to_compute.set(ConeProperty::Projection); + break; + case 'J': + to_compute.set(ConeProperty::ProjectionFloat); + break; + default: + cerr << "Error: Unknown option -" << ShortOptions[i] << endl; + exit(1); + break; + } + } + + // Remember to update also the --help text and the documentation when changing this! 
+ vector AdmissibleOut; + string AdmissibleOutarray[] = {"gen", "cst", "inv", "ext", "ht1", "esp", + "egn", "typ", "lat", "msp", "mod"}; // "mod" must be last + for (const auto& i : AdmissibleOutarray) + AdmissibleOut.push_back(i); + assert(AdmissibleOut.back() == "mod"); + + // analyzing long options + for (const auto& LongOption : LongOptions) { + size_t j; + for (j = 0; j < LongOption.size(); ++j) { + if (LongOption[j] == '=') + break; + } + if (j < LongOption.size()) { + string OptName = LongOption.substr(0, j); + string OptValue = LongOption.substr(j + 1, LongOption.size() - 1); + if (OptName == "OutputDir") { + setOutputDirName(OptValue); + continue; + } + } + if (LongOption == "help") { + return true; // indicate printing of help text + } + if (LongOption == "verbose") { + verbose = true; + continue; + } + if (LongOption == "version") { + printVersion(); + exit(0); + } + /* if(LongOptions[i]=="BigInt"){ + use_Big_Integer=true; + continue; + }*/ + if (LongOption == "LongLong") { + use_long_long = true; + continue; + } + if (LongOption == "NoExtRaysOutput") { + no_ext_rays_output = true; + continue; + } + if (LongOption == "NoSuppHypsOutput") { + no_supp_hyps_output = true; + continue; + } + if (LongOption == "NoMatricesOutput") { + no_matrices_output = true; + continue; + } + if (LongOption == "ignore") { + ignoreInFileOpt = true; + continue; + } + if (LongOption == "files") { + write_extra_files = true; + continue; + } + if (LongOption == "all-files") { + write_all_files = true; + continue; + } + if (find(AdmissibleOut.begin(), AdmissibleOut.end(), LongOption) != AdmissibleOut.end()) { + OutFiles.push_back(LongOption); + continue; + } + try { + to_compute.set(toConeProperty(LongOption)); + continue; + } catch (const BadInputException&) { + }; + cerr << "Error: Unknown option --" << LongOption << endl; + exit(1); + } + + if (output_dir_set) { + output_file = output_dir + pureName(project_name); + } + else + output_file = project_name; + + return false; // no need to print help text +} + +bool OptionsHandler::activateDefaultMode() { + if (to_compute.goals().none() && !to_compute.test(ConeProperty::DualMode)) { + to_compute.set(ConeProperty::DefaultMode); + return true; + } + return false; +} + +} // name space diff -Nru normaliz-3.8.5+ds/source/libnormaliz/options.h normaliz-3.8.9+ds/source/libnormaliz/options.h --- normaliz-3.8.5+ds/source/libnormaliz/options.h 1970-01-01 00:00:00.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/options.h 2020-08-29 07:43:26.000000000 +0000 @@ -0,0 +1,274 @@ +/* + * Normaliz + * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + * As an exception, when this program is distributed through (i) the App Store + * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play + * by Google Inc., then that store may impose any digital rights management, + * device limits and/or redistribution restrictions that are required by its + * terms of service. + */ + +#ifndef NORMALIZ_OPTIONS_H +#define NORMALIZ_OPTIONS_H + +#include "libnormaliz/general.h" +#include "libnormaliz/input_type.h" +#include "libnormaliz/output.h" + +#include +#include + +#ifndef STRINGIFY +#define STRINGIFYx(Token) #Token +#define STRINGIFY(Token) STRINGIFYx(Token) +#endif + +namespace libnormaliz { +using namespace std; + +//--------------------------------------------------------------------------- + +class OptionsHandler { + public: + OptionsHandler(); + + // returns true if a help should be printed, false otherwise + bool handle_commandline(int argc, char* argv[]); + + // returns true if default mode was activated, false otherwise + bool activateDefaultMode(); + + template + void applyOutputOptions(Output& Out); + + inline bool isFilenameSet() const { + return project_name_set; + } + + inline bool isIgnoreInFileOpt() const { + return ignoreInFileOpt; + } + + inline int getNrThreads() const { + return nr_threads; + } + + inline void activateConeProperty(ConeProperty::Enum cp) { + to_compute.set(cp, true); + } + + inline void activateInputFileConeProperty(ConeProperty::Enum cp) { + if (!ignoreInFileOpt) + to_compute.set(cp, true); + } + /* void activateInputFileBigInt() { + if (!ignoreInFileOpt) use_Big_Integer = true; + }*/ + inline void activateInputFileLongLong() { + if (!ignoreInFileOpt) + use_long_long = true; + } + + inline void activateNoExtRaysOutput() { + if (!ignoreInFileOpt) + no_ext_rays_output = true; + } + + inline void activateNoMatricesOutput() { + if (!ignoreInFileOpt) + no_matrices_output = true; + } + + inline void activateNoSuppHypsOutput() { + if (!ignoreInFileOpt) + no_supp_hyps_output = true; + } + + inline const ConeProperties& getToCompute() const { + return to_compute; + } + + /* bool isUseBigInteger() const { + return use_Big_Integer; + }*/ + inline bool isUseLongLong() const { + return use_long_long; + } + + inline bool isNoExtRaysOutput() const { + return no_ext_rays_output; + } + + inline bool isNoMatricesOutput() const { + return no_matrices_output; + } + + inline bool isNoSuppHypsOutput() const { + return no_supp_hyps_output; + } + + inline const string& getProjectName() const { + return project_name; + } + + inline const string& getOutputDir() const { + return output_dir; + } + + + //--------------------------------------------------------------------------- + +private: + bool project_name_set; + bool output_dir_set; + string project_name; + string output_dir; + string output_file; + + // bool use_Big_Integer; now in ConeProperty + bool use_long_long; + bool no_ext_rays_output; + bool no_supp_hyps_output; + bool no_matrices_output; + + bool ignoreInFileOpt; + + int nr_threads; + + ConeProperties to_compute; + + bool write_extra_files, write_all_files; + + vector OutFiles; + + // return true if help should be printed, false otherwise + bool handle_options(vector& LongOptions, string& ShortOptions); + + void setProjectName(const string& s); + void setOutputDirName(const string& s); +}; + +//--------------------------------------------------------------------------- + +inline OptionsHandler::OptionsHandler() { + project_name_set = false; + output_dir_set = false; + write_extra_files = false, 
write_all_files = false; + // use_Big_Integer = false; + use_long_long = false; + ignoreInFileOpt = false; + nr_threads = 0; + no_ext_rays_output = false; + no_supp_hyps_output = false; + no_matrices_output = false; +} + +template +void OptionsHandler::applyOutputOptions(Output& Out) { + if (no_ext_rays_output) + Out.set_no_ext_rays_output(); + if (no_supp_hyps_output) + Out.set_no_supp_hyps_output(); + if (no_matrices_output) + Out.set_no_matrices_output(); + if (write_all_files) { + Out.set_write_all_files(); + } + else if (write_extra_files) { + Out.set_write_extra_files(); + } + if (to_compute.test(ConeProperty::Triangulation) || to_compute.test(ConeProperty::ConeDecomposition) + || to_compute.test(ConeProperty::UnimodularTriangulation) || to_compute.test(ConeProperty::LatticePointTriangulation) + || to_compute.test(ConeProperty::AllGeneratorsTriangulation) + ) { + Out.set_write_tri(true); + Out.set_write_tgn(true); + Out.set_write_inv(true); + } + if (to_compute.test(ConeProperty::StanleyDec)) { + Out.set_write_dec(true); + Out.set_write_tgn(true); + Out.set_write_inv(true); + } + if (to_compute.test(ConeProperty::FaceLattice) || to_compute.test(ConeProperty::DualFaceLattice)) { + Out.set_write_fac(true); + } + if (to_compute.test(ConeProperty::Incidence) || to_compute.test(ConeProperty::DualIncidence)) { + Out.set_write_inc(true); + } + if (to_compute.test(ConeProperty::ExploitAutomsVectors) || to_compute.test(ConeProperty::ExploitAutomsMult) || + to_compute.test(ConeProperty::Automorphisms) || to_compute.test(ConeProperty::AmbientAutomorphisms) || + to_compute.test(ConeProperty::CombinatorialAutomorphisms) || to_compute.test(ConeProperty::RationalAutomorphisms) || + to_compute.test(ConeProperty::EuclideanAutomorphisms)) { + Out.set_write_aut(true); + } + for (const auto& OutFile : OutFiles) { + if (OutFile == "gen") { + Out.set_write_gen(true); + continue; + } + if (OutFile == "cst") { + Out.set_write_cst(true); + continue; + } + if (OutFile == "inv") { + Out.set_write_inv(true); + continue; + } + if (OutFile == "ht1") { + Out.set_write_ht1(true); + continue; + } + if (OutFile == "ext") { + Out.set_write_ext(true); + continue; + } + if (OutFile == "egn") { + Out.set_write_egn(true); + continue; + } + if (OutFile == "esp") { + Out.set_write_esp(true); + continue; + } + if (OutFile == "typ") { + Out.set_write_typ(true); + continue; + } + if (OutFile == "lat") { + Out.set_write_lat(true); + continue; + } + if (OutFile == "msp") { + Out.set_write_msp(true); + continue; + } + if (OutFile == "mod") { + Out.set_write_mod(true); + continue; + } + } + + if (!project_name_set) { + cerr << "ERROR: No project name set!" << endl; + exit(1); + } + Out.set_name(output_file); +} + +} // name space + +#endif // NMZ_OPTIONS_H diff -Nru normaliz-3.8.5+ds/source/libnormaliz/other_algorithms.cpp normaliz-3.8.9+ds/source/libnormaliz/other_algorithms.cpp --- normaliz-3.8.5+ds/source/libnormaliz/other_algorithms.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/other_algorithms.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,36 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include "libnormaliz/project_and_lift.cpp" -#include "libnormaliz/reduction.cpp" -#include "libnormaliz/cone_dual_mode.cpp" -#include "libnormaliz/descent.cpp" -#include "libnormaliz/automorph.cpp" - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/output.cpp normaliz-3.8.9+ds/source/libnormaliz/output.cpp --- normaliz-3.8.5+ds/source/libnormaliz/output.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/output.cpp 2020-09-25 14:54:40.000000000 +0000 @@ -23,7 +23,7 @@ //--------------------------------------------------------------------------- -#include +#include #include #include #include @@ -37,7 +37,7 @@ #include "output.h" #include "libnormaliz/matrix.h" #include "libnormaliz/vector_operations.h" -#include "libnormaliz/map_operations.h" +#include "libnormaliz/list_and_map_operations.h" #include "libnormaliz/automorph.h" namespace libnormaliz { @@ -168,7 +168,7 @@ os << "Real embedded number field:" << endl; // os << *Renf << endl; char *res, *res1; - res = fmpq_poly_get_str_pretty(Renf->get_renf()->nf->pol, "a"); + res = fmpq_poly_get_str_pretty(Renf->get_renf()->nf->pol, Renf->gen_name.c_str()); res1 = arb_get_str(Renf->get_renf()->emb, 64, 0); os << "min_poly " << "(" << res << ")" @@ -431,7 +431,7 @@ out << endl; out << "Cycle decompositions " << endl << endl; - ; + for (size_t i = 0; i < nr_items; ++i) { vector > dec = cycle_decomposition(Perms[i]); out << "Perm " << i + 1 << ": "; @@ -563,6 +563,39 @@ out << Result->getIncidence()[f][j + nr_vert]; out << endl; } + + out << "primal" << endl; + + out.close(); + } +} + +//--------------------------------------------------------------------------- + +template +void Output::write_dual_inc() const { + if (inc == true) { + string file_name = name + ".inc"; + ofstream out(file_name.c_str()); + + size_t nr_vert = 0; + if (Result->isInhomogeneous()) + nr_vert = Result->getNrVerticesOfPolyhedron(); + size_t nr_ext = Result->getNrExtremeRays(); + size_t nr_supp = Result->getNrSupportHyperplanes(); + + out << nr_vert << endl; + out << nr_ext << endl; + out << nr_supp << endl; + out << endl; + + for (size_t f = 0; f < Result->getDualIncidence().size(); ++f) { + for (size_t j = 0; j < nr_supp; ++j) + out << Result->getDualIncidence()[f][j]; + out << endl; + } + + out << "dual" << endl; out.close(); } @@ -583,6 +616,36 @@ out << f.first[k]; out << " " << f.second << endl; } + + out << "primal" << endl; + + out.close(); + } +} + +//--------------------------------------------------------------------------- + +template +void Output::write_dual_fac() const { + if (fac == true) { + string file_name = name + ".fac"; + ofstream out(file_name.c_str()); + out << 
Result->getDualFaceLattice().size() << endl; + if(Result->isInhomogeneous()){ + out << Result->getNrVerticesOfPolyhedron() << endl; + } + else{ + out << Result->getNrExtremeRays() << endl; + } + out << endl; + + for (const auto& f : Result->getDualFaceLattice()) { + for (size_t k = 0; k < f.first.size(); ++k) + out << f.first[k]; + out << " " << f.second << endl; + } + + out << "dual" << endl; out.close(); } @@ -666,6 +729,9 @@ } if (Result->isComputed(ConeProperty::FVector)) { inv << "vector " << Result->getFVector().size() << " f_vector = " << Result->getFVector(); + } + if (Result->isComputed(ConeProperty::DualFVector)) { + inv << "vector " << Result->getDualFVector().size() << " dual_f_vector = " << Result->getDualFVector(); } if (Result->isComputed(ConeProperty::MaximalSubspace)) { size_t dim_max_subspace = Result->getDimMaximalSubspace(); @@ -719,6 +785,8 @@ vector Linear_Form = Result->getDehomogenization(); inv << "vector " << Linear_Form.size() << " dehomogenization = " << Linear_Form; } + if(Result->isComputed(ConeProperty::AxesScaling)) + inv << "vector " << Result->getAxesScaling().size() << " axes_scaling " << Result->getAxesScaling(); if (Result->isComputed(ConeProperty::Grading) == false) { if (Result->isComputed(ConeProperty::ExtremeRays)) { inv << "boolean graded = " @@ -1041,8 +1109,8 @@ } esp_out.close(); } - if (tgn && Result->isComputed(ConeProperty::Generators)) - Result->getGeneratorsMatrix().print(name, "tgn"); + if (tgn && Result->isComputed(ConeProperty::TriangulationGenerators)) + Result->getTriangulationGeneratorsMatrix().print(name, "tgn"); if (tri && Result->isComputed(ConeProperty::Triangulation)) { // write triangulation write_tri(); } @@ -1050,10 +1118,18 @@ if (fac && Result->isComputed(ConeProperty::FaceLattice)) { // write face lattice write_fac(); } + + if (fac && Result->isComputed(ConeProperty::DualFaceLattice)) { // write dual face lattice + write_dual_fac(); + } - if (inc && Result->isComputed(ConeProperty::Incidence)) { // write face lattice + if (inc && Result->isComputed(ConeProperty::Incidence)) { // write incidence lattice write_inc(); } + + if (inc && Result->isComputed(ConeProperty::DualIncidence)) { // write incidence lattice + write_dual_inc(); + } if (out == true) { // printing .out file string name_open = name + ".out"; // preparing output files @@ -1110,6 +1186,12 @@ trunc = " (possibly truncated)"; out << "f-vector" << trunc << ":" << endl << Result->getFVector() << endl; } + if (Result->isComputed(ConeProperty::DualFVector)) { + string trunc = ""; + if (Result->getDualFVector()[0] != 1) + trunc = " (possibly truncated)"; + out << "dual f-vector" << trunc << ":" << endl << Result->getDualFVector() << endl; + } if (Result->isComputed(ConeProperty::ExcludedFaces)) { out << Result->getNrExcludedFaces() << " excluded faces" << endl; out << endl; @@ -1161,6 +1243,12 @@ } } out << endl; + if(Result->isComputed(ConeProperty::AxesScaling)){ + out << "scaling of axes" << endl; + out << Result->getAxesScaling(); + out << endl; + } + if (Result->isComputed(ConeProperty::TriangulationSize)) { out << "size of "; if (Result->isTriangulationNested()) @@ -1287,6 +1375,17 @@ out << endl; } } + + if (Result->isComputed(ConeProperty::IsEmptySemiOpen)) { + if (Result->isEmptySemiOpen()) { + out << "Semiopen polyhedron is empty" << endl; + out << "Covering face:" << endl; + out << Result->getCoveringFace(); + } + else + out << "Semiopen polyhedron is nonempty " << endl; + out << endl; + } if (Result->isComputed(ConeProperty::IsGorenstein)) { if 
(Result->isGorenstein()) { @@ -1398,16 +1497,19 @@ out << Result->getNrVerticesOfPolyhedron() << " vertices of polyhedron:" << endl; if (Result->isComputed(ConeProperty::VerticesFloat)) Result->getVerticesFloatMatrix().pretty_print( - out); // write_float(out,Result->getVerticesFloatMatrix(),Result->getNrVerticesFloat(),dim); + out); else Result->getVerticesOfPolyhedronMatrix().pretty_print(out); out << endl; } if (Result->isComputed(ConeProperty::ExtremeRays) && !no_ext_rays_output) { out << Result->getNrExtremeRays() << " extreme rays" << of_cone << ":" << endl; - if (homogeneous && Result->isComputed(ConeProperty::VerticesFloat)) - Result->getVerticesFloatMatrix().pretty_print( - out); // write_float(out,Result->getVerticesFloatMatrix(),Result->getNrVerticesFloat(),dim); + if (homogeneous && (Result->isComputed(ConeProperty::VerticesFloat) || Result->isComputed(ConeProperty::VerticesFloat)) ){ + if(Result->isComputed(ConeProperty::VerticesFloat)) + Result->getVerticesFloatMatrix().pretty_print(out); + else + Result->getExtremeRaysFloatMatrix().pretty_print(out); + } else Result->getExtremeRaysMatrix().pretty_print(out); out << endl; diff -Nru normaliz-3.8.5+ds/source/libnormaliz/output.h normaliz-3.8.9+ds/source/libnormaliz/output.h --- normaliz-3.8.5+ds/source/libnormaliz/output.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/output.h 2020-09-09 07:50:26.000000000 +0000 @@ -125,7 +125,9 @@ void write_tri() const; // writes the .tri file void write_aut() const; // writes the .aut file void write_fac() const; // writes the .fac file + void write_dual_fac() const; // writes the .fac file with duual face lattice void write_inc() const; // writes the .inc file + void write_dual_inc() const; // writes the .inc file with dual incidence void write_Stanley_dec() const; void write_matrix_ht1(const Matrix& M) const; // writes M to file name.ht1 diff -Nru normaliz-3.8.5+ds/source/libnormaliz/primal.cpp normaliz-3.8.9+ds/source/libnormaliz/primal.cpp --- normaliz-3.8.5+ds/source/libnormaliz/primal.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/primal.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,34 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(push, target(mic)) -#endif - -#include "libnormaliz/simplex.cpp" -#include "libnormaliz/full_cone.cpp" -#include "libnormaliz/bottom.cpp" - -#ifdef NMZ_MIC_OFFLOAD -#pragma offload_attribute(pop) -#endif diff -Nru normaliz-3.8.5+ds/source/libnormaliz/project_and_lift.cpp normaliz-3.8.9+ds/source/libnormaliz/project_and_lift.cpp --- normaliz-3.8.5+ds/source/libnormaliz/project_and_lift.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/project_and_lift.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -624,7 +624,7 @@ IntegerRet add_nr_Int = 0; if (MaxInterval >= MinInterval) add_nr_Int = 1 + MaxInterval - MinInterval; - long long add_nr = convertTo(add_nr_Int); + long long add_nr = convertToLongLong(add_nr_Int); if (dim == EmbDim && count_only && add_nr >= 1 && Congs.nr_of_rows() == 0 && Grading.size() == 0) { #pragma omp atomic TotalNrLP += add_nr; @@ -648,7 +648,7 @@ Deg1Thread[tn].push_back(NewPoint); if (Grading.size() > 0) { - long deg = convertTo(v_scalar_product(Grading, NewPoint)); + long deg = convertToLong(v_scalar_product(Grading, NewPoint)); if (deg >= 0) { if (deg >= (long)h_vec_pos_thread[tn].size()) h_vec_pos_thread[tn].resize(deg + 1); diff -Nru normaliz-3.8.5+ds/source/libnormaliz/reduction.h normaliz-3.8.9+ds/source/libnormaliz/reduction.h --- normaliz-3.8.5+ds/source/libnormaliz/reduction.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/reduction.h 2020-07-21 15:37:45.000000000 +0000 @@ -31,7 +31,7 @@ #include #include "libnormaliz/full_cone.h" -#include "libnormaliz/list_operations.h" +#include "libnormaliz/list_and_map_operations.h" //--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/libnormaliz/simplex.cpp normaliz-3.8.9+ds/source/libnormaliz/simplex.cpp --- normaliz-3.8.5+ds/source/libnormaliz/simplex.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/simplex.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -30,16 +30,16 @@ #include #include -#include +#include #include "libnormaliz/integer.h" #include "libnormaliz/vector_operations.h" #include "libnormaliz/matrix.h" #include "libnormaliz/simplex.h" -#include "libnormaliz/list_operations.h" +#include "libnormaliz/list_and_map_operations.h" #include "libnormaliz/HilbertSeries.h" #include "libnormaliz/cone.h" -#include "libnormaliz/bottom.h" +// #include "libnormaliz/bottom.h" //--------------------------------------------------------------------------- @@ -47,6 +47,209 @@ using namespace std; //--------------------------------------------------------------------------- +// Subdivision of large simplices +//--------------------------------------------------------------------------- + +long SubDivBound = 1000000; + +template +bool bottom_points_inner(Matrix& gens, + list >& local_new_points, + vector >& local_q_gens, + size_t& stellar_det_sum); + +template +void bottom_points(list >& new_points, const Matrix& given_gens, Integer VolumeBound) { + /* gens.pretty_print(cout); + cout << "=======================" << endl; + + gens.transpose().pretty_print(cout); + cout << "=======================" << endl;*/ + + Matrix gens, Trans, Trans_inv; + // given_gens.LLL_transform_transpose(gens,Trans,Trans_inv); // now in optimal_subdivision_point() + gens = given_gens; + + Integer volume; + // int dim = gens[0].size(); + Matrix Support_Hyperplanes = gens.invert(volume); + + vector grading; // = grading_; + if (grading.empty()) + grading = 
gens.find_linear_form(); + // cout << grading; + + list > bottom_candidates; + bottom_candidates.splice(bottom_candidates.begin(), new_points); + // Matrix(bottom_candidates).pretty_print(cout); + + if (verbose) { + verboseOutput() << "Computing bbottom points using projection " << endl; + } + + if (verbose) { + verboseOutput() << "simplex volume " << volume << endl; + } + + //---------------------------- begin stellar subdivision ------------------- + + size_t stellar_det_sum = 0; + vector > q_gens; // for successive stellar subdivision + q_gens.push_back(gens); + int level = 0; // level of subdivision + + std::exception_ptr tmp_exception; + bool skip_remaining = false; +#pragma omp parallel // reduction(+:stellar_det_sum) + { + try { + vector > local_q_gens; + list > local_new_points; + + while (!q_gens.empty()) { + if (skip_remaining) + break; + if (verbose) { +#pragma omp single + verboseOutput() << q_gens.size() << " simplices on level " << level++ << endl; + } + +#pragma omp for schedule(static) + for (size_t i = 0; i < q_gens.size(); ++i) { + if (skip_remaining) + continue; + + try { + bottom_points_inner(q_gens[i], local_new_points, local_q_gens, stellar_det_sum); + } catch (const std::exception&) { + tmp_exception = std::current_exception(); + skip_remaining = true; +#pragma omp flush(skip_remaining) + } + } + +#pragma omp single + { q_gens.clear(); } +#pragma omp critical(LOCALQGENS) + { q_gens.insert(q_gens.end(), local_q_gens.begin(), local_q_gens.end()); } + local_q_gens.clear(); +#pragma omp barrier + } + +#pragma omp critical(LOCALNEWPOINTS) + { new_points.splice(new_points.end(), local_new_points, local_new_points.begin(), local_new_points.end()); } + + } catch (const std::exception&) { + tmp_exception = std::current_exception(); + skip_remaining = true; +#pragma omp flush(skip_remaining) + } + + } // end parallel + + //---------------------------- end stellar subdivision ----------------------- + + if (!(tmp_exception == 0)) + std::rethrow_exception(tmp_exception); + + // cout << new_points.size() << " new points accumulated" << endl; + new_points.sort(); + new_points.unique(); + if (verbose) { + verboseOutput() << new_points.size() << " bottom points accumulated in total." 
<< endl; + verboseOutput() << "The sum of determinants of the stellar subdivision is " << stellar_det_sum << endl; + } + + /* for(auto& it : new_points) + it=Trans_inv.VxM(it); */ +} + +//----------------------------------------------------------------------------------------- + +template +bool bottom_points_inner(Matrix& gens, + list >& local_new_points, + vector >& local_q_gens, + size_t& stellar_det_sum) { + INTERRUPT_COMPUTATION_BY_EXCEPTION + + vector grading = gens.find_linear_form(); + Integer volume; + int dim = gens[0].size(); + Matrix Support_Hyperplanes = gens.invert(volume); + + if (volume < SubDivBound) { +#pragma omp atomic + stellar_det_sum += convertToLongLong(volume); + return false; // not subdivided + } + + // try st4ellar subdivision + Support_Hyperplanes = Support_Hyperplanes.transpose(); + Support_Hyperplanes.make_prime(); + vector new_point; + + if (new_point.empty()) { + list > Dummy; + new_point = gens.optimal_subdivision_point(); // projection method + } + + if (!new_point.empty()) { + // if (find(local_new_points.begin(), local_new_points.end(),new_point) == local_new_points.end()) + local_new_points.push_back(new_point); + Matrix stellar_gens(gens); + + int nr_hyps = 0; + for (int i = 0; i < dim; ++i) { + if (v_scalar_product(Support_Hyperplanes[i], new_point) != 0) { + stellar_gens[i] = new_point; + local_q_gens.push_back(stellar_gens); + + stellar_gens[i] = gens[i]; + } + else + nr_hyps++; + } + //#pragma omp critical(VERBOSE) + // cout << new_point << " liegt in " << nr_hyps <<" hyperebenen" << endl; + return true; // subdivided + } + else { // could not subdivided +#pragma omp atomic + stellar_det_sum += convertToLongLong(volume); + return false; + } +} + +// returns -1 if maximum is negative +template +double max_in_col(const Matrix& M, size_t j) { + Integer max = -1; + for (size_t i = 0; i < M.nr_of_rows(); ++i) { + if (M[i][j] > max) + max = M[i][j]; + } + return convert_to_double(max); +} + +// returns 1 if minimum is positive +template +double min_in_col(const Matrix& M, size_t j) { + Integer min = 1; + for (size_t i = 0; i < M.nr_of_rows(); ++i) { + if (M[i][j] < min) + min = M[i][j]; + } + return convert_to_double(min); +} + +#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported +template void bottom_points(list >& new_points, const Matrix& gens, long VolumeBound); +#endif // NMZ_MIC_OFFLOAD +template void bottom_points(list >& new_points, const Matrix& gens, long long VolumeBound); +template void bottom_points(list >& new_points, const Matrix& gens, mpz_class VolumeBound); + +//--------------------------------------------------------------------------- // SimplexEvaluator //--------------------------------------------------------------------------- @@ -205,10 +408,10 @@ if (C.inhomogeneous) { for (i = 0; i < dim; i++) { - // gen_levels[i] = convertTo(C.gen_levels[key[i]]); + // gen_levels[i] = convertToLong(C.gen_levels[key[i]]); gen_levels[i] = C.gen_levels[key[i]]; if (C.do_h_vector) - gen_levels_long[i] = convertTo(C.gen_levels[key[i]]); + gen_levels_long[i] = convertToLong(C.gen_levels[key[i]]); if (gen_levels[i] == 0) { nr_level0_gens++; if (C.do_h_vector) @@ -436,7 +639,7 @@ if (C_ptr->do_Stanley_dec) { // prepare space for Stanley dec STANLEYDATA_int SimplStanley; // key + matrix of offsets SimplStanley.key = key; - Matrix offsets(convertTo(volume), dim); // volume rows, dim columns + Matrix offsets(convertToLong(volume), dim); // volume rows, dim columns convert(SimplStanley.offsets, offsets); #pragma omp critical(STANLEY) { @@ -445,7 
+648,7 @@ } for (i = 0; i < dim; ++i) // the first vector is 0+offset if (Excluded[i]) - (*StanleyMat)[0][i] = convertTo(volume); + (*StanleyMat)[0][i] = convertToLong(volume); } StanIndex = 1; // counts the number of components in the Stanley dec. Vector at 0 already filled if necessary @@ -513,7 +716,7 @@ if (C.inhomogeneous) { for (i = 0; i < dim; i++) level_Int += element[i] * gen_levels[i]; - level = convertTo(level_Int / volume); // have to divide by volume; see above + level = convertToLong(level_Int / volume); // have to divide by volume; see above // cout << level << " ++ " << volume << " -- " << element; if (level > 1) @@ -532,7 +735,7 @@ size_t Deg = 0; if (C.do_h_vector) { - Deg = convertTo(normG / volume); + Deg = convertToLong(normG / volume); for (i = 0; i < dim; i++) { // take care of excluded facets and increase degree when necessary if (element[i] == 0 && Excluded[i]) { Deg += gen_degrees_long[i]; @@ -553,7 +756,7 @@ convert((*StanleyMat)[StanIndex], element); for (i = 0; i < dim; i++) if (Excluded[i] && element[i] == 0) - (*StanleyMat)[StanIndex][i] += convertTo(volume); + (*StanleyMat)[StanIndex][i] += convertToLong(volume); StanIndex++; } @@ -746,7 +949,7 @@ return true; take_care_of_0vector(C_ptr->Results[tn]); if (volume != 1) - evaluate_block(1, convertTo(volume) - 1, C_ptr->Results[tn]); + evaluate_block(1, convertToLong(volume) - 1, C_ptr->Results[tn]); conclude_evaluation(C_ptr->Results[tn]); return true; @@ -772,7 +975,7 @@ template void SimplexEvaluator::evaluation_loop_parallel() { size_t block_length = ParallelBlockLength; - size_t nr_elements = convertTo(volume) - 1; // 0-vector already taken care of + size_t nr_elements = convertToLong(volume) - 1; // 0-vector already taken care of size_t nr_blocks = nr_elements / ParallelBlockLength; if (nr_elements % ParallelBlockLength != 0) ++nr_blocks; @@ -873,7 +1076,7 @@ if (one_back > 0) { // define the last point processed before if it isn't 0 for (size_t i = 1; i <= dim; ++i) { point[dim - i] = one_back % GDiag[dim - i]; - one_back /= convertTo(GDiag[dim - i]); + one_back /= convertToLong(GDiag[dim - i]); } for (size_t i = 0; i < dim; ++i) { // put elements into the state at the end of the previous block @@ -921,6 +1124,13 @@ } } +template <> +void SimplexEvaluator::evaluate_block(long block_start, long block_end, Collector& Coll) { + + assert(false); + +} + //--------------------------------------------------------------------------- /* transfer the vector lists in the collectors to C_ptr->Results[0] */ @@ -1302,7 +1512,7 @@ size_t hv_max = 0; if (C_ptr->do_h_vector) { // we need the generators to be sorted by degree - long max_degree = convertTo(C_ptr->gen_degrees[C_ptr->nr_gen - 1]); + long max_degree = convertToLong(C_ptr->gen_degrees[C_ptr->nr_gen - 1]); hv_max = max_degree * C_ptr->dim; if (hv_max > 1000000) { throw BadInputException("Generator degrees are too huge, h-vector would contain more than 10^6 entires."); @@ -1367,4 +1577,24 @@ return collected_elements_size; } +#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported +template class SimplexEvaluator; +#endif +template class SimplexEvaluator; +template class SimplexEvaluator; + +#ifdef ENFNORMALIZ +template class SimplexEvaluator; +#endif + +#ifndef NMZ_MIC_OFFLOAD // offload with long is not supported +template class Collector; +#endif +template class Collector; +template class Collector; + +#ifdef ENFNORMALIZ +template class Collector; +#endif + } // namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/vector_operations.cpp 
normaliz-3.8.9+ds/source/libnormaliz/vector_operations.cpp --- normaliz-3.8.5+ds/source/libnormaliz/vector_operations.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/vector_operations.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,774 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -//--------------------------------------------------------------------------- - -#include -#include -#include -#include - -#include "libnormaliz/vector_operations.h" -#include "libnormaliz/matrix.h" - -//--------------------------------------------------------------------------- - -namespace libnormaliz { -using namespace std; - -//--------------------------------------------------------------------------- - -template -Integer v_scalar_product(const vector& av, const vector& bv) { - // loop stretching ; brings some small speed improvement - - Integer ans = 0; - size_t i, n = av.size(); - -#if 0 // #ifdef __MIC__ // not for newer compiler versions - // this version seems to be better vectorizable on the mic - for (i=0; i= 16) { - for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - ans += a[8] * b[8]; - ans += a[9] * b[9]; - ans += a[10] * b[10]; - ans += a[11] * b[11]; - ans += a[12] * b[12]; - ans += a[13] * b[13]; - ans += a[14] * b[14]; - ans += a[15] * b[15]; - } - - n -= i << 4; - } - - if (n >= 8) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - - n -= 8; - a += 8; - b += 8; - } - - if (n >= 4) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - - n -= 4; - a += 4; - b += 4; - } - - if (n >= 2) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - - n -= 2; - a += 2; - b += 2; - } - - if (n > 0) - ans += a[0] * b[0]; -#endif // __MIC__ - - if (!check_range(ans)) { -#pragma omp atomic - GMP_scal_prod++; - - // cout << "av " << av; - // cout << "bv " << bv; - vector mpz_a(av.size()), mpz_b(bv.size()); - convert(mpz_a, av); - convert(mpz_b, bv); - convert(ans, v_scalar_product(mpz_a, mpz_b)); - } - - return ans; -} - -template <> -nmz_float v_scalar_product(const vector& av, const vector& bv) { - // loop stretching ; brings some small speed improvement - - nmz_float ans = 0; - size_t i, n = av.size(); - - auto a = av.begin(), b = 
bv.begin(); - - if (n >= 16) { - for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - ans += a[8] * b[8]; - ans += a[9] * b[9]; - ans += a[10] * b[10]; - ans += a[11] * b[11]; - ans += a[12] * b[12]; - ans += a[13] * b[13]; - ans += a[14] * b[14]; - ans += a[15] * b[15]; - } - - n -= i << 4; - } - - if (n >= 8) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - - n -= 8; - a += 8; - b += 8; - } - - if (n >= 4) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - - n -= 4; - a += 4; - b += 4; - } - - if (n >= 2) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - - n -= 2; - a += 2; - b += 2; - } - - if (n > 0) - ans += a[0] * b[0]; - - return ans; -} - -#ifdef ENFNORMALIZ - -template <> -renf_elem_class v_scalar_product(const vector& av, const vector& bv) { - // loop stretching ; brings some small speed improvement - - assert(av.size() == bv.size()); - - renf_elem_class ans = 0; - size_t n = av.size(); - - for (size_t i = 0; i < n; ++i) { - if (av[i] != 0 && bv[i] != 0) - ans += av[i] * bv[i]; - } - return ans; - - /* typename vector::const_iterator a=av.begin(), b=bv.begin(); - - if( n >= 16 ) - { - for( i = 0; i < ( n >> 4 ); ++i, a += 16, b +=16 ){ - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - ans += a[8] * b[8]; - ans += a[9] * b[9]; - ans += a[10] * b[10]; - ans += a[11] * b[11]; - ans += a[12] * b[12]; - ans += a[13] * b[13]; - ans += a[14] * b[14]; - ans += a[15] * b[15]; - } - - n -= i<<4; - } - - if( n >= 8) - { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - - n -= 8; - a += 8; - b += 8; - } - - if( n >= 4) - { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - - n -= 4; - a += 4; - b += 4; - } - - if( n >= 2) - { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - - n -= 2; - a += 2; - b += 2; - } - - if(n>0) - ans += a[0]*b[0]; - - return ans;*/ -} - -#endif - -//--------------------------------------------------------------------------- - -template <> -mpq_class v_scalar_product(const vector& av, const vector& bv) { - // loop stretching ; brings some small speed improvement - - assert(false); - return 0; - -} - -/* body removed for the time being - mpq_class ans = 0; - size_t i, n = av.size(); - -#if 0 // #ifdef __MIC__ // not for newer compiler versions - // this version seems to be better vectorizable on the mic - for (i=0; i= 16) { - for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * b[7]; - ans += a[8] * b[8]; - ans += a[9] * b[9]; - ans += a[10] * b[10]; - ans += a[11] * b[11]; - ans += a[12] * b[12]; - ans += a[13] * b[13]; - ans += a[14] * b[14]; - ans += a[15] * b[15]; - } - - n -= i << 4; - } - - if (n >= 8) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - ans += a[4] * b[4]; - ans += a[5] * b[5]; - ans += a[6] * b[6]; - ans += a[7] * 
b[7]; - - n -= 8; - a += 8; - b += 8; - } - - if (n >= 4) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - ans += a[2] * b[2]; - ans += a[3] * b[3]; - - n -= 4; - a += 4; - b += 4; - } - - if (n >= 2) { - ans += a[0] * b[0]; - ans += a[1] * b[1]; - - n -= 2; - a += 2; - b += 2; - } - - if (n > 0) - ans += a[0] * b[0]; -#endif // __MIC__ - - return ans; -} - -*/ - -//--------------------------------------------------------------------------- - -template -vector v_select_coordinates(const vector& v, const vector projection_key) { - vector w(projection_key.size()); - for (size_t i = 0; i < w.size(); ++i) - w[i] = v[projection_key[i]]; - return w; -} - -//--------------------------------------------------------------------------- - -template -vector v_insert_coordinates(const vector& v, const vector projection_key, const size_t nr_cols) { - vector w(nr_cols); - for (size_t i = 0; i < projection_key.size(); ++i) { - assert(projection_key[i] < nr_cols); - w[projection_key[i]] = v[i]; - } - return w; -} -//--------------------------------------------------------------------------- - - -nmz_float l1norm(vector& v) { - size_t i, size = v.size(); - nmz_float g = 0; - for (i = 0; i < size; i++) { - if (Iabs(v[i]) > nmz_epsilon) - g += Iabs(v[i]); - else - v[i] = 0; - } - return g; -} - -/* -mpq_class l1norm(vector& v) { - size_t i, size = v.size(); - mpq_class g = 0; - for (i = 0; i < size; i++) { - if (Iabs(v[i]) > 0) - g += Iabs(v[i]); - else - v[i] = 0; - } - return g; -} -*/ - -/* for nmz_float is norms the vector to l_1 norm 1. - * - * for mpq_class and renf_elem_class it makes the vector coefficents integral - * - * then it extracts the gcd of the coefficients - */ - -template -Integer v_make_prime(vector& v) { - size_t i, size = v.size(); - -#ifdef ENFNORMALIZ - if (using_renf()) { - v_standardize(v); - make_integral(v); - return (1); - } -#endif - - if (using_mpq_class()) - make_integral(v); - Integer g = v_gcd(v); - if (g != 0 && g != 1) { - for (i = 0; i < size; i++) { - v[i] /= g; - } - } - return g; -} - - -template <> -nmz_float v_make_prime(vector& v) { - size_t i, size = v.size(); - nmz_float g = l1norm(v); - if (g != 0) { - for (i = 0; i < size; i++) { - v[i] /= g; - } - } - return g; -} - - -//--------------------------------------------------------------- - -// swaps entry i and j of the vector v -void v_bool_entry_swap(vector& v, size_t i, size_t j) { - if (v[i] != v[j]) { - v[i].flip(); - v[j].flip(); - } -} - -//--------------------------------------------------------------- - -vector identity_key(size_t n) { - vector key(n); - for (size_t k = 0; k < n; ++k) - key[k] = k; - return key; -} - -vector reverse_key(size_t n) { - vector key(n); - for (size_t k = 0; k < n; ++k) - key[k] = (n - 1) - k; - return key; -} - -vector random_key(size_t n) { - vector key = identity_key(n); - for (size_t k = 0; k < 3*n; ++k) - swap(key[rand() % n], key[rand() % n]); - return key; -} - -// vector is special because ordinary swap is not defined for it -void order_by_perm_bool(vector& v, const vector& permfix) { - vector perm = permfix; // we may want to use permfix a second time - vector inv(perm.size()); - for (key_t i = 0; i < perm.size(); ++i) - inv[perm[i]] = i; - for (key_t i = 0; i < perm.size(); ++i) { - key_t j = perm[i]; - // v.swap(v[i],v[perm[i]]); - v_bool_entry_swap(v, i, perm[i]); - swap(perm[i], perm[inv[i]]); - swap(inv[i], inv[j]); - } -} - -//--------------------------------------------------------------------------- - -template -void v_scalar_division(vector& v, const Integer 
scalar) { - size_t i, size = v.size(); - assert(scalar != 0); - for (i = 0; i < size; i++) { - assert(v[i] % scalar == 0); - v[i] /= scalar; - } -} - -template <> -void v_scalar_division(vector& v, const nmz_float scalar) { - size_t i, size = v.size(); - assert(scalar != 0); - for (i = 0; i < size; i++) { - v[i] /= scalar; - } -} - - - -template <> -void v_scalar_division(vector& v, const mpq_class scalar) { - size_t i, size = v.size(); - assert(scalar != 0); - for (i = 0; i < size; i++) { - v[i] /= scalar; - } -} - - -#ifdef ENFNORMALIZ -template <> -void v_scalar_division(vector& v, const renf_elem_class scalar) { - size_t i, size = v.size(); - assert(scalar != 0); - renf_elem_class fact = 1 / scalar; - for (i = 0; i < size; i++) { - v[i] *= fact; - } -} -#endif - -/* v_standardize - * - * defined only for mpq_class, nmz_float and renf_elem_class - * - * makes the value under LF equal to 1 (checks for positivity of value) - * - * or the last component equal to +-1 - */ - -template -Integer v_standardize(vector& v, const vector& LF) { - assert(false); - return 0; -} - -template -Integer v_standardize(vector& v) { - vector LF; - return v_standardize(v, LF); -} - -template <> -nmz_float v_standardize(vector& v, const vector& LF) { - nmz_float denom = 0; - if (LF.size() == v.size()) { - denom = v_scalar_product(v, LF); - } - - if (denom == 0) { - for (long i = (long)v.size() - 1; i >= 0; --i) { - if (v[i] != 0) { - denom = v[i]; - break; - } - } - } - denom = Iabs(denom); - - if (denom == 0) - return denom; - if (denom != 1) - v_scalar_division(v, denom); - - return denom; -} - -/* -template <> -mpq_class v_standardize(vector& v, const vector& LF) { - mpq_class denom = 0; - if (LF.size() == v.size()) { - denom = v_scalar_product(v, LF); - }; - - if (denom == 0) { - for (long i = (long)v.size() - 1; i >= 0; --i) { - if (v[i] != 0) { - denom = v[i]; - break; - } - } - } - denom = Iabs(denom); - - if (denom == 0) - return denom; - if (denom != 1) - v_scalar_division(v, denom); - - return denom; -} -*/ - -#ifdef ENFNORMALIZ - -template <> -renf_elem_class v_standardize(vector& v, const vector& LF) { - renf_elem_class denom = 0; - if (LF.size() == v.size()) { - denom = v_scalar_product(v, LF); - } - - if (denom == 0) { - for (long i = (long)v.size() - 1; i >= 0; --i) { - if (v[i] != 0) { - denom = v[i]; - break; - } - } - } - denom = Iabs(denom); - - if (denom == 0) - return denom; - if (denom != 1) - v_scalar_division(v, denom); - - return denom; -} -#endif - -template long v_standardize(vector&, const vector&); -template long long v_standardize(vector&, const vector&); -template mpz_class v_standardize(vector&, const vector&); -#ifdef ENFNORMALIZ -template renf_elem_class v_standardize(vector&, const vector&); -#endif - -template long v_standardize(vector&); -template long long v_standardize(vector&); -template mpz_class v_standardize(vector&); -#ifdef ENFNORMALIZ -template renf_elem_class v_standardize(vector&); -#endif - -/* Not used presently -// the following function removes the denominators and then extracts the Gcd of the numerators -mpq_class v_standardize(vector& v, const vector& LF){ - size_t size=v.size(); - mpz_class d=1; - for (size_t i = 0; i < size; i++) - //d=lcm(d,v[i].get_den()); // GMP C++ function only available in GMP >= 6.1 - mpz_lcm(d.get_mpz_t(), d.get_mpz_t(), v[i].get_den().get_mpz_t()); - for (size_t i = 0; i < size; i++) - v[i]*=d; - mpz_class g=0; - for (size_t i = 0; i < size; i++) - //g=gcd(g,v[i].get_num()); // GMP C++ function only available in GMP >= 6.1 - 
mpz_gcd(g.get_mpz_t(), g.get_mpz_t(), v[i].get_num().get_mpz_t()); - if (g==0) - return 0; - for (size_t i = 0; i < size; i++) - v[i]/=g; - return 1; -} -*/ - -template -vector v_scalar_mult_mod(const vector& v, const Integer& scalar, const Integer& modulus) { - vector w(v.size()); - if (v_scalar_mult_mod_inner(w, v, scalar, modulus)) - return w; - -#pragma omp atomic - GMP_scal_prod++; - vector x, y(v.size()); - convert(x, v); - v_scalar_mult_mod_inner(y, x, convertTo(scalar), convertTo(modulus)); - return convertTo >(y); -} - -template vector v_scalar_mult_mod(const vector&, const long long&, const long long&); -template vector v_scalar_mult_mod(const vector&, const long&, const long&); -template vector v_scalar_mult_mod(const vector&, const mpz_class&, const mpz_class&); - -template void v_scalar_division(vector& v, const long scalar); -template void v_scalar_division(vector& v, const long long scalar); -template void v_scalar_division(vector& v, const mpz_class scalar); - -template long v_make_prime(vector&); -template long long v_make_prime(vector&); -template mpz_class v_make_prime(vector&); -#ifdef ENFNORMALIZ -template renf_elem_class v_make_prime(vector&); -#endif - -template long v_scalar_product(const vector& a, const vector& b); -template long long v_scalar_product(const vector& a, const vector& b); -template mpz_class v_scalar_product(const vector& a, const vector& b); - -vector bitset_to_bool(const dynamic_bitset& val) { - vector ret(val.size()); - for (size_t i = 0; i < val.size(); ++i) - ret[i] = val[i]; - return ret; -} - -dynamic_bitset bool_to_bitset(const vector& val) { - dynamic_bitset ret(val.size()); - for (size_t i = 0; i < val.size(); ++i) - ret[i] = val[i]; - return ret; -} - -vector bitset_to_key(const dynamic_bitset& val) { - vector ret; - for (size_t i = 0; i < val.size(); ++i) - if(val[i]) - ret.push_back(i); - return ret; -} - -dynamic_bitset key_to_bitset(const vector& key, long size){ - - dynamic_bitset bs(size); - for(size_t i=0; i< key.size(); ++i){ - assert(key[i] < size); - bs[key[i]] = 1; - } - return bs; -} - -} // end namespace libnormaliz diff -Nru normaliz-3.8.5+ds/source/libnormaliz/vector_operations.h normaliz-3.8.9+ds/source/libnormaliz/vector_operations.h --- normaliz-3.8.5+ds/source/libnormaliz/vector_operations.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/libnormaliz/vector_operations.h 2020-07-21 15:37:45.000000000 +0000 @@ -31,7 +31,7 @@ #include "libnormaliz/general.h" #include "libnormaliz/integer.h" -#include "libnormaliz/convert.h" +// #include "libnormaliz/convert.h" #include "libnormaliz/dynamic_bitset.h" #ifdef NMZ_FLINT @@ -588,6 +588,644 @@ v_scalar_multiplication(vec, fact); } +//============================================================= + +// old vector_operations.cpp + +template +Integer v_scalar_product(const vector& av, const vector& bv) { + // loop stretching ; brings some small speed improvement + + Integer ans = 0; + size_t i, n = av.size(); + +#if 0 // #ifdef __MIC__ // not for newer compiler versions + // this version seems to be better vectorizable on the mic + for (i=0; i= 16) { + for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += a[7] * b[7]; + ans += a[8] * b[8]; + ans += a[9] * b[9]; + ans += a[10] * b[10]; + ans += a[11] * b[11]; + ans += a[12] * b[12]; + ans += a[13] * b[13]; + ans += a[14] * b[14]; + ans += a[15] * b[15]; + } + + 
n -= i << 4; + } + + if (n >= 8) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += a[7] * b[7]; + + n -= 8; + a += 8; + b += 8; + } + + if (n >= 4) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + + n -= 4; + a += 4; + b += 4; + } + + if (n >= 2) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + + n -= 2; + a += 2; + b += 2; + } + + if (n > 0) + ans += a[0] * b[0]; +#endif // __MIC__ + + if (!check_range(ans)) { +#pragma omp atomic + GMP_scal_prod++; + + // cout << "av " << av; + // cout << "bv " << bv; + vector mpz_a(av.size()), mpz_b(bv.size()); + convert(mpz_a, av); + convert(mpz_b, bv); + convert(ans, v_scalar_product(mpz_a, mpz_b)); + } + + return ans; +} + +template <> +inline nmz_float v_scalar_product(const vector& av, const vector& bv) { + // loop stretching ; brings some small speed improvement + + nmz_float ans = 0; + size_t i, n = av.size(); + + auto a = av.begin(), b = bv.begin(); + + if (n >= 16) { + for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += a[7] * b[7]; + ans += a[8] * b[8]; + ans += a[9] * b[9]; + ans += a[10] * b[10]; + ans += a[11] * b[11]; + ans += a[12] * b[12]; + ans += a[13] * b[13]; + ans += a[14] * b[14]; + ans += a[15] * b[15]; + } + + n -= i << 4; + } + + if (n >= 8) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += a[7] * b[7]; + + n -= 8; + a += 8; + b += 8; + } + + if (n >= 4) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + + n -= 4; + a += 4; + b += 4; + } + + if (n >= 2) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + + n -= 2; + a += 2; + b += 2; + } + + if (n > 0) + ans += a[0] * b[0]; + + return ans; +} + +#ifdef ENFNORMALIZ + +template <> +inline renf_elem_class v_scalar_product(const vector& av, const vector& bv) { + // loop stretching ; brings some small speed improvement + + assert(av.size() == bv.size()); + + renf_elem_class ans = 0; + size_t n = av.size(); + + for (size_t i = 0; i < n; ++i) { + if (av[i] != 0 && bv[i] != 0) + ans += av[i] * bv[i]; + } + return ans; +} + +#endif + +//--------------------------------------------------------------------------- + +template <> +inline mpq_class v_scalar_product(const vector& av, const vector& bv) { + // loop stretching ; brings some small speed improvement + + assert(false); + return 0; + +} + +/* body removed for the time being + mpq_class ans = 0; + size_t i, n = av.size(); + +#if 0 // #ifdef __MIC__ // not for newer compiler versions + // this version seems to be better vectorizable on the mic + for (i=0; i= 16) { + for (i = 0; i < (n >> 4); ++i, a += 16, b += 16) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += a[7] * b[7]; + ans += a[8] * b[8]; + ans += a[9] * b[9]; + ans += a[10] * b[10]; + ans += a[11] * b[11]; + ans += a[12] * b[12]; + ans += a[13] * b[13]; + ans += a[14] * b[14]; + ans += a[15] * b[15]; + } + + n -= i << 4; + } + + if (n >= 8) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + ans += a[4] * b[4]; + ans += a[5] * b[5]; + ans += a[6] * b[6]; + ans += 
a[7] * b[7]; + + n -= 8; + a += 8; + b += 8; + } + + if (n >= 4) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + ans += a[2] * b[2]; + ans += a[3] * b[3]; + + n -= 4; + a += 4; + b += 4; + } + + if (n >= 2) { + ans += a[0] * b[0]; + ans += a[1] * b[1]; + + n -= 2; + a += 2; + b += 2; + } + + if (n > 0) + ans += a[0] * b[0]; +#endif // __MIC__ + + return ans; +} + +*/ + +//--------------------------------------------------------------------------- + +template +vector v_select_coordinates(const vector& v, const vector projection_key) { + vector w(projection_key.size()); + for (size_t i = 0; i < w.size(); ++i) + w[i] = v[projection_key[i]]; + return w; +} + +//--------------------------------------------------------------------------- + +template +vector v_insert_coordinates(const vector& v, const vector projection_key, const size_t nr_cols) { + vector w(nr_cols); + for (size_t i = 0; i < projection_key.size(); ++i) { + assert(projection_key[i] < nr_cols); + w[projection_key[i]] = v[i]; + } + return w; +} +//--------------------------------------------------------------------------- + + +inline nmz_float l1norm(vector& v) { + size_t i, size = v.size(); + nmz_float g = 0; + for (i = 0; i < size; i++) { + if (Iabs(v[i]) > nmz_epsilon) + g += Iabs(v[i]); + else + v[i] = 0; + } + return g; +} + +/* +mpq_class l1norm(vector& v) { + size_t i, size = v.size(); + mpq_class g = 0; + for (i = 0; i < size; i++) { + if (Iabs(v[i]) > 0) + g += Iabs(v[i]); + else + v[i] = 0; + } + return g; +} +*/ + +/* for nmz_float is norms the vector to l_1 norm 1. + * + * for mpq_class and renf_elem_class it makes the vector coefficents integral + * + * then it extracts the gcd of the coefficients + */ + +template +Integer v_make_prime(vector& v) { + size_t i, size = v.size(); + +#ifdef ENFNORMALIZ + if (using_renf()) { + v_standardize(v); + make_integral(v); + return (1); + } +#endif + + if (using_mpq_class()) + make_integral(v); + Integer g = v_gcd(v); + if (g != 0 && g != 1) { + for (i = 0; i < size; i++) { + v[i] /= g; + } + } + return g; +} + + +template <> +inline nmz_float v_make_prime(vector& v) { + size_t i, size = v.size(); + nmz_float g = l1norm(v); + if (g != 0) { + for (i = 0; i < size; i++) { + v[i] /= g; + } + } + return g; +} + + +//--------------------------------------------------------------- + +// swaps entry i and j of the vector v +inline void v_bool_entry_swap(vector& v, size_t i, size_t j) { + if (v[i] != v[j]) { + v[i].flip(); + v[j].flip(); + } +} + +//--------------------------------------------------------------- + +inline vector identity_key(size_t n) { + vector key(n); + for (size_t k = 0; k < n; ++k) + key[k] = k; + return key; +} + +inline vector reverse_key(size_t n) { + vector key(n); + for (size_t k = 0; k < n; ++k) + key[k] = (n - 1) - k; + return key; +} + +inline vector random_key(size_t n) { + vector key = identity_key(n); + for (size_t k = 0; k < 3*n; ++k) + swap(key[rand() % n], key[rand() % n]); + return key; +} + +// vector is special because ordinary swap is not defined for it +inline void order_by_perm_bool(vector& v, const vector& permfix) { + vector perm = permfix; // we may want to use permfix a second time + vector inv(perm.size()); + for (key_t i = 0; i < perm.size(); ++i) + inv[perm[i]] = i; + for (key_t i = 0; i < perm.size(); ++i) { + key_t j = perm[i]; + // v.swap(v[i],v[perm[i]]); + v_bool_entry_swap(v, i, perm[i]); + swap(perm[i], perm[inv[i]]); + swap(inv[i], inv[j]); + } +} + +//--------------------------------------------------------------------------- + 
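[Not part of the upstream patch: the contract of v_make_prime above is easier to see on concrete values. A minimal sketch, assuming Integer = long and that v_gcd returns the non-negative gcd as elsewhere in libnormaliz; the nmz_float case follows the specialization shown above, which divides by the l_1 norm instead of the gcd.]

    #include "libnormaliz/vector_operations.h"
    #include <cassert>
    #include <vector>

    void v_make_prime_examples() {
        using namespace libnormaliz;

        std::vector<long> v{6, 10, -4};
        long g = v_make_prime(v);          // extracts the gcd ...
        assert(g == 2);                    // ... and v is now {3, 5, -2}

        std::vector<nmz_float> w{0.5, -1.5};
        nmz_float n = v_make_prime(w);     // l_1 norm is 2.0, so w becomes {0.25, -0.75}
        (void)n;
    }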
+template +void v_scalar_division(vector& v, const Integer scalar) { + size_t i, size = v.size(); + assert(scalar != 0); + for (i = 0; i < size; i++) { + assert(v[i] % scalar == 0); + v[i] /= scalar; + } +} + +template <> +inline void v_scalar_division(vector& v, const nmz_float scalar) { + size_t i, size = v.size(); + assert(scalar != 0); + for (i = 0; i < size; i++) { + v[i] /= scalar; + } +} + + + +template <> +inline void v_scalar_division(vector& v, const mpq_class scalar) { + size_t i, size = v.size(); + assert(scalar != 0); + for (i = 0; i < size; i++) { + v[i] /= scalar; + } +} + + +#ifdef ENFNORMALIZ +template <> +inline void v_scalar_division(vector& v, const renf_elem_class scalar) { + size_t i, size = v.size(); + assert(scalar != 0); + renf_elem_class fact = 1 / scalar; + for (i = 0; i < size; i++) { + v[i] *= fact; + } +} +#endif + +/* v_standardize + * + * defined only for mpq_class, nmz_float and renf_elem_class + * + * makes the value under LF equal to 1 (checks for positivity of value) + * + * or the last component equal to +-1 + */ + +template +Integer v_standardize(vector& v, const vector& LF) { + assert(false); + return 0; +} + +template +Integer v_standardize(vector& v) { + vector LF; + return v_standardize(v, LF); +} + +template <> +inline nmz_float v_standardize(vector& v, const vector& LF) { + nmz_float denom = 0; + if (LF.size() == v.size()) { + denom = v_scalar_product(v, LF); + } + + if (denom == 0) { + for (long i = (long)v.size() - 1; i >= 0; --i) { + if (v[i] != 0) { + denom = v[i]; + break; + } + } + } + denom = Iabs(denom); + + if (denom == 0) + return denom; + if (denom != 1) + v_scalar_division(v, denom); + + return denom; +} + +/* +template <> +mpq_class v_standardize(vector& v, const vector& LF) { + mpq_class denom = 0; + if (LF.size() == v.size()) { + denom = v_scalar_product(v, LF); + }; + + if (denom == 0) { + for (long i = (long)v.size() - 1; i >= 0; --i) { + if (v[i] != 0) { + denom = v[i]; + break; + } + } + } + denom = Iabs(denom); + + if (denom == 0) + return denom; + if (denom != 1) + v_scalar_division(v, denom); + + return denom; +} +*/ + +#ifdef ENFNORMALIZ + +template <> +inline renf_elem_class v_standardize(vector& v, const vector& LF) { + renf_elem_class denom = 0; + if (LF.size() == v.size()) { + denom = v_scalar_product(v, LF); + } + + if (denom == 0) { + for (long i = (long)v.size() - 1; i >= 0; --i) { + if (v[i] != 0) { + denom = v[i]; + break; + } + } + } + denom = Iabs(denom); + + if (denom == 0) + return denom; + if (denom != 1) + v_scalar_division(v, denom); + + return denom; +} +#endif + + +/* Not used presently +// the following function removes the denominators and then extracts the Gcd of the numerators +mpq_class v_standardize(vector& v, const vector& LF){ + size_t size=v.size(); + mpz_class d=1; + for (size_t i = 0; i < size; i++) + //d=lcm(d,v[i].get_den()); // GMP C++ function only available in GMP >= 6.1 + mpz_lcm(d.get_mpz_t(), d.get_mpz_t(), v[i].get_den().get_mpz_t()); + for (size_t i = 0; i < size; i++) + v[i]*=d; + mpz_class g=0; + for (size_t i = 0; i < size; i++) + //g=gcd(g,v[i].get_num()); // GMP C++ function only available in GMP >= 6.1 + mpz_gcd(g.get_mpz_t(), g.get_mpz_t(), v[i].get_num().get_mpz_t()); + if (g==0) + return 0; + for (size_t i = 0; i < size; i++) + v[i]/=g; + return 1; +} +*/ + +template +vector v_scalar_mult_mod(const vector& v, const Integer& scalar, const Integer& modulus) { + vector w(v.size()); + if (v_scalar_mult_mod_inner(w, v, scalar, modulus)) + return w; + +#pragma omp atomic + 
GMP_scal_prod++; + vector x, y(v.size()); + convert(x, v); + v_scalar_mult_mod_inner(y, x, convertTo(scalar), convertTo(modulus)); + return convertTo >(y); +} + +inline vector bitset_to_bool(const dynamic_bitset& val) { + vector ret(val.size()); + for (size_t i = 0; i < val.size(); ++i) + ret[i] = val[i]; + return ret; +} + +inline dynamic_bitset bool_to_bitset(const vector& val) { + dynamic_bitset ret(val.size()); + for (size_t i = 0; i < val.size(); ++i) + ret[i] = val[i]; + return ret; +} + +inline vector bitset_to_key(const dynamic_bitset& val) { + vector ret; + for (size_t i = 0; i < val.size(); ++i) + if(val[i]) + ret.push_back(i); + return ret; +} + +inline dynamic_bitset key_to_bitset(const vector& key, long size){ + + dynamic_bitset bs(size); + for(size_t i=0; i< key.size(); ++i){ + assert(key[i] < size); + bs[key[i]] = 1; + } + return bs; +} + + + } // namespace libnormaliz //--------------------------------------------------------------------------- diff -Nru normaliz-3.8.5+ds/source/Makefile.am normaliz-3.8.9+ds/source/Makefile.am --- normaliz-3.8.5+ds/source/Makefile.am 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/Makefile.am 2020-09-25 14:54:40.000000000 +0000 @@ -21,14 +21,13 @@ libnormaliz/automorph.h \ libnormaliz/cone_property.h \ libnormaliz/cone.h \ - libnormaliz/convert.h \ libnormaliz/dynamic_bitset.h \ libnormaliz/general.h \ libnormaliz/HilbertSeries.h \ libnormaliz/input_type.h \ libnormaliz/integer.h \ libnormaliz/libnormaliz.h \ - libnormaliz/map_operations.h \ + libnormaliz/list_and_map_operations.h \ libnormaliz/matrix.h \ libnormaliz/my_omp.h \ libnormaliz/nmz_config.h \ @@ -41,55 +40,45 @@ # Sources libnormaliz_la_SOURCES = \ - libnormaliz/cone_and_control.cpp \ - libnormaliz/dynamic_bitset.cpp \ - libnormaliz/enumeration.cpp \ - libnormaliz/linear_algebra.cpp \ + libnormaliz/automorph.cpp \ + libnormaliz/collection.cpp \ + libnormaliz/cone_dual_mode.cpp \ + libnormaliz/cone_property.cpp \ + libnormaliz/cone.cpp \ + libnormaliz/descent.cpp \ + libnormaliz/face_lattice.cpp \ + libnormaliz/full_cone.cpp \ + libnormaliz/general.cpp \ + libnormaliz/HilbertSeries.cpp \ + libnormaliz/input.cpp \ + libnormaliz/matrix.cpp \ + libnormaliz/nmz_integral.cpp \ libnormaliz/nmz_nauty.cpp \ libnormaliz/offload_handler.cpp \ - libnormaliz/other_algorithms.cpp \ + libnormaliz/options.cpp \ libnormaliz/output.cpp \ - libnormaliz/primal.cpp + libnormaliz/project_and_lift.cpp \ + libnormaliz/reduction.cpp \ + libnormaliz/simplex.cpp \ + libnormaliz/sublattice_representation.cpp + # Other headers (not installed) noinst_HEADERS = \ - libnormaliz/bottom.h \ libnormaliz/cone_dual_mode.h \ libnormaliz/descent.h \ + libnormaliz/face_lattice.h \ libnormaliz/full_cone.h \ - libnormaliz/list_operations.h \ libnormaliz/nmz_integrate.h \ libnormaliz/offload_handler.h \ libnormaliz/project_and_lift.h \ libnormaliz/reduction.h \ libnormaliz/simplex.h \ + libnormaliz/options.h \ + libnormaliz/input.h \ libnormaliz/collection.h -# Sources included from other source files: -noinst_HEADERS += \ - libnormaliz/automorph.cpp \ - libnormaliz/bottom.cpp \ - libnormaliz/cone_dual_mode.cpp \ - libnormaliz/cone_property.cpp \ - libnormaliz/cone.cpp \ - libnormaliz/descent.cpp \ - libnormaliz/full_cone.cpp \ - libnormaliz/general.cpp \ - libnormaliz/HilbertSeries.cpp \ - libnormaliz/input_type.cpp \ - libnormaliz/integer.cpp \ - libnormaliz/list_operations.cpp \ - libnormaliz/matrix.cpp \ - libnormaliz/nmz_integral.cpp \ - libnormaliz/nmz_polynomial.cpp \ - 
libnormaliz/project_and_lift.cpp \ - libnormaliz/reduction.cpp \ - libnormaliz/simplex.cpp \ - libnormaliz/sublattice_representation.cpp \ - libnormaliz/vector_operations.cpp \ - libnormaliz/collection.cpp - -libnormaliz_la_LDFLAGS = -no-undefined -version-info 11:5:8 +libnormaliz_la_LDFLAGS = -no-undefined -version-info 11:9:8 ## Conjecture: x.y.z yields x-z.z.y #libnormaliz_la_LIBADD = $(GMP_LIBS) @@ -98,10 +87,6 @@ normaliz_SOURCES = normaliz.cpp normaliz_LDADD = libnormaliz.la -noinst_HEADERS += options.h - -# Sources included via Normaliz.cpp -noinst_HEADERS += input.cpp options.cpp # Uninstalled binary maxsimplex noinst_PROGRAMS = maxsimplex/maxsimplex diff -Nru normaliz-3.8.5+ds/source/Makefile.classic normaliz-3.8.9+ds/source/Makefile.classic --- normaliz-3.8.5+ds/source/Makefile.classic 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/Makefile.classic 2020-07-21 15:37:45.000000000 +0000 @@ -21,7 +21,7 @@ ## normaliz.o: $(SOURCES) $(HEADERS) $(LIBHEADERS) ## $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c normaliz.cpp -o normaliz.o -normaliz: $(SOURCES) $(HEADERS) libnormaliz/libnormaliz.a +normaliz: $(SOURCES) $(HEADERS) libnormaliz/libnormaliz.a normaliz.cpp ## $(CXX) $(CXXFLAGS) $(NORMFLAGS) -c normaliz.cpp -o normaliz.o $(CXX) $(CXXFLAGS) $(NORMFLAGS) normaliz.cpp libnormaliz/libnormaliz.a $(LINKFLAGS) -o normaliz ## -rm normaliz.o diff -Nru normaliz-3.8.5+ds/source/maxsimplex/maxsimplex.cpp normaliz-3.8.9+ds/source/maxsimplex/maxsimplex.cpp --- normaliz-3.8.5+ds/source/maxsimplex/maxsimplex.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/maxsimplex/maxsimplex.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -1,6 +1,7 @@ -#include +#include #include #include +#include #ifdef _OPENMP #include #endif diff -Nru normaliz-3.8.5+ds/source/normaliz.cpp normaliz-3.8.9+ds/source/normaliz.cpp --- normaliz-3.8.5+ds/source/normaliz.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/normaliz.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -21,7 +21,7 @@ * terms of service. */ -#include +#include #include #include #include @@ -38,29 +38,13 @@ #include "libnormaliz/integer.h" #include "libnormaliz/cone.h" #include "libnormaliz/output.h" +#include "libnormaliz/input.h" using namespace libnormaliz; -#include "input.cpp" -#include "options.cpp" -#ifndef STRINGIFY -#define STRINGIFYx(Token) #Token -#define STRINGIFY(Token) STRINGIFYx(Token) -#endif long CCCCCCC = 0; -void printCopying() { - cout << "Copyright (C) 2007-2019 The Normaliz Team, University of Osnabrueck." << endl - << "This program comes with ABSOLUTELY NO WARRANTY; This is free software," << endl - << "and you are welcome to redistribute it under certain conditions;" << endl - << "See COPYING for details." 
<< endl; -} - -void printVersion() { - cout << "Normaliz " << string(STRINGIFY(NMZ_VERSION)) << endl; - printCopying(); -} void printHeader() { cout << " \\.....|" << endl; @@ -198,7 +182,7 @@ printHeader(); } - renf_class number_field; // is bool without e-antic + renf_class number_field; // is long without e-antic process_data(options, command_line, number_field); diff -Nru normaliz-3.8.5+ds/source/options.cpp normaliz-3.8.9+ds/source/options.cpp --- normaliz-3.8.5+ds/source/options.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/options.cpp 1970-01-01 00:00:00.000000000 +0000 @@ -1,484 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. - */ - -#include -#include -#include -#include -#include -#include -using namespace std; - -#include "libnormaliz/cone.h" -#include "libnormaliz/output.h" -using namespace libnormaliz; - -#include "options.h" - -void printHeader(); -void printCopying(); -void printVersion(); - -OptionsHandler::OptionsHandler() { - project_name_set = false; - output_dir_set = false; - write_extra_files = false, write_all_files = false; - // use_Big_Integer = false; - use_long_long = false; - ignoreInFileOpt = false; - nr_threads = 0; - no_ext_rays_output = false; - no_supp_hyps_output = false; - no_matrices_output = false; -} - -bool OptionsHandler::handle_commandline(int argc, char* argv[]) { - vector LongOptions; - string ShortOptions; // all options concatenated (including -) - // read command line options - for (int i = 1; i < argc; i++) { - if (argv[i][0] == '-') { - if (argv[i][1] != '\0') { - if (argv[i][1] != 'x') { - if (argv[i][1] == '-') { - string LO = argv[i]; - LO.erase(0, 2); - LongOptions.push_back(LO); - } - else - ShortOptions = ShortOptions + argv[i]; - } - else if (argv[i][2] == '=') { -#ifdef _OPENMP - string Threads = argv[i]; - Threads.erase(0, 3); - if ((istringstream(Threads) >> nr_threads) && nr_threads >= 0) { - set_thread_limit(nr_threads); - // omp_set_num_threads(nr_threads); -- now in cone.cpp - } - else { - cerr << "Error: Invalid option string " << argv[i] << endl; - exit(1); - } -#else - cerr << "Warning: Compiled without OpenMP support, option " << argv[i] << " ignored." 
<< endl; -#endif - } - else { - cerr << "Error: Invalid option string " << argv[i] << endl; - exit(1); - } - } - } - else { - setProjectName(argv[i]); - } - } - return handle_options(LongOptions, ShortOptions); -} - -void OptionsHandler::setProjectName(const string& s) { - if (project_name_set) { - cerr << "Error: Second project name " << s << " in command line!" << endl; - exit(1); - } - project_name = s; - // check if we can read the .in file - string name_in = project_name + ".in"; - const char* file_in = name_in.c_str(); - ifstream in2; - in2.open(file_in, ifstream::in); - if (in2.is_open() == false) { - // check if user added ".in" and ignore it in this case - string suffix(".in"); - size_t found = project_name.rfind(suffix); - if (found != string::npos) { - project_name.erase(found); - } - } - else { - in2.close(); - } - project_name_set = true; -} - -void OptionsHandler::setOutputDirName(const string& s) { - output_dir = s; - char slash = '/'; -#ifdef _WIN32 // for 32 and 64 bit windows - slash = '\\'; -#endif - if (output_dir[output_dir.size() - 1] != slash) - output_dir += slash; - output_dir_set = true; -} - -bool OptionsHandler::handle_options(vector& LongOptions, string& ShortOptions) { - // Analyzing short command line options - for (size_t i = 1; i < ShortOptions.size(); i++) { - switch (ShortOptions[i]) { - case '-': - break; - case 'c': - verbose = true; - break; - case 'f': - write_extra_files = true; - break; - case 'a': - write_all_files = true; - break; - case 'T': - to_compute.set(ConeProperty::Triangulation); - // to_compute.set(ConeProperty::Multiplicity); - break; - case 'F': - to_compute.set(ConeProperty::Descent); - break; - case 's': - to_compute.set(ConeProperty::SupportHyperplanes); - break; - case 'S': - to_compute.set(ConeProperty::Sublattice); - break; - case 't': - to_compute.set(ConeProperty::TriangulationSize); - break; - case 'v': - to_compute.set(ConeProperty::Multiplicity); - break; - case 'V': - to_compute.set(ConeProperty::Volume); - break; - case 'n': - to_compute.set(ConeProperty::HilbertBasis); - to_compute.set(ConeProperty::Multiplicity); - break; - case 'N': - to_compute.set(ConeProperty::HilbertBasis); - break; - case 'w': - to_compute.set(ConeProperty::WitnessNotIntegrallyClosed); - break; - case '1': - to_compute.set(ConeProperty::Deg1Elements); - break; - case 'q': - to_compute.set(ConeProperty::HilbertSeries); - break; - case 'p': - to_compute.set(ConeProperty::HilbertSeries); - to_compute.set(ConeProperty::Deg1Elements); - break; - case 'h': - to_compute.set(ConeProperty::HilbertBasis); - to_compute.set(ConeProperty::HilbertSeries); - break; - case 'y': - to_compute.set(ConeProperty::StanleyDec); - break; - case 'd': - to_compute.set(ConeProperty::DualMode); - break; - case 'r': - to_compute.set(ConeProperty::Approximate); - break; - case 'e': // check for arithmetic overflow - // test_arithmetic_overflow=true; - cerr << "WARNING: deprecated option -e is ignored." 
<< endl; - break; - case 'B': // use Big Integer - to_compute.set(ConeProperty::BigInt); // use_Big_Integer=true; - break; - case 'b': // use the bottom decomposition for the triangulation - to_compute.set(ConeProperty::BottomDecomposition); - break; - case 'C': // compute the class group - to_compute.set(ConeProperty::ClassGroup); - break; - case 'k': // keep the order of the generators in Full_Cone - to_compute.set(ConeProperty::KeepOrder); - break; - case 'o': // suppress bottom decomposition in Full_Cone - to_compute.set(ConeProperty::NoBottomDec); - break; - case 'M': // compute minimal system of generators of integral closure - // as a module over original monoid - to_compute.set(ConeProperty::ModuleGeneratorsOverOriginalMonoid); - break; - case '?': // print help text and exit - return true; - break; - case 'x': // should be separated from other options - cerr << "Error: Option -x= has to be separated from other options" << endl; - exit(1); - break; - case 'I': - to_compute.set(ConeProperty::Integral); - break; - case 'L': - to_compute.set(ConeProperty::VirtualMultiplicity); - break; - case 'E': - to_compute.set(ConeProperty::WeightedEhrhartSeries); - break; - case 'i': - ignoreInFileOpt = true; - break; - case 'H': - to_compute.set(ConeProperty::IntegerHull); - break; - case 'D': - to_compute.set(ConeProperty::ConeDecomposition); - break; - case 'P': - to_compute.set(ConeProperty::PrimalMode); - break; - case 'Y': - to_compute.set(ConeProperty::Symmetrize); - break; - case 'X': - to_compute.set(ConeProperty::NoSymmetrization); - break; - case 'G': - to_compute.set(ConeProperty::IsGorenstein); - break; - case 'j': - to_compute.set(ConeProperty::Projection); - break; - case 'J': - to_compute.set(ConeProperty::ProjectionFloat); - break; - default: - cerr << "Error: Unknown option -" << ShortOptions[i] << endl; - exit(1); - break; - } - } - - // Remember to update also the --help text and the documentation when changing this! 
- vector AdmissibleOut; - string AdmissibleOutarray[] = {"gen", "cst", "inv", "ext", "ht1", "esp", - "egn", "typ", "lat", "msp", "mod"}; // "mod" must be last - for (const auto& i : AdmissibleOutarray) - AdmissibleOut.push_back(i); - assert(AdmissibleOut.back() == "mod"); - - // analyzing long options - for (const auto& LongOption : LongOptions) { - size_t j; - for (j = 0; j < LongOption.size(); ++j) { - if (LongOption[j] == '=') - break; - } - if (j < LongOption.size()) { - string OptName = LongOption.substr(0, j); - string OptValue = LongOption.substr(j + 1, LongOption.size() - 1); - if (OptName == "OutputDir") { - setOutputDirName(OptValue); - continue; - } - } - if (LongOption == "help") { - return true; // indicate printing of help text - } - if (LongOption == "verbose") { - verbose = true; - continue; - } - if (LongOption == "version") { - printVersion(); - exit(0); - } - /* if(LongOptions[i]=="BigInt"){ - use_Big_Integer=true; - continue; - }*/ - if (LongOption == "LongLong") { - use_long_long = true; - continue; - } - if (LongOption == "NoExtRaysOutput") { - no_ext_rays_output = true; - continue; - } - if (LongOption == "NoSuppHypsOutput") { - no_supp_hyps_output = true; - continue; - } - if (LongOption == "NoMatricesOutput") { - no_matrices_output = true; - continue; - } - if (LongOption == "ignore") { - ignoreInFileOpt = true; - continue; - } - if (LongOption == "files") { - write_extra_files = true; - continue; - } - if (LongOption == "all-files") { - write_all_files = true; - continue; - } - if (find(AdmissibleOut.begin(), AdmissibleOut.end(), LongOption) != AdmissibleOut.end()) { - OutFiles.push_back(LongOption); - continue; - } - try { - to_compute.set(toConeProperty(LongOption)); - continue; - } catch (const BadInputException&) { - }; - cerr << "Error: Unknown option --" << LongOption << endl; - exit(1); - } - - if (output_dir_set) { - output_file = output_dir + pureName(project_name); - } - else - output_file = project_name; - - return false; // no need to print help text -} - -template -void OptionsHandler::applyOutputOptions(Output& Out) { - if (no_ext_rays_output) - Out.set_no_ext_rays_output(); - if (no_supp_hyps_output) - Out.set_no_supp_hyps_output(); - if (no_matrices_output) - Out.set_no_matrices_output(); - if (write_all_files) { - Out.set_write_all_files(); - } - else if (write_extra_files) { - Out.set_write_extra_files(); - } - if (to_compute.test(ConeProperty::Triangulation) || to_compute.test(ConeProperty::ConeDecomposition) - || to_compute.test(ConeProperty::UnimodularTriangulation) || to_compute.test(ConeProperty::LatticePointTriangulation) - || to_compute.test(ConeProperty::AllGeneratorsTriangulation) - ) { - Out.set_write_tri(true); - Out.set_write_tgn(true); - Out.set_write_inv(true); - } - if (to_compute.test(ConeProperty::StanleyDec)) { - Out.set_write_dec(true); - Out.set_write_tgn(true); - Out.set_write_inv(true); - } - if (to_compute.test(ConeProperty::FaceLattice)) { - Out.set_write_fac(true); - } - if (to_compute.test(ConeProperty::Incidence)) { - Out.set_write_inc(true); - } - if (to_compute.test(ConeProperty::ExploitAutomsVectors) || to_compute.test(ConeProperty::ExploitAutomsMult) || - to_compute.test(ConeProperty::Automorphisms) || to_compute.test(ConeProperty::AmbientAutomorphisms) || - to_compute.test(ConeProperty::CombinatorialAutomorphisms) || to_compute.test(ConeProperty::RationalAutomorphisms) || - to_compute.test(ConeProperty::EuclideanAutomorphisms)) { - Out.set_write_aut(true); - } - for (const auto& OutFile : OutFiles) { - if (OutFile 
== "gen") { - Out.set_write_gen(true); - continue; - } - if (OutFile == "cst") { - Out.set_write_cst(true); - continue; - } - if (OutFile == "inv") { - Out.set_write_inv(true); - continue; - } - if (OutFile == "ht1") { - Out.set_write_ht1(true); - continue; - } - if (OutFile == "ext") { - Out.set_write_ext(true); - continue; - } - if (OutFile == "egn") { - Out.set_write_egn(true); - continue; - } - if (OutFile == "esp") { - Out.set_write_esp(true); - continue; - } - if (OutFile == "typ") { - Out.set_write_typ(true); - continue; - } - if (OutFile == "lat") { - Out.set_write_lat(true); - continue; - } - if (OutFile == "msp") { - Out.set_write_msp(true); - continue; - } - if (OutFile == "mod") { - Out.set_write_mod(true); - continue; - } - } - - if (!project_name_set) { - cerr << "ERROR: No project name set!" << endl; - exit(1); - } - Out.set_name(output_file); -} - -bool OptionsHandler::activateDefaultMode() { - if (to_compute.goals().none() && !to_compute.test(ConeProperty::DualMode)) { - to_compute.set(ConeProperty::DefaultMode); - return true; - } - return false; -} - -string pureName(const string& fullName) { - // extracts the pure filename - - string slash = "/"; -#ifdef _WIN32 // for 32 and 64 bit windows - slash = "\\"; -#endif - size_t found = fullName.rfind(slash); - if (found == std::string::npos) - return (fullName); - found++; - size_t length = fullName.size() - found; - - // cout << "**************************** " << fullName.substr(found,length) << endl; - // exit(1); - return (fullName.substr(found, length)); -} diff -Nru normaliz-3.8.5+ds/source/options.h normaliz-3.8.9+ds/source/options.h --- normaliz-3.8.5+ds/source/options.h 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/options.h 1970-01-01 00:00:00.000000000 +0000 @@ -1,163 +0,0 @@ -/* - * Normaliz - * Copyright (C) 2007-2019 Winfried Bruns, Bogdan Ichim, Christof Soeger - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - * - * As an exception, when this program is distributed through (i) the App Store - * by Apple Inc.; (ii) the Mac App Store by Apple Inc.; or (iii) Google Play - * by Google Inc., then that store may impose any digital rights management, - * device limits and/or redistribution restrictions that are required by its - * terms of service. 
- */ - -#include -#include -#include -#include -using namespace std; - -#include "libnormaliz/input_type.h" -#include "libnormaliz/cone.h" -#include "libnormaliz/output.h" -using namespace libnormaliz; - -#ifndef NORMALIZ_OPTIONS_H -#define NORMALIZ_OPTIONS_H - -//--------------------------------------------------------------------------- - -class OptionsHandler { - public: - OptionsHandler(); - - // returns true if a help should be printed, false otherwise - bool handle_commandline(int argc, char* argv[]); - - // returns true if default mode was activated, false otherwise - bool activateDefaultMode(); - - template - void applyOutputOptions(Output& Out); - - bool isFilenameSet() const { - return project_name_set; - } - - bool isIgnoreInFileOpt() const { - return ignoreInFileOpt; - } - - int getNrThreads() const { - return nr_threads; - } - - void activateConeProperty(ConeProperty::Enum cp) { - to_compute.set(cp, true); - } - - void activateInputFileConeProperty(ConeProperty::Enum cp) { - if (!ignoreInFileOpt) - to_compute.set(cp, true); - } - /* void activateInputFileBigInt() { - if (!ignoreInFileOpt) use_Big_Integer = true; - }*/ - void activateInputFileLongLong() { - if (!ignoreInFileOpt) - use_long_long = true; - } - - void activateNoExtRaysOutput() { - if (!ignoreInFileOpt) - no_ext_rays_output = true; - } - - void activateNoMatricesOutput() { - if (!ignoreInFileOpt) - no_matrices_output = true; - } - - void activateNoSuppHypsOutput() { - if (!ignoreInFileOpt) - no_supp_hyps_output = true; - } - - const ConeProperties& getToCompute() const { - return to_compute; - } - - /* bool isUseBigInteger() const { - return use_Big_Integer; - }*/ - bool isUseLongLong() const { - return use_long_long; - } - - bool isNoExtRaysOutput() const { - return no_ext_rays_output; - } - - bool isNoMatricesOutput() const { - return no_matrices_output; - } - - bool isNoSuppHypsOutput() const { - return no_supp_hyps_output; - } - - const string& getProjectName() const { - return project_name; - } - - const string& getOutputDir() const { - return output_dir; - } - - void setProjectName(const string& s); - void setOutputDirName(const string& s); - - //--------------------------------------------------------------------------- - - private: - bool project_name_set; - bool output_dir_set; - string project_name; - string output_dir; - string output_file; - - // bool use_Big_Integer; now in ConeProperty - bool use_long_long; - bool no_ext_rays_output; - bool no_supp_hyps_output; - bool no_matrices_output; - - bool ignoreInFileOpt; - - int nr_threads; - - ConeProperties to_compute; - - bool write_extra_files, write_all_files; - - vector OutFiles; - - // return true if help should be printed, false otherwise - bool handle_options(vector& LongOptions, string& ShortOptions); -}; - -//--------------------------------------------------------------------------- - -string pureName(const string& fullName); // extracts the pure filename from a path - -#endif // NMZ_OPTIONS_H diff -Nru normaliz-3.8.5+ds/source/outerpar/outerpar.cpp normaliz-3.8.9+ds/source/outerpar/outerpar.cpp --- normaliz-3.8.5+ds/source/outerpar/outerpar.cpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/source/outerpar/outerpar.cpp 2020-07-21 15:37:45.000000000 +0000 @@ -1,4 +1,4 @@ -#include +#include #include #include #include diff -Nru normaliz-3.8.5+ds/test/catch.hpp normaliz-3.8.9+ds/test/catch.hpp --- normaliz-3.8.5+ds/test/catch.hpp 2020-06-06 09:29:05.000000000 +0000 +++ normaliz-3.8.9+ds/test/catch.hpp 1970-01-01 00:00:00.000000000 +0000 
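The option handling removed from source/options.cpp and source/options.h in this patch moves into the library (libnormaliz/options.cpp and libnormaliz/options.h in the Makefile.am hunk above), and normaliz.cpp no longer includes options.cpp and input.cpp directly. The long-option convention the deleted handle_options code implements is simple: an argument of the form --Name=Value is split at the first '=', and anything without '=' is treated as a plain switch such as --verbose or a cone property name. A small self-contained sketch of that splitting step (parse_long_option is a hypothetical helper written for illustration, not a Normaliz function):

// Standalone sketch of the --Name=Value splitting performed in the deleted
// OptionsHandler::handle_options above.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Split "Name=Value" at the first '='; options without '=' come back with an
// empty value, mirroring how plain switches like "verbose" were handled.
std::pair<std::string, std::string> parse_long_option(const std::string& opt) {
    const std::string::size_type j = opt.find('=');
    if (j == std::string::npos)
        return {opt, std::string()};
    return {opt.substr(0, j), opt.substr(j + 1)};
}

int main() {
    const std::vector<std::string> long_options = {"OutputDir=/tmp/nmz", "verbose", "HilbertBasis"};
    for (const std::string& lo : long_options) {
        const auto parsed = parse_long_option(lo);
        std::cout << "--" << parsed.first << " -> \"" << parsed.second << "\"\n";
    }
    return 0;
}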
@@ -1,17075 +0,0 @@ -/* - * Catch v2.9.2 - * Generated: 2019-08-08 13:35:12.279703 - * ---------------------------------------------------------- - * This file has been merged from multiple headers. Please don't edit it directly - * Copyright (c) 2019 Two Blue Cubes Ltd. All rights reserved. - * - * Distributed under the Boost Software License, Version 1.0. (See accompanying - * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) - */ -#ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED -#define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED -// start catch.hpp - - -#define CATCH_VERSION_MAJOR 2 -#define CATCH_VERSION_MINOR 9 -#define CATCH_VERSION_PATCH 2 - -#ifdef __clang__ -# pragma clang system_header -#elif defined __GNUC__ -# pragma GCC system_header -#endif - -// start catch_suppress_warnings.h - -#ifdef __clang__ -# ifdef __ICC // icpc defines the __clang__ macro -# pragma warning(push) -# pragma warning(disable: 161 1682) -# else // __ICC -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wpadded" -# pragma clang diagnostic ignored "-Wswitch-enum" -# pragma clang diagnostic ignored "-Wcovered-switch-default" -# endif -#elif defined __GNUC__ - // Because REQUIREs trigger GCC's -Wparentheses, and because still - // supported version of g++ have only buggy support for _Pragmas, - // Wparentheses have to be suppressed globally. -# pragma GCC diagnostic ignored "-Wparentheses" // See #674 for details - -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wunused-variable" -# pragma GCC diagnostic ignored "-Wpadded" -#endif -// end catch_suppress_warnings.h -#if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) -# define CATCH_IMPL -# define CATCH_CONFIG_ALL_PARTS -#endif - -// In the impl file, we want to have access to all parts of the headers -// Can also be used to sanely support PCHs -#if defined(CATCH_CONFIG_ALL_PARTS) -# define CATCH_CONFIG_EXTERNAL_INTERFACES -# if defined(CATCH_CONFIG_DISABLE_MATCHERS) -# undef CATCH_CONFIG_DISABLE_MATCHERS -# endif -# if !defined(CATCH_CONFIG_ENABLE_CHRONO_STRINGMAKER) -# define CATCH_CONFIG_ENABLE_CHRONO_STRINGMAKER -# endif -#endif - -#if !defined(CATCH_CONFIG_IMPL_ONLY) -// start catch_platform.h - -#ifdef __APPLE__ -# include -# if TARGET_OS_OSX == 1 -# define CATCH_PLATFORM_MAC -# elif TARGET_OS_IPHONE == 1 -# define CATCH_PLATFORM_IPHONE -# endif - -#elif defined(linux) || defined(__linux) || defined(__linux__) -# define CATCH_PLATFORM_LINUX - -#elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) || defined(__MINGW32__) -# define CATCH_PLATFORM_WINDOWS -#endif - -// end catch_platform.h - -#ifdef CATCH_IMPL -# ifndef CLARA_CONFIG_MAIN -# define CLARA_CONFIG_MAIN_NOT_DEFINED -# define CLARA_CONFIG_MAIN -# endif -#endif - -// start catch_user_interfaces.h - -namespace Catch { - unsigned int rngSeed(); -} - -// end catch_user_interfaces.h -// start catch_tag_alias_autoregistrar.h - -// start catch_common.h - -// start catch_compiler_capabilities.h - -// Detect a number of compiler features - by compiler -// The following features are defined: -// -// CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? -// CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? -// CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? -// CATCH_CONFIG_DISABLE_EXCEPTIONS : Are exceptions enabled? 
-// **************** -// Note to maintainers: if new toggles are added please document them -// in configuration.md, too -// **************** - -// In general each macro has a _NO_ form -// (e.g. CATCH_CONFIG_NO_POSIX_SIGNALS) which disables the feature. -// Many features, at point of detection, define an _INTERNAL_ macro, so they -// can be combined, en-mass, with the _NO_ forms later. - -#ifdef __cplusplus - -# if (__cplusplus >= 201402L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201402L) -# define CATCH_CPP14_OR_GREATER -# endif - -# if (__cplusplus >= 201703L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) -# define CATCH_CPP17_OR_GREATER -# endif - -#endif - -#if defined(CATCH_CPP17_OR_GREATER) -# define CATCH_INTERNAL_CONFIG_CPP17_UNCAUGHT_EXCEPTIONS -#endif - -#ifdef __clang__ - -# define CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS \ - _Pragma( "clang diagnostic push" ) \ - _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) \ - _Pragma( "clang diagnostic ignored \"-Wglobal-constructors\"") -# define CATCH_INTERNAL_UNSUPPRESS_GLOBALS_WARNINGS \ - _Pragma( "clang diagnostic pop" ) - -# define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ - _Pragma( "clang diagnostic push" ) \ - _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) -# define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ - _Pragma( "clang diagnostic pop" ) - -# define CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS \ - _Pragma( "clang diagnostic push" ) \ - _Pragma( "clang diagnostic ignored \"-Wunused-variable\"" ) -# define CATCH_INTERNAL_UNSUPPRESS_UNUSED_WARNINGS \ - _Pragma( "clang diagnostic pop" ) - -# define CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS \ - _Pragma( "clang diagnostic push" ) \ - _Pragma( "clang diagnostic ignored \"-Wgnu-zero-variadic-macro-arguments\"" ) -# define CATCH_INTERNAL_UNSUPPRESS_ZERO_VARIADIC_WARNINGS \ - _Pragma( "clang diagnostic pop" ) - -#endif // __clang__ - -//////////////////////////////////////////////////////////////////////////////// -// Assume that non-Windows platforms support posix signals by default -#if !defined(CATCH_PLATFORM_WINDOWS) - #define CATCH_INTERNAL_CONFIG_POSIX_SIGNALS -#endif - -//////////////////////////////////////////////////////////////////////////////// -// We know some environments not to support full POSIX signals -#if defined(__CYGWIN__) || defined(__QNX__) || defined(__EMSCRIPTEN__) || defined(__DJGPP__) - #define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS -#endif - -#ifdef __OS400__ -# define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS -# define CATCH_CONFIG_COLOUR_NONE -#endif - -//////////////////////////////////////////////////////////////////////////////// -// Android somehow still does not support std::to_string -#if defined(__ANDROID__) -# define CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING -#endif - -//////////////////////////////////////////////////////////////////////////////// -// Not all Windows environments support SEH properly -#if defined(__MINGW32__) -# define CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH -#endif - -//////////////////////////////////////////////////////////////////////////////// -// PS4 -#if defined(__ORBIS__) -# define CATCH_INTERNAL_CONFIG_NO_NEW_CAPTURE -#endif - -//////////////////////////////////////////////////////////////////////////////// -// Cygwin -#ifdef __CYGWIN__ - -// Required for some versions of Cygwin to declare gettimeofday -// see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin -# define _BSD_SOURCE -// some versions of cygwin (most) do not support 
std::to_string. Use the libstd check. -// https://gcc.gnu.org/onlinedocs/gcc-4.8.2/libstdc++/api/a01053_source.html line 2812-2813 -# if !((__cplusplus >= 201103L) && defined(_GLIBCXX_USE_C99) \ - && !defined(_GLIBCXX_HAVE_BROKEN_VSWPRINTF)) - -# define CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING - -# endif -#endif // __CYGWIN__ - -//////////////////////////////////////////////////////////////////////////////// -// Visual C++ -#ifdef _MSC_VER - -# if _MSC_VER >= 1900 // Visual Studio 2015 or newer -# define CATCH_INTERNAL_CONFIG_CPP17_UNCAUGHT_EXCEPTIONS -# endif - -// Universal Windows platform does not support SEH -// Or console colours (or console at all...) -# if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP) -# define CATCH_CONFIG_COLOUR_NONE -# else -# define CATCH_INTERNAL_CONFIG_WINDOWS_SEH -# endif - -// MSVC traditional preprocessor needs some workaround for __VA_ARGS__ -// _MSVC_TRADITIONAL == 0 means new conformant preprocessor -// _MSVC_TRADITIONAL == 1 means old traditional non-conformant preprocessor -# if !defined(_MSVC_TRADITIONAL) || (defined(_MSVC_TRADITIONAL) && _MSVC_TRADITIONAL) -# define CATCH_INTERNAL_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR -# endif -#endif // _MSC_VER - -#if defined(_REENTRANT) || defined(_MSC_VER) -// Enable async processing, as -pthread is specified or no additional linking is required -# define CATCH_INTERNAL_CONFIG_USE_ASYNC -#endif // _MSC_VER - -//////////////////////////////////////////////////////////////////////////////// -// Check if we are compiled with -fno-exceptions or equivalent -#if defined(__EXCEPTIONS) || defined(__cpp_exceptions) || defined(_CPPUNWIND) -# define CATCH_INTERNAL_CONFIG_EXCEPTIONS_ENABLED -#endif - -//////////////////////////////////////////////////////////////////////////////// -// DJGPP -#ifdef __DJGPP__ -# define CATCH_INTERNAL_CONFIG_NO_WCHAR -#endif // __DJGPP__ - -//////////////////////////////////////////////////////////////////////////////// -// Embarcadero C++Build -#if defined(__BORLANDC__) - #define CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN -#endif - -//////////////////////////////////////////////////////////////////////////////// - -// Use of __COUNTER__ is suppressed during code analysis in -// CLion/AppCode 2017.2.x and former, because __COUNTER__ is not properly -// handled by it. -// Otherwise all supported compilers support COUNTER macro, -// but user still might want to turn it off -#if ( !defined(__JETBRAINS_IDE__) || __JETBRAINS_IDE__ >= 20170300L ) - #define CATCH_INTERNAL_CONFIG_COUNTER -#endif - -//////////////////////////////////////////////////////////////////////////////// - -// RTX is a special version of Windows that is real time. -// This means that it is detected as Windows, but does not provide -// the same set of capabilities as real Windows does. -#if defined(UNDER_RTSS) || defined(RTX64_BUILD) - #define CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH - #define CATCH_INTERNAL_CONFIG_NO_ASYNC - #define CATCH_CONFIG_COLOUR_NONE -#endif - -//////////////////////////////////////////////////////////////////////////////// -// Check if string_view is available and usable -// The check is split apart to work around v140 (VS2015) preprocessor issue... 
-#if defined(__has_include) -#if __has_include() && defined(CATCH_CPP17_OR_GREATER) -# define CATCH_INTERNAL_CONFIG_CPP17_STRING_VIEW -#endif -#endif - -//////////////////////////////////////////////////////////////////////////////// -// Check if optional is available and usable -#if defined(__has_include) -# if __has_include() && defined(CATCH_CPP17_OR_GREATER) -# define CATCH_INTERNAL_CONFIG_CPP17_OPTIONAL -# endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) -#endif // __has_include - -//////////////////////////////////////////////////////////////////////////////// -// Check if byte is available and usable -#if defined(__has_include) -# if __has_include() && defined(CATCH_CPP17_OR_GREATER) -# define CATCH_INTERNAL_CONFIG_CPP17_BYTE -# endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) -#endif // __has_include - -//////////////////////////////////////////////////////////////////////////////// -// Check if variant is available and usable -#if defined(__has_include) -# if __has_include() && defined(CATCH_CPP17_OR_GREATER) -# if defined(__clang__) && (__clang_major__ < 8) - // work around clang bug with libstdc++ https://bugs.llvm.org/show_bug.cgi?id=31852 - // fix should be in clang 8, workaround in libstdc++ 8.2 -# include -# if defined(__GLIBCXX__) && defined(_GLIBCXX_RELEASE) && (_GLIBCXX_RELEASE < 9) -# define CATCH_CONFIG_NO_CPP17_VARIANT -# else -# define CATCH_INTERNAL_CONFIG_CPP17_VARIANT -# endif // defined(__GLIBCXX__) && defined(_GLIBCXX_RELEASE) && (_GLIBCXX_RELEASE < 9) -# else -# define CATCH_INTERNAL_CONFIG_CPP17_VARIANT -# endif // defined(__clang__) && (__clang_major__ < 8) -# endif // __has_include() && defined(CATCH_CPP17_OR_GREATER) -#endif // __has_include - -#if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) -# define CATCH_CONFIG_COUNTER -#endif -#if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) && !defined(CATCH_INTERNAL_CONFIG_NO_WINDOWS_SEH) -# define CATCH_CONFIG_WINDOWS_SEH -#endif -// This is set by default, because we assume that unix compilers are posix-signal-compatible by default. -#if defined(CATCH_INTERNAL_CONFIG_POSIX_SIGNALS) && !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) -# define CATCH_CONFIG_POSIX_SIGNALS -#endif -// This is set by default, because we assume that compilers with no wchar_t support are just rare exceptions. 
-#if !defined(CATCH_INTERNAL_CONFIG_NO_WCHAR) && !defined(CATCH_CONFIG_NO_WCHAR) && !defined(CATCH_CONFIG_WCHAR) -# define CATCH_CONFIG_WCHAR -#endif - -#if !defined(CATCH_INTERNAL_CONFIG_NO_CPP11_TO_STRING) && !defined(CATCH_CONFIG_NO_CPP11_TO_STRING) && !defined(CATCH_CONFIG_CPP11_TO_STRING) -# define CATCH_CONFIG_CPP11_TO_STRING -#endif - -#if defined(CATCH_INTERNAL_CONFIG_CPP17_OPTIONAL) && !defined(CATCH_CONFIG_NO_CPP17_OPTIONAL) && !defined(CATCH_CONFIG_CPP17_OPTIONAL) -# define CATCH_CONFIG_CPP17_OPTIONAL -#endif - -#if defined(CATCH_INTERNAL_CONFIG_CPP17_UNCAUGHT_EXCEPTIONS) && !defined(CATCH_CONFIG_NO_CPP17_UNCAUGHT_EXCEPTIONS) && !defined(CATCH_CONFIG_CPP17_UNCAUGHT_EXCEPTIONS) -# define CATCH_CONFIG_CPP17_UNCAUGHT_EXCEPTIONS -#endif - -#if defined(CATCH_INTERNAL_CONFIG_CPP17_STRING_VIEW) && !defined(CATCH_CONFIG_NO_CPP17_STRING_VIEW) && !defined(CATCH_CONFIG_CPP17_STRING_VIEW) -# define CATCH_CONFIG_CPP17_STRING_VIEW -#endif - -#if defined(CATCH_INTERNAL_CONFIG_CPP17_VARIANT) && !defined(CATCH_CONFIG_NO_CPP17_VARIANT) && !defined(CATCH_CONFIG_CPP17_VARIANT) -# define CATCH_CONFIG_CPP17_VARIANT -#endif - -#if defined(CATCH_INTERNAL_CONFIG_CPP17_BYTE) && !defined(CATCH_CONFIG_NO_CPP17_BYTE) && !defined(CATCH_CONFIG_CPP17_BYTE) -# define CATCH_CONFIG_CPP17_BYTE -#endif - -#if defined(CATCH_CONFIG_EXPERIMENTAL_REDIRECT) -# define CATCH_INTERNAL_CONFIG_NEW_CAPTURE -#endif - -#if defined(CATCH_INTERNAL_CONFIG_NEW_CAPTURE) && !defined(CATCH_INTERNAL_CONFIG_NO_NEW_CAPTURE) && !defined(CATCH_CONFIG_NO_NEW_CAPTURE) && !defined(CATCH_CONFIG_NEW_CAPTURE) -# define CATCH_CONFIG_NEW_CAPTURE -#endif - -#if !defined(CATCH_INTERNAL_CONFIG_EXCEPTIONS_ENABLED) && !defined(CATCH_CONFIG_DISABLE_EXCEPTIONS) -# define CATCH_CONFIG_DISABLE_EXCEPTIONS -#endif - -#if defined(CATCH_INTERNAL_CONFIG_POLYFILL_ISNAN) && !defined(CATCH_CONFIG_NO_POLYFILL_ISNAN) && !defined(CATCH_CONFIG_POLYFILL_ISNAN) -# define CATCH_CONFIG_POLYFILL_ISNAN -#endif - -#if defined(CATCH_INTERNAL_CONFIG_USE_ASYNC) && !defined(CATCH_INTERNAL_CONFIG_NO_ASYNC) && !defined(CATCH_CONFIG_NO_USE_ASYNC) && !defined(CATCH_CONFIG_USE_ASYNC) -# define CATCH_CONFIG_USE_ASYNC -#endif - -#if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) -# define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS -# define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS -#endif -#if !defined(CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS) -# define CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS -# define CATCH_INTERNAL_UNSUPPRESS_GLOBALS_WARNINGS -#endif -#if !defined(CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS) -# define CATCH_INTERNAL_SUPPRESS_UNUSED_WARNINGS -# define CATCH_INTERNAL_UNSUPPRESS_UNUSED_WARNINGS -#endif -#if !defined(CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS) -# define CATCH_INTERNAL_SUPPRESS_ZERO_VARIADIC_WARNINGS -# define CATCH_INTERNAL_UNSUPPRESS_ZERO_VARIADIC_WARNINGS -#endif - -#if defined(CATCH_CONFIG_DISABLE_EXCEPTIONS) -#define CATCH_TRY if ((true)) -#define CATCH_CATCH_ALL if ((false)) -#define CATCH_CATCH_ANON(type) if ((false)) -#else -#define CATCH_TRY try -#define CATCH_CATCH_ALL catch (...) 
-#define CATCH_CATCH_ANON(type) catch (type) -#endif - -#if defined(CATCH_INTERNAL_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR) && !defined(CATCH_CONFIG_NO_TRADITIONAL_MSVC_PREPROCESSOR) && !defined(CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR) -#define CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR -#endif - -// end catch_compiler_capabilities.h -#define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line -#define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) -#ifdef CATCH_CONFIG_COUNTER -# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ ) -#else -# define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ ) -#endif - -#include -#include -#include - -// We need a dummy global operator<< so we can bring it into Catch namespace later -struct Catch_global_namespace_dummy {}; -std::ostream& operator<<(std::ostream&, Catch_global_namespace_dummy); - -namespace Catch { - - struct CaseSensitive { enum Choice { - Yes, - No - }; }; - - class NonCopyable { - NonCopyable( NonCopyable const& ) = delete; - NonCopyable( NonCopyable && ) = delete; - NonCopyable& operator = ( NonCopyable const& ) = delete; - NonCopyable& operator = ( NonCopyable && ) = delete; - - protected: - NonCopyable(); - virtual ~NonCopyable(); - }; - - struct SourceLineInfo { - - SourceLineInfo() = delete; - SourceLineInfo( char const* _file, std::size_t _line ) noexcept - : file( _file ), - line( _line ) - {} - - SourceLineInfo( SourceLineInfo const& other ) = default; - SourceLineInfo& operator = ( SourceLineInfo const& ) = default; - SourceLineInfo( SourceLineInfo&& ) noexcept = default; - SourceLineInfo& operator = ( SourceLineInfo&& ) noexcept = default; - - bool empty() const noexcept; - bool operator == ( SourceLineInfo const& other ) const noexcept; - bool operator < ( SourceLineInfo const& other ) const noexcept; - - char const* file; - std::size_t line; - }; - - std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ); - - // Bring in operator<< from global namespace into Catch namespace - // This is necessary because the overload of operator<< above makes - // lookup stop at namespace Catch - using ::operator<<; - - // Use this in variadic streaming macros to allow - // >> +StreamEndStop - // as well as - // >> stuff +StreamEndStop - struct StreamEndStop { - std::string operator+() const; - }; - template - T const& operator + ( T const& value, StreamEndStop ) { - return value; - } -} - -#define CATCH_INTERNAL_LINEINFO \ - ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) ) - -// end catch_common.h -namespace Catch { - - struct RegistrarForTagAliases { - RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ); - }; - -} // end namespace Catch - -#define CATCH_REGISTER_TAG_ALIAS( alias, spec ) \ - CATCH_INTERNAL_SUPPRESS_GLOBALS_WARNINGS \ - namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); } \ - CATCH_INTERNAL_UNSUPPRESS_GLOBALS_WARNINGS - -// end catch_tag_alias_autoregistrar.h -// start catch_test_registry.h - -// start catch_interfaces_testcase.h - -#include - -namespace Catch { - - class TestSpec; - - struct ITestInvoker { - virtual void invoke () const = 0; - virtual ~ITestInvoker(); - }; - - class TestCase; - struct IConfig; - - struct ITestCaseRegistry { - virtual ~ITestCaseRegistry(); - virtual std::vector const& getAllTests() const = 0; - virtual 
std::vector const& getAllTestsSorted( IConfig const& config ) const = 0; - }; - - bool isThrowSafe( TestCase const& testCase, IConfig const& config ); - bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ); - std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ); - std::vector const& getAllTestCasesSorted( IConfig const& config ); - -} - -// end catch_interfaces_testcase.h -// start catch_stringref.h - -#include -#include -#include - -namespace Catch { - - /// A non-owning string class (similar to the forthcoming std::string_view) - /// Note that, because a StringRef may be a substring of another string, - /// it may not be null terminated. c_str() must return a null terminated - /// string, however, and so the StringRef will internally take ownership - /// (taking a copy), if necessary. In theory this ownership is not externally - /// visible - but it does mean (substring) StringRefs should not be shared between - /// threads. - class StringRef { - public: - using size_type = std::size_t; - - private: - friend struct StringRefTestAccess; - - char const* m_start; - size_type m_size; - - char* m_data = nullptr; - - void takeOwnership(); - - static constexpr char const* const s_empty = ""; - - public: // construction/ assignment - StringRef() noexcept - : StringRef( s_empty, 0 ) - {} - - StringRef( StringRef const& other ) noexcept - : m_start( other.m_start ), - m_size( other.m_size ) - {} - - StringRef( StringRef&& other ) noexcept - : m_start( other.m_start ), - m_size( other.m_size ), - m_data( other.m_data ) - { - other.m_data = nullptr; - } - - StringRef( char const* rawChars ) noexcept; - - StringRef( char const* rawChars, size_type size ) noexcept - : m_start( rawChars ), - m_size( size ) - {} - - StringRef( std::string const& stdString ) noexcept - : m_start( stdString.c_str() ), - m_size( stdString.size() ) - {} - - ~StringRef() noexcept { - delete[] m_data; - } - - auto operator = ( StringRef const &other ) noexcept -> StringRef& { - delete[] m_data; - m_data = nullptr; - m_start = other.m_start; - m_size = other.m_size; - return *this; - } - - operator std::string() const; - - void swap( StringRef& other ) noexcept; - - public: // operators - auto operator == ( StringRef const& other ) const noexcept -> bool; - auto operator != ( StringRef const& other ) const noexcept -> bool; - - auto operator[] ( size_type index ) const noexcept -> char; - - public: // named queries - auto empty() const noexcept -> bool { - return m_size == 0; - } - auto size() const noexcept -> size_type { - return m_size; - } - - auto numberOfCharacters() const noexcept -> size_type; - auto c_str() const -> char const*; - - public: // substrings and searches - auto substr( size_type start, size_type size ) const noexcept -> StringRef; - - // Returns the current start pointer. 
- // Note that the pointer can change when if the StringRef is a substring - auto currentData() const noexcept -> char const*; - - private: // ownership queries - may not be consistent between calls - auto isOwned() const noexcept -> bool; - auto isSubstring() const noexcept -> bool; - }; - - auto operator + ( StringRef const& lhs, StringRef const& rhs ) -> std::string; - auto operator + ( StringRef const& lhs, char const* rhs ) -> std::string; - auto operator + ( char const* lhs, StringRef const& rhs ) -> std::string; - - auto operator += ( std::string& lhs, StringRef const& sr ) -> std::string&; - auto operator << ( std::ostream& os, StringRef const& sr ) -> std::ostream&; - - inline auto operator "" _sr( char const* rawChars, std::size_t size ) noexcept -> StringRef { - return StringRef( rawChars, size ); - } - -} // namespace Catch - -inline auto operator "" _catch_sr( char const* rawChars, std::size_t size ) noexcept -> Catch::StringRef { - return Catch::StringRef( rawChars, size ); -} - -// end catch_stringref.h -// start catch_type_traits.hpp - - -#include - -namespace Catch{ - -#ifdef CATCH_CPP17_OR_GREATER - template - inline constexpr auto is_unique = std::true_type{}; - - template - inline constexpr auto is_unique = std::bool_constant< - (!std::is_same_v && ...) && is_unique - >{}; -#else - -template -struct is_unique : std::true_type{}; - -template -struct is_unique : std::integral_constant -::value - && is_unique::value - && is_unique::value ->{}; - -#endif -} - -// end catch_type_traits.hpp -// start catch_preprocessor.hpp - - -#define CATCH_RECURSION_LEVEL0(...) __VA_ARGS__ -#define CATCH_RECURSION_LEVEL1(...) CATCH_RECURSION_LEVEL0(CATCH_RECURSION_LEVEL0(CATCH_RECURSION_LEVEL0(__VA_ARGS__))) -#define CATCH_RECURSION_LEVEL2(...) CATCH_RECURSION_LEVEL1(CATCH_RECURSION_LEVEL1(CATCH_RECURSION_LEVEL1(__VA_ARGS__))) -#define CATCH_RECURSION_LEVEL3(...) CATCH_RECURSION_LEVEL2(CATCH_RECURSION_LEVEL2(CATCH_RECURSION_LEVEL2(__VA_ARGS__))) -#define CATCH_RECURSION_LEVEL4(...) CATCH_RECURSION_LEVEL3(CATCH_RECURSION_LEVEL3(CATCH_RECURSION_LEVEL3(__VA_ARGS__))) -#define CATCH_RECURSION_LEVEL5(...) CATCH_RECURSION_LEVEL4(CATCH_RECURSION_LEVEL4(CATCH_RECURSION_LEVEL4(__VA_ARGS__))) - -#ifdef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR -#define INTERNAL_CATCH_EXPAND_VARGS(...) __VA_ARGS__ -// MSVC needs more evaluations -#define CATCH_RECURSION_LEVEL6(...) CATCH_RECURSION_LEVEL5(CATCH_RECURSION_LEVEL5(CATCH_RECURSION_LEVEL5(__VA_ARGS__))) -#define CATCH_RECURSE(...) CATCH_RECURSION_LEVEL6(CATCH_RECURSION_LEVEL6(__VA_ARGS__)) -#else -#define CATCH_RECURSE(...) CATCH_RECURSION_LEVEL5(__VA_ARGS__) -#endif - -#define CATCH_REC_END(...) -#define CATCH_REC_OUT - -#define CATCH_EMPTY() -#define CATCH_DEFER(id) id CATCH_EMPTY() - -#define CATCH_REC_GET_END2() 0, CATCH_REC_END -#define CATCH_REC_GET_END1(...) CATCH_REC_GET_END2 -#define CATCH_REC_GET_END(...) CATCH_REC_GET_END1 -#define CATCH_REC_NEXT0(test, next, ...) next CATCH_REC_OUT -#define CATCH_REC_NEXT1(test, next) CATCH_DEFER ( CATCH_REC_NEXT0 ) ( test, next, 0) -#define CATCH_REC_NEXT(test, next) CATCH_REC_NEXT1(CATCH_REC_GET_END test, next) - -#define CATCH_REC_LIST0(f, x, peek, ...) , f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1) ) ( f, peek, __VA_ARGS__ ) -#define CATCH_REC_LIST1(f, x, peek, ...) , f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST0) ) ( f, peek, __VA_ARGS__ ) -#define CATCH_REC_LIST2(f, x, peek, ...) 
f(x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1) ) ( f, peek, __VA_ARGS__ ) - -#define CATCH_REC_LIST0_UD(f, userdata, x, peek, ...) , f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1_UD) ) ( f, userdata, peek, __VA_ARGS__ ) -#define CATCH_REC_LIST1_UD(f, userdata, x, peek, ...) , f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST0_UD) ) ( f, userdata, peek, __VA_ARGS__ ) -#define CATCH_REC_LIST2_UD(f, userdata, x, peek, ...) f(userdata, x) CATCH_DEFER ( CATCH_REC_NEXT(peek, CATCH_REC_LIST1_UD) ) ( f, userdata, peek, __VA_ARGS__ ) - -// Applies the function macro `f` to each of the remaining parameters, inserts commas between the results, -// and passes userdata as the first parameter to each invocation, -// e.g. CATCH_REC_LIST_UD(f, x, a, b, c) evaluates to f(x, a), f(x, b), f(x, c) -#define CATCH_REC_LIST_UD(f, userdata, ...) CATCH_RECURSE(CATCH_REC_LIST2_UD(f, userdata, __VA_ARGS__, ()()(), ()()(), ()()(), 0)) - -#define CATCH_REC_LIST(f, ...) CATCH_RECURSE(CATCH_REC_LIST2(f, __VA_ARGS__, ()()(), ()()(), ()()(), 0)) - -#define INTERNAL_CATCH_EXPAND1(param) INTERNAL_CATCH_EXPAND2(param) -#define INTERNAL_CATCH_EXPAND2(...) INTERNAL_CATCH_NO## __VA_ARGS__ -#define INTERNAL_CATCH_DEF(...) INTERNAL_CATCH_DEF __VA_ARGS__ -#define INTERNAL_CATCH_NOINTERNAL_CATCH_DEF -#define INTERNAL_CATCH_STRINGIZE(...) INTERNAL_CATCH_STRINGIZE2(__VA_ARGS__) -#ifndef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR -#define INTERNAL_CATCH_STRINGIZE2(...) #__VA_ARGS__ -#define INTERNAL_CATCH_STRINGIZE_WITHOUT_PARENS(param) INTERNAL_CATCH_STRINGIZE(INTERNAL_CATCH_REMOVE_PARENS(param)) -#else -// MSVC is adding extra space and needs another indirection to expand INTERNAL_CATCH_NOINTERNAL_CATCH_DEF -#define INTERNAL_CATCH_STRINGIZE2(...) INTERNAL_CATCH_STRINGIZE3(__VA_ARGS__) -#define INTERNAL_CATCH_STRINGIZE3(...) #__VA_ARGS__ -#define INTERNAL_CATCH_STRINGIZE_WITHOUT_PARENS(param) (INTERNAL_CATCH_STRINGIZE(INTERNAL_CATCH_REMOVE_PARENS(param)) + 1) -#endif - -#define INTERNAL_CATCH_MAKE_NAMESPACE2(...) ns_##__VA_ARGS__ -#define INTERNAL_CATCH_MAKE_NAMESPACE(name) INTERNAL_CATCH_MAKE_NAMESPACE2(name) - -#define INTERNAL_CATCH_REMOVE_PARENS(...) INTERNAL_CATCH_EXPAND1(INTERNAL_CATCH_DEF __VA_ARGS__) - -#ifndef CATCH_CONFIG_TRADITIONAL_MSVC_PREPROCESSOR -#define INTERNAL_CATCH_MAKE_TYPE_LIST2(...) decltype(get_wrapper()) -#define INTERNAL_CATCH_MAKE_TYPE_LIST(...) INTERNAL_CATCH_MAKE_TYPE_LIST2(INTERNAL_CATCH_REMOVE_PARENS(__VA_ARGS__)) -#else -#define INTERNAL_CATCH_MAKE_TYPE_LIST2(...) INTERNAL_CATCH_EXPAND_VARGS(decltype(get_wrapper())) -#define INTERNAL_CATCH_MAKE_TYPE_LIST(...) 
INTERNAL_CATCH_EXPAND_VARGS(INTERNAL_CATCH_MAKE_TYPE_LIST2(INTERNAL_CATCH_REMOVE_PARENS(__VA_ARGS__))) -#endif - -#define INTERNAL_CATCH_MAKE_TYPE_LISTS_FROM_TYPES(...)\ - CATCH_REC_LIST(INTERNAL_CATCH_MAKE_TYPE_LIST,__VA_ARGS__) - -#define INTERNAL_CATCH_REMOVE_PARENS_1_ARG(_0) INTERNAL_CATCH_REMOVE_PARENS(_0) -#define INTERNAL_CATCH_REMOVE_PARENS_2_ARG(_0, _1) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_1_ARG(_1) -#define INTERNAL_CATCH_REMOVE_PARENS_3_ARG(_0, _1, _2) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_2_ARG(_1, _2) -#define INTERNAL_CATCH_REMOVE_PARENS_4_ARG(_0, _1, _2, _3) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_3_ARG(_1, _2, _3) -#define INTERNAL_CATCH_REMOVE_PARENS_5_ARG(_0, _1, _2, _3, _4) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_4_ARG(_1, _2, _3, _4) -#define INTERNAL_CATCH_REMOVE_PARENS_6_ARG(_0, _1, _2, _3, _4, _5) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_5_ARG(_1, _2, _3, _4, _5) -#define INTERNAL_CATCH_REMOVE_PARENS_7_ARG(_0, _1, _2, _3, _4, _5, _6) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_6_ARG(_1, _2, _4, _5, _6) -#define INTERNAL_CATCH_REMOVE_PARENS_8_ARG(_0, _1, _2, _3, _4, _5, _6, _7) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_7_ARG(_1, _2, _3, _4, _5, _6, _7) -#define INTERNAL_CATCH_REMOVE_PARENS_9_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_8_ARG(_1, _2, _3, _4, _5, _6, _7, _8) -#define INTERNAL_CATCH_REMOVE_PARENS_10_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_9_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9) -#define INTERNAL_CATCH_REMOVE_PARENS_11_ARG(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10) INTERNAL_CATCH_REMOVE_PARENS(_0), INTERNAL_CATCH_REMOVE_PARENS_10_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10) - -#define INTERNAL_CATCH_VA_NARGS_IMPL(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N - -#define INTERNAL_CATCH_TYPE_GEN\ - template struct TypeList {};\ - template\ - constexpr auto get_wrapper() noexcept -> TypeList { return {}; }\ - \ - template class L1, typename...E1, template class L2, typename...E2> \ - constexpr auto append(L1, L2) noexcept -> L1 { return {}; }\ - template< template class L1, typename...E1, template class L2, typename...E2, typename...Rest>\ - constexpr auto append(L1, L2, Rest...) noexcept -> decltype(append(L1{}, Rest{}...)) { return {}; }\ - template< template class L1, typename...E1, typename...Rest>\ - constexpr auto append(L1, TypeList, Rest...) noexcept -> L1 { return {}; }\ - \ - template< template class Container, template class List, typename...elems>\ - constexpr auto rewrap(List) noexcept -> TypeList> { return {}; }\ - template< template class Container, template class List, class...Elems, typename...Elements>\ - constexpr auto rewrap(List,Elements...) noexcept -> decltype(append(TypeList>{}, rewrap(Elements{}...))) { return {}; }\ - \ - template